From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as the OPNFV kvm base. It is taken from the
linux-4.1.y-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
and the base is:

commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior
Date:   Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior

We lose all the git history this way, which is not good. We should use a
separate OPNFV project repo for this in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
 kernel/drivers/infiniband/hw/ocrdma/Kconfig        |    8 +
 kernel/drivers/infiniband/hw/ocrdma/Makefile       |    5 +
 kernel/drivers/infiniband/hw/ocrdma/ocrdma.h       |  579 ++++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_abi.h   |  134 +
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.c    |  227 ++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.h    |   48 +
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.c    | 3172 +++++++++++++++++++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.h    |  142 +
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_main.c  |  682 +++++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_sli.h   | 2173 +++++++++++++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.c |  857 ++++++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.h |   58 +
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 3195 ++++++++++++++++++++
 kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h |   99 +
 14 files changed, 11379 insertions(+)
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/Kconfig
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/Makefile
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma.h
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_main.c
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
 create mode 100644 kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h

diff --git a/kernel/drivers/infiniband/hw/ocrdma/Kconfig b/kernel/drivers/infiniband/hw/ocrdma/Kconfig
new file mode 100644
index 000000000..c0cddc019
--- /dev/null
+++ b/kernel/drivers/infiniband/hw/ocrdma/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_OCRDMA
+	tristate "Emulex One Connect HCA support"
+	depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
+	select NET_VENDOR_EMULEX
+	select BE2NET
+	---help---
+	  This driver provides low-level InfiniBand over Ethernet
+	  support for Emulex One Connect host channel adapters (HCAs).
diff --git a/kernel/drivers/infiniband/hw/ocrdma/Makefile b/kernel/drivers/infiniband/hw/ocrdma/Makefile new file mode 100644 index 000000000..d1bfd4f4c --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/Makefile @@ -0,0 +1,5 @@ +ccflags-y := -Idrivers/net/ethernet/emulex/benet + +obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o + +ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma.h new file mode 100644 index 000000000..b396344fa --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -0,0 +1,579 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_H__ +#define __OCRDMA_H__ + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "ocrdma_sli.h" + +#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0" + +#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" +#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" + +#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)" +#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)" + +#define OC_SKH_DEVICE_PF 0x720 +#define OC_SKH_DEVICE_VF 0x728 +#define OCRDMA_MAX_AH 512 + +#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) + +#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) +#define EQ_INTR_PER_SEC_THRSH_HI 150000 +#define EQ_INTR_PER_SEC_THRSH_LOW 100000 +#define EQ_AIC_MAX_EQD 20 +#define EQ_AIC_MIN_EQD 0 + +void ocrdma_eqd_set_task(struct work_struct *work); + +struct ocrdma_dev_attr { + u8 fw_ver[32]; + u32 vendor_id; + u32 device_id; + u16 max_pd; + u16 max_dpp_pds; + u16 max_cq; + u16 max_cqe; + u16 max_qp; + u16 max_wqe; + u16 max_rqe; + u16 max_srq; + u32 max_inline_data; + int max_send_sge; + int max_recv_sge; + int max_srq_sge; + int max_rdma_sge; + int max_mr; + u64 max_mr_size; + u32 max_num_mr_pbl; + int max_mw; + int max_fmr; + int max_map_per_fmr; + int max_pages_per_frmr; + u16 max_ord_per_qp; + u16 max_ird_per_qp; + + int device_cap_flags; + u8 cq_overflow_detect; + u8 srq_supported; + + u32 wqe_size; + u32 rqe_size; + u32 ird_page_size; + u8 local_ca_ack_delay; + u8 ird; + u8 num_ird_pages; +}; + +struct ocrdma_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +}; + +struct ocrdma_pbl { + void *va; + dma_addr_t pa; +}; + +struct ocrdma_queue_info { + void *va; + 
dma_addr_t dma; + u32 size; + u16 len; + u16 entry_size; /* Size of an element in the queue */ + u16 id; /* qid, where to ring the doorbell. */ + u16 head, tail; + bool created; +}; + +struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + u32 prev_eqd; + u64 eq_intr_cnt; + u64 prev_eq_intr_cnt; +}; + +struct ocrdma_eq { + struct ocrdma_queue_info q; + u32 vector; + int cq_cnt; + struct ocrdma_dev *dev; + char irq_name[32]; + struct ocrdma_aic_obj aic_obj; +}; + +struct ocrdma_mq { + struct ocrdma_queue_info sq; + struct ocrdma_queue_info cq; + bool rearm_cq; +}; + +struct mqe_ctx { + struct mutex lock; /* for serializing mailbox commands on MQ */ + wait_queue_head_t cmd_wait; + u32 tag; + u16 cqe_status; + u16 ext_status; + bool cmd_done; + bool fw_error_state; +}; + +struct ocrdma_hw_mr { + u32 lkey; + u8 fr_mr; + u8 remote_atomic; + u8 remote_rd; + u8 remote_wr; + u8 local_rd; + u8 local_wr; + u8 mw_bind; + u8 rsvd; + u64 len; + struct ocrdma_pbl *pbl_table; + u32 num_pbls; + u32 num_pbes; + u32 pbl_size; + u32 pbe_size; + u64 fbo; + u64 va; +}; + +struct ocrdma_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + struct ocrdma_hw_mr hwmr; +}; + +struct ocrdma_stats { + u8 type; + struct ocrdma_dev *dev; +}; + +struct ocrdma_pd_resource_mgr { + u32 pd_norm_start; + u16 pd_norm_count; + u16 pd_norm_thrsh; + u16 max_normal_pd; + u32 pd_dpp_start; + u16 pd_dpp_count; + u16 pd_dpp_thrsh; + u16 max_dpp_pd; + u16 dpp_page_index; + unsigned long *pd_norm_bitmap; + unsigned long *pd_dpp_bitmap; + bool pd_prealloc_valid; +}; + +struct stats_mem { + struct ocrdma_mqe mqe; + void *va; + dma_addr_t pa; + u32 size; + char *debugfs_mem; +}; + +struct phy_info { + u16 auto_speeds_supported; + u16 fixed_speeds_supported; + u16 phy_type; + u16 interface_type; +}; + +struct ocrdma_dev { + struct ib_device ibdev; + struct ocrdma_dev_attr attr; + + struct mutex dev_lock; /* provides syncronise access to device data */ + spinlock_t flush_q_lock ____cacheline_aligned; + + struct ocrdma_cq **cq_tbl; + struct ocrdma_qp **qp_tbl; + + struct ocrdma_eq *eq_tbl; + int eq_cnt; + struct delayed_work eqd_work; + u16 base_eqid; + u16 max_eq; + + union ib_gid *sgid_tbl; + /* provided synchronization to sgid table for + * updating gid entries triggered by notifier. + */ + spinlock_t sgid_lock; + + int gsi_qp_created; + struct ocrdma_cq *gsi_sqcq; + struct ocrdma_cq *gsi_rqcq; + + struct { + struct ocrdma_av *va; + dma_addr_t pa; + u32 size; + u32 num_ah; + /* provide synchronization for av + * entry allocations. 
+ */ + spinlock_t lock; + u32 ahid; + struct ocrdma_pbl pbl; + } av_tbl; + + void *mbx_cmd; + struct ocrdma_mq mq; + struct mqe_ctx mqe_ctx; + + struct be_dev_info nic_info; + struct phy_info phy; + char model_number[32]; + u32 hba_port_num; + + struct list_head entry; + struct rcu_head rcu; + int id; + u64 *stag_arr; + u8 sl; /* service level */ + bool pfc_state; + atomic_t update_sl; + u16 pvid; + u32 asic_id; + + ulong last_stats_time; + struct mutex stats_lock; /* provide synch for debugfs operations */ + struct stats_mem stats_mem; + struct ocrdma_stats rsrc_stats; + struct ocrdma_stats rx_stats; + struct ocrdma_stats wqe_stats; + struct ocrdma_stats tx_stats; + struct ocrdma_stats db_err_stats; + struct ocrdma_stats tx_qp_err_stats; + struct ocrdma_stats rx_qp_err_stats; + struct ocrdma_stats tx_dbg_stats; + struct ocrdma_stats rx_dbg_stats; + struct ocrdma_stats driver_stats; + struct ocrdma_stats reset_stats; + struct dentry *dir; + atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS]; + atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR]; + struct ocrdma_pd_resource_mgr *pd_mgr; +}; + +struct ocrdma_cq { + struct ib_cq ibcq; + struct ocrdma_cqe *va; + u32 phase; + u32 getp; /* pointer to pending wrs to + * return to stack, wrap arounds + * at max_hw_cqe + */ + u32 max_hw_cqe; + bool phase_change; + bool deferred_arm, deferred_sol; + bool first_arm; + + spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization + * to cq polling + */ + /* syncronizes cq completion handler invoked from multiple context */ + spinlock_t comp_handler_lock ____cacheline_aligned; + u16 id; + u16 eqn; + + struct ocrdma_ucontext *ucontext; + dma_addr_t pa; + u32 len; + u32 cqe_cnt; + + /* head of all qp's sq and rq for which cqes need to be flushed + * by the software. + */ + struct list_head sq_head, rq_head; +}; + +struct ocrdma_pd { + struct ib_pd ibpd; + struct ocrdma_ucontext *uctx; + u32 id; + int num_dpp_qp; + u32 dpp_page; + bool dpp_enabled; +}; + +struct ocrdma_ah { + struct ib_ah ibah; + struct ocrdma_av *av; + u16 sgid_index; + u32 id; +}; + +struct ocrdma_qp_hwq_info { + u8 *va; /* virtual address */ + u32 max_sges; + u32 head, tail; + u32 entry_size; + u32 max_cnt; + u32 max_wqe_idx; + u16 dbid; /* qid, where to ring the doorbell. 
*/ + u32 len; + dma_addr_t pa; +}; + +struct ocrdma_srq { + struct ib_srq ibsrq; + u8 __iomem *db; + struct ocrdma_qp_hwq_info rq; + u64 *rqe_wr_id_tbl; + u32 *idx_bit_fields; + u32 bit_fields_len; + + /* provide synchronization to multiple context(s) posting rqe */ + spinlock_t q_lock ____cacheline_aligned; + + struct ocrdma_pd *pd; + u32 id; +}; + +struct ocrdma_qp { + struct ib_qp ibqp; + + u8 __iomem *sq_db; + struct ocrdma_qp_hwq_info sq; + struct { + uint64_t wrid; + uint16_t dpp_wqe_idx; + uint16_t dpp_wqe; + uint8_t signaled; + uint8_t rsvd[3]; + } *wqe_wr_id_tbl; + u32 max_inline_data; + + /* provide synchronization to multiple context(s) posting wqe, rqe */ + spinlock_t q_lock ____cacheline_aligned; + struct ocrdma_cq *sq_cq; + /* list maintained per CQ to flush SQ errors */ + struct list_head sq_entry; + + u8 __iomem *rq_db; + struct ocrdma_qp_hwq_info rq; + u64 *rqe_wr_id_tbl; + struct ocrdma_cq *rq_cq; + struct ocrdma_srq *srq; + /* list maintained per CQ to flush RQ errors */ + struct list_head rq_entry; + + enum ocrdma_qp_state state; /* QP state */ + int cap_flags; + u32 max_ord, max_ird; + + u32 id; + struct ocrdma_pd *pd; + + enum ib_qp_type qp_type; + + int sgid_idx; + u32 qkey; + bool dpp_enabled; + u8 *ird_q_va; + bool signaled; +}; + +struct ocrdma_ucontext { + struct ib_ucontext ibucontext; + + struct list_head mm_head; + struct mutex mm_list_lock; /* protects list entries of mm type */ + struct ocrdma_pd *cntxt_pd; + int pd_in_use; + + struct { + u32 *va; + dma_addr_t pa; + u32 len; + } ah_tbl; +}; + +struct ocrdma_mm { + struct { + u64 phy_addr; + unsigned long len; + } key; + struct list_head entry; +}; + +static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct ocrdma_dev, ibdev); +} + +static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext + *ibucontext) +{ + return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); +} + +static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct ocrdma_pd, ibpd); +} + +static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct ocrdma_cq, ibcq); +} + +static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct ocrdma_qp, ibqp); +} + +static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct ocrdma_mr, ibmr); +} + +static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) +{ + return container_of(ibah, struct ocrdma_ah, ibah); +} + +static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct ocrdma_srq, ibsrq); +} + +static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) +{ + int cqe_valid; + cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; + return (cqe_valid == cq->phase); +} + +static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_QTYPE) ? 0 : 1; +} + +static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_INVALIDATE) ? 1 : 0; +} + +static inline int is_cqe_imm(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_IMM) ? 1 : 0; +} + +static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_WRITE_IMM) ? 
1 : 0; +} + +static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev, + struct ib_ah_attr *ah_attr, u8 *mac_addr) +{ + struct in6_addr in6; + + memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); + if (rdma_is_multicast_addr(&in6)) + rdma_get_mcast_mac(&in6, mac_addr); + else if (rdma_link_local_addr(&in6)) + rdma_get_ll_mac(&in6, mac_addr); + else + memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); + return 0; +} + +static inline char *hca_name(struct ocrdma_dev *dev) +{ + switch (dev->nic_info.pdev->device) { + case OC_SKH_DEVICE_PF: + case OC_SKH_DEVICE_VF: + return OC_NAME_SH; + default: + return OC_NAME_UNKNOWN; + } +} + +static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev, + int eqid) +{ + int indx; + + for (indx = 0; indx < dev->eq_cnt; indx++) { + if (dev->eq_tbl[indx].q.id == eqid) + return indx; + } + + return -EINVAL; +} + +static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) +{ + if (dev->nic_info.dev_family == 0xF && !dev->asic_id) { + pci_read_config_dword( + dev->nic_info.pdev, + OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id); + } + + return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >> + OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; +} + +static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio) +{ + return *(pfc + prio); +} + +static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio) +{ + return *(app_prio + prio); +} + +static inline u8 ocrdma_is_enabled_and_synced(u32 state) +{ /* May also be used to interpret TC-state, QCN-state + * Appl-state and Logical-link-state in future. + */ + return (state & OCRDMA_STATE_FLAG_ENABLED) && + (state & OCRDMA_STATE_FLAG_SYNC); +} + +#endif diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_abi.h new file mode 100644 index 000000000..1554cca57 --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_abi.h @@ -0,0 +1,134 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_ABI_H__ +#define __OCRDMA_ABI_H__ + +#define OCRDMA_ABI_VERSION 2 +#define OCRDMA_BE_ROCE_ABI_VERSION 1 +/* user kernel communication data structures. 
*/ + +struct ocrdma_alloc_ucontext_resp { + u32 dev_id; + u32 wqe_size; + u32 max_inline_data; + u32 dpp_wqe_size; + u64 ah_tbl_page; + u32 ah_tbl_len; + u32 rqe_size; + u8 fw_ver[32]; + /* for future use/new features in progress */ + u64 rsvd1; + u64 rsvd2; +}; + +struct ocrdma_alloc_pd_ureq { + u64 rsvd1; +}; + +struct ocrdma_alloc_pd_uresp { + u32 id; + u32 dpp_enabled; + u32 dpp_page_addr_hi; + u32 dpp_page_addr_lo; + u64 rsvd1; +}; + +struct ocrdma_create_cq_ureq { + u32 dpp_cq; + u32 rsvd; /* pad */ +}; + +#define MAX_CQ_PAGES 8 +struct ocrdma_create_cq_uresp { + u32 cq_id; + u32 page_size; + u32 num_pages; + u32 max_hw_cqe; + u64 page_addr[MAX_CQ_PAGES]; + u64 db_page_addr; + u32 db_page_size; + u32 phase_change; + /* for future use/new features in progress */ + u64 rsvd1; + u64 rsvd2; +}; + +#define MAX_QP_PAGES 8 +#define MAX_UD_AV_PAGES 8 + +struct ocrdma_create_qp_ureq { + u8 enable_dpp_cq; + u8 rsvd; + u16 dpp_cq_id; + u32 rsvd1; /* pad */ +}; + +struct ocrdma_create_qp_uresp { + u16 qp_id; + u16 sq_dbid; + u16 rq_dbid; + u16 resv0; /* pad */ + u32 sq_page_size; + u32 rq_page_size; + u32 num_sq_pages; + u32 num_rq_pages; + u64 sq_page_addr[MAX_QP_PAGES]; + u64 rq_page_addr[MAX_QP_PAGES]; + u64 db_page_addr; + u32 db_page_size; + u32 dpp_credit; + u32 dpp_offset; + u32 num_wqe_allocated; + u32 num_rqe_allocated; + u32 db_sq_offset; + u32 db_rq_offset; + u32 db_shift; + u64 rsvd[11]; +} __packed; + +struct ocrdma_create_srq_uresp { + u16 rq_dbid; + u16 resv0; /* pad */ + u32 resv1; + + u32 rq_page_size; + u32 num_rq_pages; + + u64 rq_page_addr[MAX_QP_PAGES]; + u64 db_page_addr; + + u32 db_page_size; + u32 num_rqe_allocated; + u32 db_rq_offset; + u32 db_shift; + + u64 rsvd2; + u64 rsvd3; +}; + +#endif /* __OCRDMA_ABI_H__ */ diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.c new file mode 100644 index 000000000..f5a5ea836 --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -0,0 +1,227 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#include +#include + +#include +#include + +#include "ocrdma.h" +#include "ocrdma_verbs.h" +#include "ocrdma_ah.h" +#include "ocrdma_hw.h" +#include "ocrdma_stats.h" + +#define OCRDMA_VID_PCP_SHIFT 0xD + +static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, + struct ib_ah_attr *attr, union ib_gid *sgid, + int pdid, bool *isvlan) +{ + int status = 0; + u16 vlan_tag; + struct ocrdma_eth_vlan eth; + struct ocrdma_grh grh; + int eth_sz; + + memset(ð, 0, sizeof(eth)); + memset(&grh, 0, sizeof(grh)); + + /* VLAN */ + vlan_tag = attr->vlan_id; + if (!vlan_tag || (vlan_tag > 0xFFF)) + vlan_tag = dev->pvid; + if (vlan_tag || dev->pfc_state) { + if (!vlan_tag) { + pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", + dev->id); + pr_err("ocrdma%d:Using VLAN 0 for this connection\n", + dev->id); + } + eth.eth_type = cpu_to_be16(0x8100); + eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); + vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; + eth.vlan_tag = cpu_to_be16(vlan_tag); + eth_sz = sizeof(struct ocrdma_eth_vlan); + *isvlan = true; + } else { + eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); + eth_sz = sizeof(struct ocrdma_eth_basic); + } + /* MAC */ + memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); + status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); + if (status) + return status; + ah->sgid_index = attr->grh.sgid_index; + memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid)); + memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); + + grh.tclass_flow = cpu_to_be32((6 << 28) | + (attr->grh.traffic_class << 24) | + attr->grh.flow_label); + /* 0x1b is next header value in GRH */ + grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | + (0x1b << 8) | attr->grh.hop_limit); + /* Eth HDR */ + memcpy(&ah->av->eth_hdr, ð, eth_sz); + memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); + if (*isvlan) + ah->av->valid |= OCRDMA_AV_VLAN_VALID; + ah->av->valid = cpu_to_le32(ah->av->valid); + return status; +} + +struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) +{ + u32 *ahid_addr; + bool isvlan = false; + int status; + struct ocrdma_ah *ah; + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + union ib_gid sgid; + + if (!(attr->ah_flags & IB_AH_GRH)) + return ERR_PTR(-EINVAL); + + if (atomic_cmpxchg(&dev->update_sl, 1, 0)) + ocrdma_init_service_level(dev); + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + status = ocrdma_alloc_av(dev, ah); + if (status) + goto av_err; + + status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid); + if (status) { + pr_err("%s(): Failed to query sgid, status = %d\n", + __func__, status); + goto av_conf_err; + } + + if ((pd->uctx) && + (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) && + (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) { + status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, + attr->dmac, &attr->vlan_id); + if (status) { + pr_err("%s(): Failed to resolve dmac from gid." 
+ "status = %d\n", __func__, status); + goto av_conf_err; + } + } + + status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan); + if (status) + goto av_conf_err; + + /* if pd is for the user process, pass the ah_id to user space */ + if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { + ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; + *ahid_addr = 0; + *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK; + if (isvlan) + *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK << + OCRDMA_AH_VLAN_VALID_SHIFT); + } + + return &ah->ibah; + +av_conf_err: + ocrdma_free_av(dev, ah); +av_err: + kfree(ah); + return ERR_PTR(status); +} + +int ocrdma_destroy_ah(struct ib_ah *ibah) +{ + struct ocrdma_ah *ah = get_ocrdma_ah(ibah); + struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device); + + ocrdma_free_av(dev, ah); + kfree(ah); + return 0; +} + +int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) +{ + struct ocrdma_ah *ah = get_ocrdma_ah(ibah); + struct ocrdma_av *av = ah->av; + struct ocrdma_grh *grh; + attr->ah_flags |= IB_AH_GRH; + if (ah->av->valid & OCRDMA_AV_VALID) { + grh = (struct ocrdma_grh *)((u8 *)ah->av + + sizeof(struct ocrdma_eth_vlan)); + attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; + } else { + grh = (struct ocrdma_grh *)((u8 *)ah->av + + sizeof(struct ocrdma_eth_basic)); + attr->sl = 0; + } + memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid)); + attr->grh.sgid_index = ah->sgid_index; + attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff; + attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24; + attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff; + return 0; +} + +int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) +{ + /* modify_ah is unsupported */ + return -ENOSYS; +} + +int ocrdma_process_mad(struct ib_device *ibdev, + int process_mad_flags, + u8 port_num, + struct ib_wc *in_wc, + struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + int status; + struct ocrdma_dev *dev; + + switch (in_mad->mad_hdr.mgmt_class) { + case IB_MGMT_CLASS_PERF_MGMT: + dev = get_ocrdma_dev(ibdev); + if (!ocrdma_pma_counters(dev, out_mad)) + status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + else + status = IB_MAD_RESULT_SUCCESS; + break; + default: + status = IB_MAD_RESULT_SUCCESS; + break; + } + return status; +} diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.h new file mode 100644 index 000000000..726a87cf2 --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -0,0 +1,48 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. 
See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_AH_H__ +#define __OCRDMA_AH_H__ + +enum { + OCRDMA_AH_ID_MASK = 0x3FF, + OCRDMA_AH_VLAN_VALID_MASK = 0x01, + OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F +}; + +struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); +int ocrdma_destroy_ah(struct ib_ah *); +int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); +int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); + +int ocrdma_process_mad(struct ib_device *, + int process_mad_flags, + u8 port_num, + struct ib_wc *in_wc, + struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad); +#endif /* __OCRDMA_AH_H__ */ diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.c new file mode 100644 index 000000000..47615ff33 --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -0,0 +1,3172 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) CNA Adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#include +#include +#include +#include + +#include +#include + +#include "ocrdma.h" +#include "ocrdma_hw.h" +#include "ocrdma_verbs.h" +#include "ocrdma_ah.h" + +enum mbx_status { + OCRDMA_MBX_STATUS_FAILED = 1, + OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3, + OCRDMA_MBX_STATUS_OOR = 100, + OCRDMA_MBX_STATUS_INVALID_PD = 101, + OCRDMA_MBX_STATUS_PD_INUSE = 102, + OCRDMA_MBX_STATUS_INVALID_CQ = 103, + OCRDMA_MBX_STATUS_INVALID_QP = 104, + OCRDMA_MBX_STATUS_INVALID_LKEY = 105, + OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106, + OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107, + OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108, + OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109, + OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110, + OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111, + OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112, + OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113, + OCRDMA_MBX_STATUS_MW_BOUND = 114, + OCRDMA_MBX_STATUS_INVALID_VA = 115, + OCRDMA_MBX_STATUS_INVALID_LENGTH = 116, + OCRDMA_MBX_STATUS_INVALID_FBO = 117, + OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118, + OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119, + OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120, + OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121, + OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129, + OCRDMA_MBX_STATUS_SRQ_ERROR = 133, + OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134, + OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135, + OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136, + OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137, + OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138, + OCRDMA_MBX_STATUS_QP_BOUND = 130, + OCRDMA_MBX_STATUS_INVALID_CHANGE = 139, + OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140, + OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141, + OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142, + OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143, + OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144 +}; + +enum additional_status { + OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22 +}; + +enum cqe_status { + OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1, + OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2, + OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3, + OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4, + OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5 +}; + +static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq) +{ + return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe)); +} + +static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq) +{ + eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1); +} + +static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev) +{ + struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *) + (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe))); + + if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK)) + return NULL; + return cqe; +} + +static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev) +{ + dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); +} + +static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev) +{ + return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe)); +} + +static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev) +{ + dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); +} + +static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev) +{ + return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)); +} + +enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps) +{ + switch (qps) { + case OCRDMA_QPS_RST: + return IB_QPS_RESET; + case 
OCRDMA_QPS_INIT: + return IB_QPS_INIT; + case OCRDMA_QPS_RTR: + return IB_QPS_RTR; + case OCRDMA_QPS_RTS: + return IB_QPS_RTS; + case OCRDMA_QPS_SQD: + case OCRDMA_QPS_SQ_DRAINING: + return IB_QPS_SQD; + case OCRDMA_QPS_SQE: + return IB_QPS_SQE; + case OCRDMA_QPS_ERR: + return IB_QPS_ERR; + } + return IB_QPS_ERR; +} + +static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps) +{ + switch (qps) { + case IB_QPS_RESET: + return OCRDMA_QPS_RST; + case IB_QPS_INIT: + return OCRDMA_QPS_INIT; + case IB_QPS_RTR: + return OCRDMA_QPS_RTR; + case IB_QPS_RTS: + return OCRDMA_QPS_RTS; + case IB_QPS_SQD: + return OCRDMA_QPS_SQD; + case IB_QPS_SQE: + return OCRDMA_QPS_SQE; + case IB_QPS_ERR: + return OCRDMA_QPS_ERR; + } + return OCRDMA_QPS_ERR; +} + +static int ocrdma_get_mbx_errno(u32 status) +{ + int err_num; + u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >> + OCRDMA_MBX_RSP_STATUS_SHIFT; + u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >> + OCRDMA_MBX_RSP_ASTATUS_SHIFT; + + switch (mbox_status) { + case OCRDMA_MBX_STATUS_OOR: + case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS: + err_num = -EAGAIN; + break; + + case OCRDMA_MBX_STATUS_INVALID_PD: + case OCRDMA_MBX_STATUS_INVALID_CQ: + case OCRDMA_MBX_STATUS_INVALID_SRQ_ID: + case OCRDMA_MBX_STATUS_INVALID_QP: + case OCRDMA_MBX_STATUS_INVALID_CHANGE: + case OCRDMA_MBX_STATUS_MTU_EXCEEDS: + case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER: + case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID: + case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS: + case OCRDMA_MBX_STATUS_ILLEGAL_FIELD: + case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY: + case OCRDMA_MBX_STATUS_INVALID_LKEY: + case OCRDMA_MBX_STATUS_INVALID_VA: + case OCRDMA_MBX_STATUS_INVALID_LENGTH: + case OCRDMA_MBX_STATUS_INVALID_FBO: + case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS: + case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE: + case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP: + case OCRDMA_MBX_STATUS_SRQ_ERROR: + case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS: + err_num = -EINVAL; + break; + + case OCRDMA_MBX_STATUS_PD_INUSE: + case OCRDMA_MBX_STATUS_QP_BOUND: + case OCRDMA_MBX_STATUS_MW_STILL_BOUND: + case OCRDMA_MBX_STATUS_MW_BOUND: + err_num = -EBUSY; + break; + + case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS: + case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS: + case OCRDMA_MBX_STATUS_RQE_EXCEEDS: + case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS: + case OCRDMA_MBX_STATUS_ORD_EXCEEDS: + case OCRDMA_MBX_STATUS_IRD_EXCEEDS: + case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS: + case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS: + case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS: + err_num = -ENOBUFS; + break; + + case OCRDMA_MBX_STATUS_FAILED: + switch (add_status) { + case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: + err_num = -EAGAIN; + break; + } + default: + err_num = -EFAULT; + } + return err_num; +} + +char *port_speed_string(struct ocrdma_dev *dev) +{ + char *str = ""; + u16 speeds_supported; + + speeds_supported = dev->phy.fixed_speeds_supported | + dev->phy.auto_speeds_supported; + if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS) + str = "40Gbps "; + else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS) + str = "10Gbps "; + else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS) + str = "1Gbps "; + + return str; +} + +static int ocrdma_get_mbx_cqe_errno(u16 cqe_status) +{ + int err_num = -EINVAL; + + switch (cqe_status) { + case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES: + err_num = -EPERM; + break; + case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER: + err_num = -EINVAL; + break; + case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES: + case 
OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING: + err_num = -EINVAL; + break; + case OCRDMA_MBX_CQE_STATUS_DMA_FAILED: + default: + err_num = -EINVAL; + break; + } + return err_num; +} + +void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed, + bool solicited, u16 cqe_popped) +{ + u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK; + + val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) << + OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT); + + if (armed) + val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT); + if (solicited) + val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT); + val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT); + iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET); +} + +static void ocrdma_ring_mq_db(struct ocrdma_dev *dev) +{ + u32 val = 0; + + val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK; + val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT; + iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET); +} + +static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id, + bool arm, bool clear_int, u16 num_eqe) +{ + u32 val = 0; + + val |= eq_id & OCRDMA_EQ_ID_MASK; + val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT); + if (arm) + val |= (1 << OCRDMA_REARM_SHIFT); + if (clear_int) + val |= (1 << OCRDMA_EQ_CLR_SHIFT); + val |= (1 << OCRDMA_EQ_TYPE_SHIFT); + val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT); + iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET); +} + +static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr, + u8 opcode, u8 subsys, u32 cmd_len) +{ + cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT)); + cmd_hdr->timeout = 20; /* seconds */ + cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr); +} + +static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len) +{ + struct ocrdma_mqe *mqe; + + mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); + if (!mqe) + return NULL; + mqe->hdr.spcl_sge_cnt_emb |= + (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) & + OCRDMA_MQE_HDR_EMB_MASK; + mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr); + + ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE, + mqe->hdr.pyld_len); + return mqe; +} + +static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) +{ + dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); +} + +static int ocrdma_alloc_q(struct ocrdma_dev *dev, + struct ocrdma_queue_info *q, u16 len, u16 entry_size) +{ + memset(q, 0, sizeof(*q)); + q->len = len; + q->entry_size = entry_size; + q->size = len * entry_size; + q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, + &q->dma, GFP_KERNEL); + if (!q->va) + return -ENOMEM; + memset(q->va, 0, q->size); + return 0; +} + +static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt, + dma_addr_t host_pa, int hw_page_size) +{ + int i; + + for (i = 0; i < cnt; i++) { + q_pa[i].lo = (u32) (host_pa & 0xffffffff); + q_pa[i].hi = (u32) upper_32_bits(host_pa); + host_pa += hw_page_size; + } +} + +static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, + struct ocrdma_queue_info *q, int queue_type) +{ + u8 opcode = 0; + int status; + struct ocrdma_delete_q_req *cmd = dev->mbx_cmd; + + switch (queue_type) { + case QTYPE_MCCQ: + opcode = OCRDMA_CMD_DELETE_MQ; + break; + case QTYPE_CQ: + opcode = OCRDMA_CMD_DELETE_CQ; + break; + case QTYPE_EQ: + opcode = OCRDMA_CMD_DELETE_EQ; + break; + default: + BUG(); + } + memset(cmd, 0, sizeof(*cmd)); + ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + cmd->id = q->id; + + status = be_roce_mcc_cmd(dev->nic_info.netdev, + cmd, sizeof(*cmd), NULL, NULL); + if 
(!status) + q->created = false; + return status; +} + +static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) +{ + int status; + struct ocrdma_create_eq_req *cmd = dev->mbx_cmd; + struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd; + + memset(cmd, 0, sizeof(*cmd)); + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON, + sizeof(*cmd)); + + cmd->req.rsvd_version = 2; + cmd->num_pages = 4; + cmd->valid = OCRDMA_CREATE_EQ_VALID; + cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT; + + ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma, + PAGE_SIZE_4K); + status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL, + NULL); + if (!status) { + eq->q.id = rsp->vector_eqid & 0xffff; + eq->vector = (rsp->vector_eqid >> 16) & 0xffff; + eq->q.created = true; + } + return status; +} + +static int ocrdma_create_eq(struct ocrdma_dev *dev, + struct ocrdma_eq *eq, u16 q_len) +{ + int status; + + status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN, + sizeof(struct ocrdma_eqe)); + if (status) + return status; + + status = ocrdma_mbx_create_eq(dev, eq); + if (status) + goto mbx_err; + eq->dev = dev; + ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); + + return 0; +mbx_err: + ocrdma_free_q(dev, &eq->q); + return status; +} + +int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) +{ + int irq; + + if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) + irq = dev->nic_info.pdev->irq; + else + irq = dev->nic_info.msix.vector_list[eq->vector]; + return irq; +} + +static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) +{ + if (eq->q.created) { + ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ); + ocrdma_free_q(dev, &eq->q); + } +} + +static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) +{ + int irq; + + /* disarm EQ so that interrupts are not generated + * during freeing and EQ delete is in progress. 
+ */ + ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0); + + irq = ocrdma_get_irq(dev, eq); + free_irq(irq, eq); + _ocrdma_destroy_eq(dev, eq); +} + +static void ocrdma_destroy_eqs(struct ocrdma_dev *dev) +{ + int i; + + for (i = 0; i < dev->eq_cnt; i++) + ocrdma_destroy_eq(dev, &dev->eq_tbl[i]); +} + +static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, + struct ocrdma_queue_info *cq, + struct ocrdma_queue_info *eq) +{ + struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd; + struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd; + int status; + + memset(cmd, 0, sizeof(*cmd)); + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2; + cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) << + OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT; + cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size); + + cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; + cmd->eqn = eq->id; + cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe); + + ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, + cq->dma, PAGE_SIZE_4K); + status = be_roce_mcc_cmd(dev->nic_info.netdev, + cmd, sizeof(*cmd), NULL, NULL); + if (!status) { + cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); + cq->created = true; + } + return status; +} + +static u32 ocrdma_encoded_q_len(int q_len) +{ + u32 len_encoded = fls(q_len); /* log2(len) + 1 */ + + if (len_encoded == 16) + len_encoded = 0; + return len_encoded; +} + +static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, + struct ocrdma_queue_info *mq, + struct ocrdma_queue_info *cq) +{ + int num_pages, status; + struct ocrdma_create_mq_req *cmd = dev->mbx_cmd; + struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd; + struct ocrdma_pa *pa; + + memset(cmd, 0, sizeof(*cmd)); + num_pages = PAGES_4K_SPANNED(mq->va, mq->size); + + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + cmd->req.rsvd_version = 1; + cmd->cqid_pages = num_pages; + cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); + cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; + + cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE); + cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE); + + cmd->async_cqid_ringsize = cq->id; + cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << + OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); + cmd->valid = OCRDMA_CREATE_MQ_VALID; + pa = &cmd->pa[0]; + + ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); + status = be_roce_mcc_cmd(dev->nic_info.netdev, + cmd, sizeof(*cmd), NULL, NULL); + if (!status) { + mq->id = rsp->id; + mq->created = true; + } + return status; +} + +static int ocrdma_create_mq(struct ocrdma_dev *dev) +{ + int status; + + /* Alloc completion queue for Mailbox queue */ + status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN, + sizeof(struct ocrdma_mcqe)); + if (status) + goto alloc_err; + + dev->eq_tbl[0].cq_cnt++; + status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q); + if (status) + goto mbx_cq_free; + + memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx)); + init_waitqueue_head(&dev->mqe_ctx.cmd_wait); + mutex_init(&dev->mqe_ctx.lock); + + /* Alloc Mailbox queue */ + status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN, + sizeof(struct ocrdma_mqe)); + if (status) + goto mbx_cq_destroy; + status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq); + if (status) + goto mbx_q_free; + ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0); + return 0; + +mbx_q_free: + 
ocrdma_free_q(dev, &dev->mq.sq); +mbx_cq_destroy: + ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ); +mbx_cq_free: + ocrdma_free_q(dev, &dev->mq.cq); +alloc_err: + return status; +} + +static void ocrdma_destroy_mq(struct ocrdma_dev *dev) +{ + struct ocrdma_queue_info *mbxq, *cq; + + /* mqe_ctx lock synchronizes with any other pending cmds. */ + mutex_lock(&dev->mqe_ctx.lock); + mbxq = &dev->mq.sq; + if (mbxq->created) { + ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ); + ocrdma_free_q(dev, mbxq); + } + mutex_unlock(&dev->mqe_ctx.lock); + + cq = &dev->mq.cq; + if (cq->created) { + ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ); + ocrdma_free_q(dev, cq); + } +} + +static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev, + struct ocrdma_qp *qp) +{ + enum ib_qp_state new_ib_qps = IB_QPS_ERR; + enum ib_qp_state old_ib_qps; + + if (qp == NULL) + BUG(); + ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps); +} + +static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, + struct ocrdma_ae_mcqe *cqe) +{ + struct ocrdma_qp *qp = NULL; + struct ocrdma_cq *cq = NULL; + struct ib_event ib_evt; + int cq_event = 0; + int qp_event = 1; + int srq_event = 0; + int dev_event = 0; + int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >> + OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT; + + if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) + qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK]; + if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) + cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; + + memset(&ib_evt, 0, sizeof(ib_evt)); + + ib_evt.device = &dev->ibdev; + + switch (type) { + case OCRDMA_CQ_ERROR: + ib_evt.element.cq = &cq->ibcq; + ib_evt.event = IB_EVENT_CQ_ERR; + cq_event = 1; + qp_event = 0; + break; + case OCRDMA_CQ_OVERRUN_ERROR: + ib_evt.element.cq = &cq->ibcq; + ib_evt.event = IB_EVENT_CQ_ERR; + cq_event = 1; + qp_event = 0; + break; + case OCRDMA_CQ_QPCAT_ERROR: + ib_evt.element.qp = &qp->ibqp; + ib_evt.event = IB_EVENT_QP_FATAL; + ocrdma_process_qpcat_error(dev, qp); + break; + case OCRDMA_QP_ACCESS_ERROR: + ib_evt.element.qp = &qp->ibqp; + ib_evt.event = IB_EVENT_QP_ACCESS_ERR; + break; + case OCRDMA_QP_COMM_EST_EVENT: + ib_evt.element.qp = &qp->ibqp; + ib_evt.event = IB_EVENT_COMM_EST; + break; + case OCRDMA_SQ_DRAINED_EVENT: + ib_evt.element.qp = &qp->ibqp; + ib_evt.event = IB_EVENT_SQ_DRAINED; + break; + case OCRDMA_DEVICE_FATAL_EVENT: + ib_evt.element.port_num = 1; + ib_evt.event = IB_EVENT_DEVICE_FATAL; + qp_event = 0; + dev_event = 1; + break; + case OCRDMA_SRQCAT_ERROR: + ib_evt.element.srq = &qp->srq->ibsrq; + ib_evt.event = IB_EVENT_SRQ_ERR; + srq_event = 1; + qp_event = 0; + break; + case OCRDMA_SRQ_LIMIT_EVENT: + ib_evt.element.srq = &qp->srq->ibsrq; + ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; + srq_event = 1; + qp_event = 0; + break; + case OCRDMA_QP_LAST_WQE_EVENT: + ib_evt.element.qp = &qp->ibqp; + ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + default: + cq_event = 0; + qp_event = 0; + srq_event = 0; + dev_event = 0; + pr_err("%s() unknown type=0x%x\n", __func__, type); + break; + } + + if (type < OCRDMA_MAX_ASYNC_ERRORS) + atomic_inc(&dev->async_err_stats[type]); + + if (qp_event) { + if (qp->ibqp.event_handler) + qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); + } else if (cq_event) { + if (cq->ibcq.event_handler) + cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context); + } else if (srq_event) { + if (qp->srq->ibsrq.event_handler) + qp->srq->ibsrq.event_handler(&ib_evt, + qp->srq->ibsrq. 
+ srq_context); + } else if (dev_event) { + pr_err("%s: Fatal event received\n", dev->ibdev.name); + ib_dispatch_event(&ib_evt); + } + +} + +static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, + struct ocrdma_ae_mcqe *cqe) +{ + struct ocrdma_ae_pvid_mcqe *evt; + int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >> + OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT; + + switch (type) { + case OCRDMA_ASYNC_EVENT_PVID_STATE: + evt = (struct ocrdma_ae_pvid_mcqe *)cqe; + if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >> + OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT) + dev->pvid = ((evt->tag_enabled & + OCRDMA_AE_PVID_MCQE_TAG_MASK) >> + OCRDMA_AE_PVID_MCQE_TAG_SHIFT); + break; + + case OCRDMA_ASYNC_EVENT_COS_VALUE: + atomic_set(&dev->update_sl, 1); + break; + default: + /* Not interested evts. */ + break; + } +} + +static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) +{ + /* async CQE processing */ + struct ocrdma_ae_mcqe *cqe = ae_cqe; + u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >> + OCRDMA_AE_MCQE_EVENT_CODE_SHIFT; + + if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE) + ocrdma_dispatch_ibevent(dev, cqe); + else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE) + ocrdma_process_grp5_aync(dev, cqe); + else + pr_err("%s(%d) invalid evt code=0x%x\n", __func__, + dev->id, evt_code); +} + +static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) +{ + if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) { + dev->mqe_ctx.cqe_status = (cqe->status & + OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT; + dev->mqe_ctx.ext_status = + (cqe->status & OCRDMA_MCQE_ESTATUS_MASK) + >> OCRDMA_MCQE_ESTATUS_SHIFT; + dev->mqe_ctx.cmd_done = true; + wake_up(&dev->mqe_ctx.cmd_wait); + } else + pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n", + __func__, cqe->tag_lo, dev->mqe_ctx.tag); +} + +static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) +{ + u16 cqe_popped = 0; + struct ocrdma_mcqe *cqe; + + while (1) { + cqe = ocrdma_get_mcqe(dev); + if (cqe == NULL) + break; + ocrdma_le32_to_cpu(cqe, sizeof(*cqe)); + cqe_popped += 1; + if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK) + ocrdma_process_acqe(dev, cqe); + else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) + ocrdma_process_mcqe(dev, cqe); + memset(cqe, 0, sizeof(struct ocrdma_mcqe)); + ocrdma_mcq_inc_tail(dev); + } + ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped); + return 0; +} + +static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, + struct ocrdma_cq *cq, bool sq) +{ + struct ocrdma_qp *qp; + struct list_head *cur; + struct ocrdma_cq *bcq = NULL; + struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); + + list_for_each(cur, head) { + if (sq) + qp = list_entry(cur, struct ocrdma_qp, sq_entry); + else + qp = list_entry(cur, struct ocrdma_qp, rq_entry); + + if (qp->srq) + continue; + /* if wq and rq share the same cq, than comp_handler + * is already invoked. + */ + if (qp->sq_cq == qp->rq_cq) + continue; + /* if completion came on sq, rq's cq is buddy cq. + * if completion came on rq, sq's cq is buddy cq. 
+ */ + if (qp->sq_cq == cq) + bcq = qp->rq_cq; + else + bcq = qp->sq_cq; + return bcq; + } + return NULL; +} + +static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, + struct ocrdma_cq *cq) +{ + unsigned long flags; + struct ocrdma_cq *bcq = NULL; + + /* Go through list of QPs in error state which are using this CQ + * and invoke its callback handler to trigger CQE processing for + * error/flushed CQE. It is rare to find more than few entries in + * this list as most consumers stops after getting error CQE. + * List is traversed only once when a matching buddy cq found for a QP. + */ + spin_lock_irqsave(&dev->flush_q_lock, flags); + /* Check if buddy CQ is present. + * true - Check for SQ CQ + * false - Check for RQ CQ + */ + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true); + if (bcq == NULL) + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false); + spin_unlock_irqrestore(&dev->flush_q_lock, flags); + + /* if there is valid buddy cq, look for its completion handler */ + if (bcq && bcq->ibcq.comp_handler) { + spin_lock_irqsave(&bcq->comp_handler_lock, flags); + (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context); + spin_unlock_irqrestore(&bcq->comp_handler_lock, flags); + } +} + +static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx) +{ + unsigned long flags; + struct ocrdma_cq *cq; + + if (cq_idx >= OCRDMA_MAX_CQ) + BUG(); + + cq = dev->cq_tbl[cq_idx]; + if (cq == NULL) + return; + + if (cq->ibcq.comp_handler) { + spin_lock_irqsave(&cq->comp_handler_lock, flags); + (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); + spin_unlock_irqrestore(&cq->comp_handler_lock, flags); + } + ocrdma_qp_buddy_cq_handler(dev, cq); +} + +static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id) +{ + /* process the MQ-CQE. */ + if (cq_id == dev->mq.cq.id) + ocrdma_mq_cq_handler(dev, cq_id); + else + ocrdma_qp_cq_handler(dev, cq_id); +} + +static irqreturn_t ocrdma_irq_handler(int irq, void *handle) +{ + struct ocrdma_eq *eq = handle; + struct ocrdma_dev *dev = eq->dev; + struct ocrdma_eqe eqe; + struct ocrdma_eqe *ptr; + u16 cq_id; + u8 mcode; + int budget = eq->cq_cnt; + + do { + ptr = ocrdma_get_eqe(eq); + eqe = *ptr; + ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); + mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK) + >> OCRDMA_EQE_MAJOR_CODE_SHIFT; + if (mcode == OCRDMA_MAJOR_CODE_SENTINAL) + pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n", + eq->q.id, eqe.id_valid); + if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) + break; + + ptr->id_valid = 0; + /* ring eq doorbell as soon as its consumed. */ + ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1); + /* check whether its CQE or not. */ + if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { + cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; + ocrdma_cq_handler(dev, cq_id); + } + ocrdma_eq_inc_tail(eq); + + /* There can be a stale EQE after the last bound CQ is + * destroyed. EQE valid and budget == 0 implies this. 
+ */ + if (budget) + budget--; + + } while (budget); + + eq->aic_obj.eq_intr_cnt++; + ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); + return IRQ_HANDLED; +} + +static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd) +{ + struct ocrdma_mqe *mqe; + + dev->mqe_ctx.tag = dev->mq.sq.head; + dev->mqe_ctx.cmd_done = false; + mqe = ocrdma_get_mqe(dev); + cmd->hdr.tag_lo = dev->mq.sq.head; + ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe)); + /* make sure descriptor is written before ringing doorbell */ + wmb(); + ocrdma_mq_inc_head(dev); + ocrdma_ring_mq_db(dev); +} + +static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev) +{ + long status; + /* 30 sec timeout */ + status = wait_event_timeout(dev->mqe_ctx.cmd_wait, + (dev->mqe_ctx.cmd_done != false), + msecs_to_jiffies(30000)); + if (status) + return 0; + else { + dev->mqe_ctx.fw_error_state = true; + pr_err("%s(%d) mailbox timeout: fw not responding\n", + __func__, dev->id); + return -1; + } +} + +/* issue a mailbox command on the MQ */ +static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) +{ + int status = 0; + u16 cqe_status, ext_status; + struct ocrdma_mqe *rsp_mqe; + struct ocrdma_mbx_rsp *rsp = NULL; + + mutex_lock(&dev->mqe_ctx.lock); + if (dev->mqe_ctx.fw_error_state) + goto mbx_err; + ocrdma_post_mqe(dev, mqe); + status = ocrdma_wait_mqe_cmpl(dev); + if (status) + goto mbx_err; + cqe_status = dev->mqe_ctx.cqe_status; + ext_status = dev->mqe_ctx.ext_status; + rsp_mqe = ocrdma_get_mqe_rsp(dev); + ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe))); + if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> + OCRDMA_MQE_HDR_EMB_SHIFT) + rsp = &mqe->u.rsp; + + if (cqe_status || ext_status) { + pr_err("%s() cqe_status=0x%x, ext_status=0x%x,", + __func__, cqe_status, ext_status); + if (rsp) { + /* This is for embedded cmds. */ + pr_err("opcode=0x%x, subsystem=0x%x\n", + (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> + OCRDMA_MBX_RSP_OPCODE_SHIFT, + (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >> + OCRDMA_MBX_RSP_SUBSYS_SHIFT); + } + status = ocrdma_get_mbx_cqe_errno(cqe_status); + goto mbx_err; + } + /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */ + if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)) + status = ocrdma_get_mbx_errno(mqe->u.rsp.status); +mbx_err: + mutex_unlock(&dev->mqe_ctx.lock); + return status; +} + +static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe, + void *payload_va) +{ + int status = 0; + struct ocrdma_mbx_rsp *rsp = payload_va; + + if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> + OCRDMA_MQE_HDR_EMB_SHIFT) + BUG(); + + status = ocrdma_mbx_cmd(dev, mqe); + if (!status) + /* For non embedded, only CQE failures are handled in + * ocrdma_mbx_cmd. We need to check for RSP errors. 
+ */ + if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK) + status = ocrdma_get_mbx_errno(rsp->status); + + if (status) + pr_err("opcode=0x%x, subsystem=0x%x\n", + (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> + OCRDMA_MBX_RSP_OPCODE_SHIFT, + (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >> + OCRDMA_MBX_RSP_SUBSYS_SHIFT); + return status; +} + +static void ocrdma_get_attr(struct ocrdma_dev *dev, + struct ocrdma_dev_attr *attr, + struct ocrdma_mbx_query_config *rsp) +{ + attr->max_pd = + (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; + attr->max_dpp_pds = + (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET; + attr->max_qp = + (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; + attr->max_srq = + (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; + attr->max_send_sge = ((rsp->max_write_send_sge & + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); + attr->max_recv_sge = (rsp->max_write_send_sge & + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; + attr->max_srq_sge = (rsp->max_srq_rqe_sge & + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; + attr->max_rdma_sge = (rsp->max_write_send_sge & + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT; + attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & + OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; + attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp & + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT; + attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord & + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >> + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT; + attr->srq_supported = (rsp->qp_srq_cq_ird_ord & + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >> + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT; + attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay & + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >> + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; + attr->max_mw = rsp->max_mw; + attr->max_mr = rsp->max_mr; + attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) | + rsp->max_mr_size_lo; + attr->max_fmr = 0; + attr->max_pages_per_frmr = rsp->max_pages_per_frmr; + attr->max_num_mr_pbl = rsp->max_num_mr_pbl; + attr->max_cqe = rsp->max_cq_cqes_per_cq & + OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK; + attr->max_cq = (rsp->max_cq_cqes_per_cq & + OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET; + attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs & + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) * + OCRDMA_WQE_STRIDE; + attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs & + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) * + OCRDMA_WQE_STRIDE; + attr->max_inline_data = + attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + + sizeof(struct ocrdma_sge)); + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { + attr->ird = 1; + attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; + attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; + } + dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >> + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET; + dev->attr.max_rqe = rsp->max_wqes_rqes_per_q & + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK; +} + +static int 
ocrdma_check_fw_config(struct ocrdma_dev *dev, + struct ocrdma_fw_conf_rsp *conf) +{ + u32 fn_mode; + + fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA; + if (fn_mode != OCRDMA_FN_MODE_RDMA) + return -EINVAL; + dev->base_eqid = conf->base_eqid; + dev->max_eq = conf->max_eq; + return 0; +} + +/* can be issued only during init time. */ +static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + struct ocrdma_mqe *cmd; + struct ocrdma_fw_ver_rsp *rsp; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], + OCRDMA_CMD_GET_FW_VER, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_fw_ver_rsp *)cmd; + memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver)); + memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0], + sizeof(rsp->running_ver)); + ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver)); +mbx_err: + kfree(cmd); + return status; +} + +/* can be issued only during init time. */ +static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + struct ocrdma_mqe *cmd; + struct ocrdma_fw_conf_rsp *rsp; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], + OCRDMA_CMD_GET_FW_CONFIG, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_fw_conf_rsp *)cmd; + status = ocrdma_check_fw_config(dev, rsp); +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset) +{ + struct ocrdma_rdma_stats_req *req = dev->stats_mem.va; + struct ocrdma_mqe *mqe = &dev->stats_mem.mqe; + struct ocrdma_rdma_stats_resp *old_stats; + int status; + + old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL); + if (old_stats == NULL) + return -ENOMEM; + + memset(mqe, 0, sizeof(*mqe)); + mqe->hdr.pyld_len = dev->stats_mem.size; + mqe->hdr.spcl_sge_cnt_emb |= + (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & + OCRDMA_MQE_HDR_SGE_CNT_MASK; + mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff); + mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa); + mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size; + + /* Cache the old stats */ + memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp)); + memset(req, 0, dev->stats_mem.size); + + ocrdma_init_mch((struct ocrdma_mbx_hdr *)req, + OCRDMA_CMD_GET_RDMA_STATS, + OCRDMA_SUBSYS_ROCE, + dev->stats_mem.size); + if (reset) + req->reset_stats = reset; + + status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va); + if (status) + /* Copy from cache, if mbox fails */ + memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp)); + else + ocrdma_le32_to_cpu(req, dev->stats_mem.size); + + kfree(old_stats); + return status; +} + +static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + struct ocrdma_dma_mem dma; + struct ocrdma_mqe *mqe; + struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp; + struct mgmt_hba_attribs *hba_attribs; + + mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); + if (!mqe) + return status; + + dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp); + dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev, + dma.size, &dma.pa, GFP_KERNEL); + if (!dma.va) + goto free_mqe; + + mqe->hdr.pyld_len = 
dma.size; + mqe->hdr.spcl_sge_cnt_emb |= + (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & + OCRDMA_MQE_HDR_SGE_CNT_MASK; + mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff); + mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa); + mqe->u.nonemb_req.sge[0].len = dma.size; + + memset(dma.va, 0, dma.size); + ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va, + OCRDMA_CMD_GET_CTRL_ATTRIBUTES, + OCRDMA_SUBSYS_COMMON, + dma.size); + + status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va); + if (!status) { + ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; + hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; + + dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv & + OCRDMA_HBA_ATTRB_PTNUM_MASK) + >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT; + strncpy(dev->model_number, + hba_attribs->controller_model_number, 31); + } + dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa); +free_mqe: + kfree(mqe); + return status; +} + +static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + struct ocrdma_mbx_query_config *rsp; + struct ocrdma_mqe *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd)); + if (!cmd) + return status; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_mbx_query_config *)cmd; + ocrdma_get_attr(dev, &dev->attr, rsp); +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) +{ + int status = -ENOMEM; + struct ocrdma_get_link_speed_rsp *rsp; + struct ocrdma_mqe *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1, + sizeof(*cmd)); + if (!cmd) + return status; + ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], + OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1; + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + + rsp = (struct ocrdma_get_link_speed_rsp *)cmd; + *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) + >> OCRDMA_PHY_PS_SHIFT; + +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + struct ocrdma_mqe *cmd; + struct ocrdma_get_phy_info_rsp *rsp; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd)); + if (!cmd) + return status; + + ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], + OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON, + sizeof(*cmd)); + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + + rsp = (struct ocrdma_get_phy_info_rsp *)cmd; + dev->phy.phy_type = + (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK); + dev->phy.interface_type = + (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK) + >> OCRDMA_IF_TYPE_SHIFT; + dev->phy.auto_speeds_supported = + (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK); + dev->phy.fixed_speeds_supported = + (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK) + >> OCRDMA_FSPEED_SUPP_SHIFT; +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) +{ + int status = -ENOMEM; + struct ocrdma_alloc_pd *cmd; + struct ocrdma_alloc_pd_rsp *rsp; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd)); + if (!cmd) + return status; + if (pd->dpp_enabled) + cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct 
ocrdma_alloc_pd_rsp *)cmd; + pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK; + if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) { + pd->dpp_enabled = true; + pd->dpp_page = rsp->dpp_page_pdid >> + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; + } else { + pd->dpp_enabled = false; + pd->num_dpp_qp = 0; + } +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) +{ + int status = -ENOMEM; + struct ocrdma_dealloc_pd *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd)); + if (!cmd) + return status; + cmd->id = pd->id; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + kfree(cmd); + return status; +} + + +static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + size_t pd_bitmap_size; + struct ocrdma_alloc_pd_range *cmd; + struct ocrdma_alloc_pd_range_rsp *rsp; + + /* Pre allocate the DPP PDs */ + if (dev->attr.max_dpp_pds) { + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, + sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + cmd->pd_count = dev->attr.max_dpp_pds; + cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; + + if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && + rsp->pd_count) { + dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; + dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; + dev->pd_mgr->max_dpp_pd = rsp->pd_count; + pd_bitmap_size = + BITS_TO_LONGS(rsp->pd_count) * sizeof(long); + dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, + GFP_KERNEL); + } + kfree(cmd); + } + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + + cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; + if (!status && rsp->pd_count) { + dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; + dev->pd_mgr->max_normal_pd = rsp->pd_count; + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); + dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, + GFP_KERNEL); + } + kfree(cmd); + + if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) { + /* Enable PD resource manager */ + dev->pd_mgr->pd_prealloc_valid = true; + return 0; + } + return status; +} + +static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev) +{ + struct ocrdma_dealloc_pd_range *cmd; + + /* return normal PDs to firmware */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + goto mbx_err; + + if (dev->pd_mgr->max_normal_pd) { + cmd->start_pd_id = dev->pd_mgr->pd_norm_start; + cmd->pd_count = dev->pd_mgr->max_normal_pd; + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + } + + if (dev->pd_mgr->max_dpp_pd) { + kfree(cmd); + /* return DPP PDs to firmware */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, + sizeof(*cmd)); + if (!cmd) + goto mbx_err; + + cmd->start_pd_id = dev->pd_mgr->pd_dpp_start; + cmd->pd_count = dev->pd_mgr->max_dpp_pd; + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + } +mbx_err: + kfree(cmd); +} + +void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) +{ + int status; + + dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr), + GFP_KERNEL); + if (!dev->pd_mgr) { + pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id); + 
return; + } + status = ocrdma_mbx_alloc_pd_range(dev); + if (status) { + pr_err("%s(%d) Unable to initialize PD pool, using default.\n", + __func__, dev->id); + } +} + +static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) +{ + ocrdma_mbx_dealloc_pd_range(dev); + kfree(dev->pd_mgr->pd_norm_bitmap); + kfree(dev->pd_mgr->pd_dpp_bitmap); + kfree(dev->pd_mgr); +} + +static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, + int *num_pages, int *page_size) +{ + int i; + int mem_size; + + *num_entries = roundup_pow_of_two(*num_entries); + mem_size = *num_entries * entry_size; + /* find the possible lowest possible multiplier */ + for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) { + if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i)) + break; + } + if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT) + return -EINVAL; + mem_size = roundup(mem_size, + ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES)); + *num_pages = + mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES); + *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES); + *num_entries = mem_size / entry_size; + return 0; +} + +static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) +{ + int i; + int status = 0; + int max_ah; + struct ocrdma_create_ah_tbl *cmd; + struct ocrdma_create_ah_tbl_rsp *rsp; + struct pci_dev *pdev = dev->nic_info.pdev; + dma_addr_t pa; + struct ocrdma_pbe *pbes; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd)); + if (!cmd) + return status; + + max_ah = OCRDMA_MAX_AH; + dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah; + + /* number of PBEs in PBL */ + cmd->ah_conf = (OCRDMA_AH_TBL_PAGES << + OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) & + OCRDMA_CREATE_AH_NUM_PAGES_MASK; + + /* page size */ + for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) { + if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i)) + break; + } + cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) & + OCRDMA_CREATE_AH_PAGE_SIZE_MASK; + + /* ah_entry size */ + cmd->ah_conf |= (sizeof(struct ocrdma_av) << + OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) & + OCRDMA_CREATE_AH_ENTRY_SIZE_MASK; + + dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &dev->av_tbl.pbl.pa, + GFP_KERNEL); + if (dev->av_tbl.pbl.va == NULL) + goto mem_err; + + dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size, + &pa, GFP_KERNEL); + if (dev->av_tbl.va == NULL) + goto mem_err_ah; + dev->av_tbl.pa = pa; + dev->av_tbl.num_ah = max_ah; + memset(dev->av_tbl.va, 0, dev->av_tbl.size); + + pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; + for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { + pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff); + pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa)); + pa += PAGE_SIZE; + } + cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); + cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa); + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd; + dev->av_tbl.ahid = rsp->ahid & 0xFFFF; + kfree(cmd); + return 0; + +mbx_err: + dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, + dev->av_tbl.pa); + dev->av_tbl.va = NULL; +mem_err_ah: + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, + dev->av_tbl.pbl.pa); + dev->av_tbl.pbl.va = NULL; + dev->av_tbl.size = 0; +mem_err: + kfree(cmd); + return status; +} + +static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev) +{ + struct ocrdma_delete_ah_tbl *cmd; + struct pci_dev *pdev = dev->nic_info.pdev; + + if (dev->av_tbl.va 
== NULL) + return; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd)); + if (!cmd) + return; + cmd->ahid = dev->av_tbl.ahid; + + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, + dev->av_tbl.pa); + dev->av_tbl.va = NULL; + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, + dev->av_tbl.pbl.pa); + kfree(cmd); +} + +/* Multiple CQs uses the EQ. This routine returns least used + * EQ to associate with CQ. This will distributes the interrupt + * processing and CPU load to associated EQ, vector and so to that CPU. + */ +static u16 ocrdma_bind_eq(struct ocrdma_dev *dev) +{ + int i, selected_eq = 0, cq_cnt = 0; + u16 eq_id; + + mutex_lock(&dev->dev_lock); + cq_cnt = dev->eq_tbl[0].cq_cnt; + eq_id = dev->eq_tbl[0].q.id; + /* find the EQ which is has the least number of + * CQs associated with it. + */ + for (i = 0; i < dev->eq_cnt; i++) { + if (dev->eq_tbl[i].cq_cnt < cq_cnt) { + cq_cnt = dev->eq_tbl[i].cq_cnt; + eq_id = dev->eq_tbl[i].q.id; + selected_eq = i; + } + } + dev->eq_tbl[selected_eq].cq_cnt += 1; + mutex_unlock(&dev->dev_lock); + return eq_id; +} + +static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id) +{ + int i; + + mutex_lock(&dev->dev_lock); + i = ocrdma_get_eq_table_index(dev, eq_id); + if (i == -EINVAL) + BUG(); + dev->eq_tbl[i].cq_cnt -= 1; + mutex_unlock(&dev->dev_lock); +} + +int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, + int entries, int dpp_cq, u16 pd_id) +{ + int status = -ENOMEM; int max_hw_cqe; + struct pci_dev *pdev = dev->nic_info.pdev; + struct ocrdma_create_cq *cmd; + struct ocrdma_create_cq_rsp *rsp; + u32 hw_pages, cqe_size, page_size, cqe_count; + + if (entries > dev->attr.max_cqe) { + pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n", + __func__, dev->id, dev->attr.max_cqe, entries); + return -EINVAL; + } + if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R)) + return -EINVAL; + + if (dpp_cq) { + cq->max_hw_cqe = 1; + max_hw_cqe = 1; + cqe_size = OCRDMA_DPP_CQE_SIZE; + hw_pages = 1; + } else { + cq->max_hw_cqe = dev->attr.max_cqe; + max_hw_cqe = dev->attr.max_cqe; + cqe_size = sizeof(struct ocrdma_cqe); + hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES; + } + + cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE); + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); + if (!cq->va) { + status = -ENOMEM; + goto mem_err; + } + memset(cq->va, 0, cq->len); + page_size = cq->len / hw_pages; + cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) << + OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT; + cmd->cmd.pgsz_pgcnt |= hw_pages; + cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; + + cq->eqn = ocrdma_bind_eq(dev); + cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3; + cqe_count = cq->len / cqe_size; + cq->cqe_cnt = cqe_count; + if (cqe_count > 1024) { + /* Set cnt to 3 to indicate more than 1024 cq entries */ + cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT); + } else { + u8 count = 0; + switch (cqe_count) { + case 256: + count = 0; + break; + case 512: + count = 1; + break; + case 1024: + count = 2; + break; + default: + goto mbx_err; + } + cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT); + } + /* shared eq between all the consumer cqs. 
*/ + cmd->cmd.eqn = cq->eqn; + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { + if (dpp_cq) + cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << + OCRDMA_CREATE_CQ_TYPE_SHIFT; + cq->phase_change = false; + cmd->cmd.pdid_cqecnt = (cq->len / cqe_size); + } else { + cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1; + cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID; + cq->phase_change = true; + } + + /* pd_id valid only for v3 */ + cmd->cmd.pdid_cqecnt |= (pd_id << + OCRDMA_CREATE_CQ_CMD_PDID_SHIFT); + ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + + rsp = (struct ocrdma_create_cq_rsp *)cmd; + cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); + kfree(cmd); + return 0; +mbx_err: + ocrdma_unbind_eq(dev, cq->eqn); + dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa); +mem_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq) +{ + int status = -ENOMEM; + struct ocrdma_destroy_cq *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd)); + if (!cmd) + return status; + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + cmd->bypass_flush_qid |= + (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) & + OCRDMA_DESTROY_CQ_QID_MASK; + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + ocrdma_unbind_eq(dev, cq->eqn); + dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa); + kfree(cmd); + return status; +} + +int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr, + u32 pdid, int addr_check) +{ + int status = -ENOMEM; + struct ocrdma_alloc_lkey *cmd; + struct ocrdma_alloc_lkey_rsp *rsp; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd)); + if (!cmd) + return status; + cmd->pdid = pdid; + cmd->pbl_sz_flags |= addr_check; + cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT); + cmd->pbl_sz_flags |= + (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT); + cmd->pbl_sz_flags |= + (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT); + cmd->pbl_sz_flags |= + (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT); + cmd->pbl_sz_flags |= + (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT); + cmd->pbl_sz_flags |= + (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT); + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_alloc_lkey_rsp *)cmd; + hwmr->lkey = rsp->lrkey; +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey) +{ + int status = -ENOMEM; + struct ocrdma_dealloc_lkey *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + cmd->lkey = lkey; + cmd->rsvd_frmr = fr_mr ? 
1 : 0; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr, + u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last) +{ + int status = -ENOMEM; + int i; + struct ocrdma_reg_nsmr *cmd; + struct ocrdma_reg_nsmr_rsp *rsp; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + cmd->num_pbl_pdid = + pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT); + cmd->fr_mr = hwmr->fr_mr; + + cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr << + OCRDMA_REG_NSMR_REMOTE_WR_SHIFT); + cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd << + OCRDMA_REG_NSMR_REMOTE_RD_SHIFT); + cmd->flags_hpage_pbe_sz |= (hwmr->local_wr << + OCRDMA_REG_NSMR_LOCAL_WR_SHIFT); + cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic << + OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT); + cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind << + OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT); + cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT); + + cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE); + cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) << + OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT; + cmd->totlen_low = hwmr->len; + cmd->totlen_high = upper_32_bits(hwmr->len); + cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff); + cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo); + cmd->va_loaddr = (u32) hwmr->va; + cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va); + + for (i = 0; i < pbl_cnt; i++) { + cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff); + cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa); + } + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_reg_nsmr_rsp *)cmd; + hwmr->lkey = rsp->lrkey; +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev, + struct ocrdma_hw_mr *hwmr, u32 pbl_cnt, + u32 pbl_offset, u32 last) +{ + int status = -ENOMEM; + int i; + struct ocrdma_reg_nsmr_cont *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + cmd->lrkey = hwmr->lkey; + cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) | + (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK); + cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT; + + for (i = 0; i < pbl_cnt; i++) { + cmd->pbl[i].lo = + (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff); + cmd->pbl[i].hi = + upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa); + } + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_reg_mr(struct ocrdma_dev *dev, + struct ocrdma_hw_mr *hwmr, u32 pdid, int acc) +{ + int status; + u32 last = 0; + u32 cur_pbl_cnt, pbl_offset; + u32 pending_pbl_cnt = hwmr->num_pbls; + + pbl_offset = 0; + cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL); + if (cur_pbl_cnt == pending_pbl_cnt) + last = 1; + + status = ocrdma_mbx_reg_mr(dev, hwmr, pdid, + cur_pbl_cnt, hwmr->pbe_size, last); + if (status) { + pr_err("%s() status=%d\n", __func__, status); + return status; + } + /* if there is no more pbls to register then exit. 
*/ + if (last) + return 0; + + while (!last) { + pbl_offset += cur_pbl_cnt; + pending_pbl_cnt -= cur_pbl_cnt; + cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL); + /* if we reach the end of the pbls, then need to set the last + * bit, indicating no more pbls to register for this memory key. + */ + if (cur_pbl_cnt == pending_pbl_cnt) + last = 1; + + status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt, + pbl_offset, last); + if (status) + break; + } + if (status) + pr_err("%s() err. status=%d\n", __func__, status); + + return status; +} + +bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) +{ + struct ocrdma_qp *tmp; + bool found = false; + list_for_each_entry(tmp, &cq->sq_head, sq_entry) { + if (qp == tmp) { + found = true; + break; + } + } + return found; +} + +bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) +{ + struct ocrdma_qp *tmp; + bool found = false; + list_for_each_entry(tmp, &cq->rq_head, rq_entry) { + if (qp == tmp) { + found = true; + break; + } + } + return found; +} + +void ocrdma_flush_qp(struct ocrdma_qp *qp) +{ + bool found; + unsigned long flags; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + + spin_lock_irqsave(&dev->flush_q_lock, flags); + found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); + if (!found) + list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); + if (!qp->srq) { + found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); + if (!found) + list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); + } + spin_unlock_irqrestore(&dev->flush_q_lock, flags); +} + +static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) +{ + qp->sq.head = 0; + qp->sq.tail = 0; + qp->rq.head = 0; + qp->rq.tail = 0; +} + +int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state, + enum ib_qp_state *old_ib_state) +{ + unsigned long flags; + int status = 0; + enum ocrdma_qp_state new_state; + new_state = get_ocrdma_qp_state(new_ib_state); + + /* sync with wqe and rqe posting */ + spin_lock_irqsave(&qp->q_lock, flags); + + if (old_ib_state) + *old_ib_state = get_ibqp_state(qp->state); + if (new_state == qp->state) { + spin_unlock_irqrestore(&qp->q_lock, flags); + return 1; + } + + + if (new_state == OCRDMA_QPS_INIT) { + ocrdma_init_hwq_ptr(qp); + ocrdma_del_flush_qp(qp); + } else if (new_state == OCRDMA_QPS_ERR) { + ocrdma_flush_qp(qp); + } + + qp->state = new_state; + + spin_unlock_irqrestore(&qp->q_lock, flags); + return status; +} + +static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp) +{ + u32 flags = 0; + if (qp->cap_flags & OCRDMA_QP_INB_RD) + flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK; + if (qp->cap_flags & OCRDMA_QP_INB_WR) + flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK; + if (qp->cap_flags & OCRDMA_QP_MW_BIND) + flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK; + if (qp->cap_flags & OCRDMA_QP_LKEY0) + flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK; + if (qp->cap_flags & OCRDMA_QP_FAST_REG) + flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK; + return flags; +} + +static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, + struct ib_qp_init_attr *attrs, + struct ocrdma_qp *qp) +{ + int status; + u32 len, hw_pages, hw_page_size; + dma_addr_t pa; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + struct pci_dev *pdev = dev->nic_info.pdev; + u32 max_wqe_allocated; + u32 max_sges = attrs->cap.max_send_sge; + + /* QP1 may exceed 127 */ + max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1, + dev->attr.max_wqe); + + status = 
ocrdma_build_q_conf(&max_wqe_allocated, + dev->attr.wqe_size, &hw_pages, &hw_page_size); + if (status) { + pr_err("%s() req. max_send_wr=0x%x\n", __func__, + max_wqe_allocated); + return -EINVAL; + } + qp->sq.max_cnt = max_wqe_allocated; + len = (hw_pages * hw_page_size); + + qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); + if (!qp->sq.va) + return -EINVAL; + memset(qp->sq.va, 0, len); + qp->sq.len = len; + qp->sq.pa = pa; + qp->sq.entry_size = dev->attr.wqe_size; + ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size); + + cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) + << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT); + cmd->num_wq_rq_pages |= (hw_pages << + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) & + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK; + cmd->max_sge_send_write |= (max_sges << + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK; + cmd->max_sge_send_write |= (max_sges << + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK; + cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << + OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK; + cmd->wqe_rqe_size |= (dev->attr.wqe_size << + OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) & + OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK; + return 0; +} + +static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, + struct ib_qp_init_attr *attrs, + struct ocrdma_qp *qp) +{ + int status; + u32 len, hw_pages, hw_page_size; + dma_addr_t pa = 0; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + struct pci_dev *pdev = dev->nic_info.pdev; + u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; + + status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size, + &hw_pages, &hw_page_size); + if (status) { + pr_err("%s() req. 
max_recv_wr=0x%x\n", __func__, + attrs->cap.max_recv_wr + 1); + return status; + } + qp->rq.max_cnt = max_rqe_allocated; + len = (hw_pages * hw_page_size); + + qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); + if (!qp->rq.va) + return -ENOMEM; + memset(qp->rq.va, 0, len); + qp->rq.pa = pa; + qp->rq.len = len; + qp->rq.entry_size = dev->attr.rqe_size; + + ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size); + cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) << + OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT); + cmd->num_wq_rq_pages |= + (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) & + OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK; + cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge << + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK; + cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << + OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK; + cmd->wqe_rqe_size |= (dev->attr.rqe_size << + OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) & + OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK; + return 0; +} + +static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, + struct ocrdma_pd *pd, + struct ocrdma_qp *qp, + u8 enable_dpp_cq, u16 dpp_cq_id) +{ + pd->num_dpp_qp--; + qp->dpp_enabled = true; + cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK; + if (!enable_dpp_cq) + return; + cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK; + cmd->dpp_credits_cqid = dpp_cq_id; + cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT << + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT; +} + +static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, + struct ocrdma_qp *qp) +{ + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + struct pci_dev *pdev = dev->nic_info.pdev; + dma_addr_t pa = 0; + int ird_page_size = dev->attr.ird_page_size; + int ird_q_len = dev->attr.num_ird_pages * ird_page_size; + struct ocrdma_hdr_wqe *rqe; + int i = 0; + + if (dev->attr.ird == 0) + return 0; + + qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, + &pa, GFP_KERNEL); + if (!qp->ird_q_va) + return -ENOMEM; + memset(qp->ird_q_va, 0, ird_q_len); + ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, + pa, ird_page_size); + for (; i < ird_q_len / dev->attr.rqe_size; i++) { + rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va + + (i * dev->attr.rqe_size)); + rqe->cw = 0; + rqe->cw |= 2; + rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); + rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT); + rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT); + } + return 0; +} + +static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, + struct ocrdma_qp *qp, + struct ib_qp_init_attr *attrs, + u16 *dpp_offset, u16 *dpp_credit_lmt) +{ + u32 max_wqe_allocated, max_rqe_allocated; + qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK; + qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK; + qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT; + qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK; + qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT); + qp->dpp_enabled = false; + if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) { + qp->dpp_enabled = true; + *dpp_credit_lmt = (rsp->dpp_response & + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >> + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT; + *dpp_offset = (rsp->dpp_response & + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >> + 
OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT; + } + max_wqe_allocated = + rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT; + max_wqe_allocated = 1 << max_wqe_allocated; + max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); + + qp->sq.max_cnt = max_wqe_allocated; + qp->sq.max_wqe_idx = max_wqe_allocated - 1; + + if (!attrs->srq) { + qp->rq.max_cnt = max_rqe_allocated; + qp->rq.max_wqe_idx = max_rqe_allocated - 1; + } +} + +int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, + u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, + u16 *dpp_credit_lmt) +{ + int status = -ENOMEM; + u32 flags = 0; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + struct pci_dev *pdev = dev->nic_info.pdev; + struct ocrdma_cq *cq; + struct ocrdma_create_qp_req *cmd; + struct ocrdma_create_qp_rsp *rsp; + int qptype; + + switch (attrs->qp_type) { + case IB_QPT_GSI: + qptype = OCRDMA_QPT_GSI; + break; + case IB_QPT_RC: + qptype = OCRDMA_QPT_RC; + break; + case IB_QPT_UD: + qptype = OCRDMA_QPT_UD; + break; + default: + return -EINVAL; + } + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); + if (!cmd) + return status; + cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) & + OCRDMA_CREATE_QP_REQ_QPT_MASK; + status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp); + if (status) + goto sq_err; + + if (attrs->srq) { + struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq); + cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK; + cmd->rq_addr[0].lo = srq->id; + qp->srq = srq; + } else { + status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp); + if (status) + goto rq_err; + } + + status = ocrdma_set_create_qp_ird_cmd(cmd, qp); + if (status) + goto mbx_err; + + cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) & + OCRDMA_CREATE_QP_REQ_PD_ID_MASK; + + flags = ocrdma_set_create_qp_mbx_access_flags(qp); + + cmd->max_sge_recv_flags |= flags; + cmd->max_ord_ird |= (dev->attr.max_ord_per_qp << + OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK; + cmd->max_ord_ird |= (dev->attr.max_ird_per_qp << + OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) & + OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK; + cq = get_ocrdma_cq(attrs->send_cq); + cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) & + OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK; + qp->sq_cq = cq; + cq = get_ocrdma_cq(attrs->recv_cq); + cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) & + OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; + qp->rq_cq = cq; + + if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && + (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) { + ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, + dpp_cq_id); + } + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_create_qp_rsp *)cmd; + ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt); + qp->state = OCRDMA_QPS_RST; + kfree(cmd); + return 0; +mbx_err: + if (qp->rq.va) + dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); +rq_err: + pr_err("%s(%d) rq_err\n", __func__, dev->id); + dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); +sq_err: + pr_err("%s(%d) sq_err\n", __func__, dev->id); + kfree(cmd); + return status; +} + +int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, + struct ocrdma_qp_params *param) +{ + int status = -ENOMEM; + struct ocrdma_query_qp *cmd; + struct ocrdma_query_qp_rsp *rsp; + + cmd = 
ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp)); + if (!cmd) + return status; + cmd->qp_id = qp->id; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_query_qp_rsp *)cmd; + memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params)); +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_set_av_params(struct ocrdma_qp *qp, + struct ocrdma_modify_qp *cmd, + struct ib_qp_attr *attrs, + int attr_mask) +{ + int status; + struct ib_ah_attr *ah_attr = &attrs->ah_attr; + union ib_gid sgid, zgid; + u32 vlan_id = 0xFFFF; + u8 mac_addr[6]; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + + if ((ah_attr->ah_flags & IB_AH_GRH) == 0) + return -EINVAL; + if (atomic_cmpxchg(&dev->update_sl, 1, 0)) + ocrdma_init_service_level(dev); + cmd->params.tclass_sq_psn |= + (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); + cmd->params.rnt_rc_sl_fl |= + (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK); + cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT); + cmd->params.hop_lmt_rq_psn |= + (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT); + cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; + memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], + sizeof(cmd->params.dgid)); + status = ocrdma_query_gid(&dev->ibdev, 1, + ah_attr->grh.sgid_index, &sgid); + if (status) + return status; + + memset(&zgid, 0, sizeof(zgid)); + if (!memcmp(&sgid, &zgid, sizeof(zgid))) + return -EINVAL; + + qp->sgid_idx = ah_attr->grh.sgid_index; + memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); + status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]); + if (status) + return status; + cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | + (mac_addr[2] << 16) | (mac_addr[3] << 24); + /* convert them to LE format. 
*/ + ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid)); + ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); + cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); + if (attr_mask & IB_QP_VID) { + vlan_id = attrs->vlan_id; + } else if (dev->pfc_state) { + vlan_id = 0; + pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", + dev->id); + pr_err("ocrdma%d:Using VLAN 0 for this connection\n", + dev->id); + } + + if (vlan_id < 0x1000) { + cmd->params.vlan_dmac_b4_to_b5 |= + vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; + cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; + cmd->params.rnt_rc_sl_fl |= + (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; + } + + return 0; +} + +static int ocrdma_set_qp_params(struct ocrdma_qp *qp, + struct ocrdma_modify_qp *cmd, + struct ib_qp_attr *attrs, int attr_mask) +{ + int status = 0; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + + if (attr_mask & IB_QP_PKEY_INDEX) { + cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & + OCRDMA_QP_PARAMS_PKEY_INDEX_MASK); + cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID; + } + if (attr_mask & IB_QP_QKEY) { + qp->qkey = attrs->qkey; + cmd->params.qkey = attrs->qkey; + cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID; + } + if (attr_mask & IB_QP_AV) { + status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask); + if (status) + return status; + } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { + /* set the default mac address for UD, GSI QPs */ + cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] | + (dev->nic_info.mac_addr[1] << 8) | + (dev->nic_info.mac_addr[2] << 16) | + (dev->nic_info.mac_addr[3] << 24); + cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] | + (dev->nic_info.mac_addr[5] << 8); + } + if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && + attrs->en_sqd_async_notify) { + cmd->params.max_sge_recv_flags |= + OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC; + cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; + } + if (attr_mask & IB_QP_DEST_QPN) { + cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num & + OCRDMA_QP_PARAMS_DEST_QPN_MASK); + cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; + } + if (attr_mask & IB_QP_PATH_MTU) { + if (attrs->path_mtu < IB_MTU_512 || + attrs->path_mtu > IB_MTU_4096) { + pr_err("ocrdma%d: IB MTU %d is not supported\n", + dev->id, ib_mtu_enum_to_int(attrs->path_mtu)); + status = -EINVAL; + goto pmtu_err; + } + cmd->params.path_mtu_pkey_indx |= + (ib_mtu_enum_to_int(attrs->path_mtu) << + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) & + OCRDMA_QP_PARAMS_PATH_MTU_MASK; + cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID; + } + if (attr_mask & IB_QP_TIMEOUT) { + cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout << + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; + cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID; + } + if (attr_mask & IB_QP_RETRY_CNT) { + cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt << + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) & + OCRDMA_QP_PARAMS_RETRY_CNT_MASK; + cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID; + } + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer << + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) & + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK; + cmd->flags |= OCRDMA_QP_PARA_RNT_VALID; + } + if (attr_mask & IB_QP_RNR_RETRY) { + cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry << + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT) + & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK; + cmd->flags |= OCRDMA_QP_PARA_RRC_VALID; + } + if (attr_mask & IB_QP_SQ_PSN) { + cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff); + cmd->flags |= 
OCRDMA_QP_PARA_SQPSN_VALID; + } + if (attr_mask & IB_QP_RQ_PSN) { + cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff); + cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; + } + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) { + status = -EINVAL; + goto pmtu_err; + } + qp->max_ord = attrs->max_rd_atomic; + cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; + } + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) { + status = -EINVAL; + goto pmtu_err; + } + qp->max_ird = attrs->max_dest_rd_atomic; + cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID; + } + cmd->params.max_ord_ird = (qp->max_ord << + OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) | + (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK); +pmtu_err: + return status; +} + +int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, + struct ib_qp_attr *attrs, int attr_mask) +{ + int status = -ENOMEM; + struct ocrdma_modify_qp *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd)); + if (!cmd) + return status; + + cmd->params.id = qp->id; + cmd->flags = 0; + if (attr_mask & IB_QP_STATE) { + cmd->params.max_sge_recv_flags |= + (get_ocrdma_qp_state(attrs->qp_state) << + OCRDMA_QP_PARAMS_STATE_SHIFT) & + OCRDMA_QP_PARAMS_STATE_MASK; + cmd->flags |= OCRDMA_QP_PARA_QPS_VALID; + } else { + cmd->params.max_sge_recv_flags |= + (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) & + OCRDMA_QP_PARAMS_STATE_MASK; + } + + status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask); + if (status) + goto mbx_err; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + +mbx_err: + kfree(cmd); + return status; +} + +int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp) +{ + int status = -ENOMEM; + struct ocrdma_destroy_qp *cmd; + struct pci_dev *pdev = dev->nic_info.pdev; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd)); + if (!cmd) + return status; + cmd->qp_id = qp->id; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + +mbx_err: + kfree(cmd); + if (qp->sq.va) + dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); + if (!qp->srq && qp->rq.va) + dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); + if (qp->dpp_enabled) + qp->pd->num_dpp_qp++; + return status; +} + +int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq, + struct ib_srq_init_attr *srq_attr, + struct ocrdma_pd *pd) +{ + int status = -ENOMEM; + int hw_pages, hw_page_size; + int len; + struct ocrdma_create_srq_rsp *rsp; + struct ocrdma_create_srq *cmd; + dma_addr_t pa; + struct pci_dev *pdev = dev->nic_info.pdev; + u32 max_rqe_allocated; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd)); + if (!cmd) + return status; + + cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK; + max_rqe_allocated = srq_attr->attr.max_wr + 1; + status = ocrdma_build_q_conf(&max_rqe_allocated, + dev->attr.rqe_size, + &hw_pages, &hw_page_size); + if (status) { + pr_err("%s() req. 
max_wr=0x%x\n", __func__, + srq_attr->attr.max_wr); + status = -EINVAL; + goto ret; + } + len = hw_pages * hw_page_size; + srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); + if (!srq->rq.va) { + status = -ENOMEM; + goto ret; + } + ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size); + + srq->rq.entry_size = dev->attr.rqe_size; + srq->rq.pa = pa; + srq->rq.len = len; + srq->rq.max_cnt = max_rqe_allocated; + + cmd->max_sge_rqe = ilog2(max_rqe_allocated); + cmd->max_sge_rqe |= srq_attr->attr.max_sge << + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT; + + cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) + << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT); + cmd->pages_rqe_sz |= (dev->attr.rqe_size + << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT) + & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK; + cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT; + + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_create_srq_rsp *)cmd; + srq->id = rsp->id; + srq->rq.dbid = rsp->id; + max_rqe_allocated = ((rsp->max_sge_rqe_allocated & + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >> + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT); + max_rqe_allocated = (1 << max_rqe_allocated); + srq->rq.max_cnt = max_rqe_allocated; + srq->rq.max_wqe_idx = max_rqe_allocated - 1; + srq->rq.max_sges = (rsp->max_sge_rqe_allocated & + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >> + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT; + goto ret; +mbx_err: + dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa); +ret: + kfree(cmd); + return status; +} + +int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr) +{ + int status = -ENOMEM; + struct ocrdma_modify_srq *cmd; + struct ocrdma_pd *pd = srq->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd)); + if (!cmd) + return status; + cmd->id = srq->id; + cmd->limit_max_rqe |= srq_attr->srq_limit << + OCRDMA_MODIFY_SRQ_LIMIT_SHIFT; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + kfree(cmd); + return status; +} + +int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr) +{ + int status = -ENOMEM; + struct ocrdma_query_srq *cmd; + struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device); + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd)); + if (!cmd) + return status; + cmd->id = srq->rq.dbid; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status == 0) { + struct ocrdma_query_srq_rsp *rsp = + (struct ocrdma_query_srq_rsp *)cmd; + srq_attr->max_sge = + rsp->srq_lmt_max_sge & + OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK; + srq_attr->max_wr = + rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT; + srq_attr->srq_limit = rsp->srq_lmt_max_sge >> + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT; + } + kfree(cmd); + return status; +} + +int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) +{ + int status = -ENOMEM; + struct ocrdma_destroy_srq *cmd; + struct pci_dev *pdev = dev->nic_info.pdev; + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd)); + if (!cmd) + return status; + cmd->id = srq->id; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (srq->rq.va) + dma_free_coherent(&pdev->dev, srq->rq.len, + srq->rq.va, srq->rq.pa); + kfree(cmd); + return status; +} + +static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype, + struct ocrdma_dcbx_cfg *dcbxcfg) +{ + int status = 0; + 
dma_addr_t pa; + struct ocrdma_mqe cmd; + + struct ocrdma_get_dcbx_cfg_req *req = NULL; + struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL; + struct pci_dev *pdev = dev->nic_info.pdev; + struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge; + + memset(&cmd, 0, sizeof(struct ocrdma_mqe)); + cmd.hdr.pyld_len = max_t (u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp), + sizeof(struct ocrdma_get_dcbx_cfg_req)); + req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL); + if (!req) { + status = -ENOMEM; + goto mem_err; + } + + cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & + OCRDMA_MQE_HDR_SGE_CNT_MASK; + mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL); + mqe_sge->pa_hi = (u32) upper_32_bits(pa); + mqe_sge->len = cmd.hdr.pyld_len; + + memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req)); + ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG, + OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len); + req->param_type = ptype; + + status = ocrdma_mbx_cmd(dev, &cmd); + if (status) + goto mbx_err; + + rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req; + ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp)); + memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg)); + +mbx_err: + dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa); +mem_err: + return status; +} + +#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08 +#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05 + +static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype, + struct ocrdma_dcbx_cfg *dcbxcfg, + u8 *srvc_lvl) +{ + int status = -EINVAL, indx, slindx; + int ventry_cnt; + struct ocrdma_app_parameter *app_param; + u8 valid, proto_sel; + u8 app_prio, pfc_prio; + u16 proto; + + if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) { + pr_info("%s ocrdma%d DCBX is disabled\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + goto out; + } + + if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) { + pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n", + dev_name(&dev->nic_info.pdev->dev), dev->id, + (ptype > 0 ? "operational" : "admin"), + (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ? + "enabled" : "disabled", + (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ? 
+ "" : ", not sync'ed"); + goto out; + } else { + pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + } + + ventry_cnt = (dcbxcfg->tcv_aev_opv_st >> + OCRDMA_DCBX_APP_ENTRY_SHIFT) + & OCRDMA_DCBX_STATE_MASK; + + for (indx = 0; indx < ventry_cnt; indx++) { + app_param = &dcbxcfg->app_param[indx]; + valid = (app_param->valid_proto_app >> + OCRDMA_APP_PARAM_VALID_SHIFT) + & OCRDMA_APP_PARAM_VALID_MASK; + proto_sel = (app_param->valid_proto_app + >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT) + & OCRDMA_APP_PARAM_PROTO_SEL_MASK; + proto = app_param->valid_proto_app & + OCRDMA_APP_PARAM_APP_PROTO_MASK; + + if ( + valid && proto == OCRDMA_APP_PROTO_ROCE && + proto_sel == OCRDMA_PROTO_SELECT_L2) { + for (slindx = 0; slindx < + OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) { + app_prio = ocrdma_get_app_prio( + (u8 *)app_param->app_prio, + slindx); + pfc_prio = ocrdma_get_pfc_prio( + (u8 *)dcbxcfg->pfc_prio, + slindx); + + if (app_prio && pfc_prio) { + *srvc_lvl = slindx; + status = 0; + goto out; + } + } + if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) { + pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n", + dev_name(&dev->nic_info.pdev->dev), + dev->id, proto); + } + } + } + +out: + return status; +} + +void ocrdma_init_service_level(struct ocrdma_dev *dev) +{ + int status = 0, indx; + struct ocrdma_dcbx_cfg dcbxcfg; + u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL; + int ptype = OCRDMA_PARAMETER_TYPE_OPER; + + for (indx = 0; indx < 2; indx++) { + status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg); + if (status) { + pr_err("%s(): status=%d\n", __func__, status); + ptype = OCRDMA_PARAMETER_TYPE_ADMIN; + continue; + } + + status = ocrdma_parse_dcbxcfg_rsp(dev, ptype, + &dcbxcfg, &srvc_lvl); + if (status) { + ptype = OCRDMA_PARAMETER_TYPE_ADMIN; + continue; + } + + break; + } + + if (status) + pr_info("%s ocrdma%d service level default\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + else + pr_info("%s ocrdma%d service level %d\n", + dev_name(&dev->nic_info.pdev->dev), dev->id, + srvc_lvl); + + dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state); + dev->sl = srvc_lvl; +} + +int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) +{ + int i; + int status = -EINVAL; + struct ocrdma_av *av; + unsigned long flags; + + av = dev->av_tbl.va; + spin_lock_irqsave(&dev->av_tbl.lock, flags); + for (i = 0; i < dev->av_tbl.num_ah; i++) { + if (av->valid == 0) { + av->valid = OCRDMA_AV_VALID; + ah->av = av; + ah->id = i; + status = 0; + break; + } + av++; + } + if (i == dev->av_tbl.num_ah) + status = -EAGAIN; + spin_unlock_irqrestore(&dev->av_tbl.lock, flags); + return status; +} + +int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) +{ + unsigned long flags; + spin_lock_irqsave(&dev->av_tbl.lock, flags); + ah->av->valid = 0; + spin_unlock_irqrestore(&dev->av_tbl.lock, flags); + return 0; +} + +static int ocrdma_create_eqs(struct ocrdma_dev *dev) +{ + int num_eq, i, status = 0; + int irq; + unsigned long flags = 0; + + num_eq = dev->nic_info.msix.num_vectors - + dev->nic_info.msix.start_vector; + if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) { + num_eq = 1; + flags = IRQF_SHARED; + } else { + num_eq = min_t(u32, num_eq, num_online_cpus()); + } + + if (!num_eq) + return -EINVAL; + + dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL); + if (!dev->eq_tbl) + return -ENOMEM; + + for (i = 0; i < num_eq; i++) { + status = ocrdma_create_eq(dev, &dev->eq_tbl[i], + OCRDMA_EQ_LEN); 
+ if (status) { + status = -EINVAL; + break; + } + sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d", + dev->id, i); + irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]); + status = request_irq(irq, ocrdma_irq_handler, flags, + dev->eq_tbl[i].irq_name, + &dev->eq_tbl[i]); + if (status) + goto done; + dev->eq_cnt += 1; + } + /* one eq is sufficient for data path to work */ + return 0; +done: + ocrdma_destroy_eqs(dev); + return status; +} + +static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, + int num) +{ + int i, status = -ENOMEM; + struct ocrdma_modify_eqd_req *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd)); + if (!cmd) + return status; + + ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + cmd->cmd.num_eq = num; + for (i = 0; i < num; i++) { + cmd->cmd.set_eqd[i].eq_id = eq[i].q.id; + cmd->cmd.set_eqd[i].phase = 0; + cmd->cmd.set_eqd[i].delay_multiplier = + (eq[i].aic_obj.prev_eqd * 65)/100; + } + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, + int num) +{ + int num_eqs, i = 0; + if (num > 8) { + while (num) { + num_eqs = min(num, 8); + ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs); + i += num_eqs; + num -= num_eqs; + } + } else { + ocrdma_mbx_modify_eqd(dev, eq, num); + } + return 0; +} + +void ocrdma_eqd_set_task(struct work_struct *work) +{ + struct ocrdma_dev *dev = + container_of(work, struct ocrdma_dev, eqd_work.work); + struct ocrdma_eq *eq = 0; + int i, num = 0, status = -EINVAL; + u64 eq_intr; + + for (i = 0; i < dev->eq_cnt; i++) { + eq = &dev->eq_tbl[i]; + if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) { + eq_intr = eq->aic_obj.eq_intr_cnt - + eq->aic_obj.prev_eq_intr_cnt; + if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) && + (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) { + eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD; + num++; + } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) && + (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) { + eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD; + num++; + } + } + eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt; + } + + if (num) + status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num); + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); +} + +int ocrdma_init_hw(struct ocrdma_dev *dev) +{ + int status; + + /* create the eqs */ + status = ocrdma_create_eqs(dev); + if (status) + goto qpeq_err; + status = ocrdma_create_mq(dev); + if (status) + goto mq_err; + status = ocrdma_mbx_query_fw_config(dev); + if (status) + goto conf_err; + status = ocrdma_mbx_query_dev(dev); + if (status) + goto conf_err; + status = ocrdma_mbx_query_fw_ver(dev); + if (status) + goto conf_err; + status = ocrdma_mbx_create_ah_tbl(dev); + if (status) + goto conf_err; + status = ocrdma_mbx_get_phy_info(dev); + if (status) + goto info_attrb_err; + status = ocrdma_mbx_get_ctrl_attribs(dev); + if (status) + goto info_attrb_err; + + return 0; + +info_attrb_err: + ocrdma_mbx_delete_ah_tbl(dev); +conf_err: + ocrdma_destroy_mq(dev); +mq_err: + ocrdma_destroy_eqs(dev); +qpeq_err: + pr_err("%s() status=%d\n", __func__, status); + return status; +} + +void ocrdma_cleanup_hw(struct ocrdma_dev *dev) +{ + ocrdma_free_pd_pool(dev); + ocrdma_mbx_delete_ah_tbl(dev); + + /* cleanup the control path */ + ocrdma_destroy_mq(dev); + + /* cleanup the eqs */ + ocrdma_destroy_eqs(dev); +} diff --git 
a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.h new file mode 100644 index 000000000..e905972fc --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -0,0 +1,142 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) CNA Adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_HW_H__ +#define __OCRDMA_HW_H__ + +#include "ocrdma_sli.h" + +static inline void ocrdma_cpu_to_le32(void *dst, u32 len) +{ +#ifdef __BIG_ENDIAN + int i = 0; + u32 *src_ptr = dst; + u32 *dst_ptr = dst; + for (; i < (len / 4); i++) + *(dst_ptr + i) = cpu_to_le32p(src_ptr + i); +#endif +} + +static inline void ocrdma_le32_to_cpu(void *dst, u32 len) +{ +#ifdef __BIG_ENDIAN + int i = 0; + u32 *src_ptr = dst; + u32 *dst_ptr = dst; + for (; i < (len / sizeof(u32)); i++) + *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i)); +#endif +} + +static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len) +{ +#ifdef __BIG_ENDIAN + int i = 0; + u32 *src_ptr = src; + u32 *dst_ptr = dst; + for (; i < (len / sizeof(u32)); i++) + *(dst_ptr + i) = cpu_to_le32p(src_ptr + i); +#else + memcpy(dst, src, len); +#endif +} + +static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len) +{ +#ifdef __BIG_ENDIAN + int i = 0; + u32 *src_ptr = src; + u32 *dst_ptr = dst; + for (; i < len / sizeof(u32); i++) + *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i)); +#else + memcpy(dst, src, len); +#endif +} + +static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid) +{ + return dev->nic_info.unmapped_db + (pdid * dev->nic_info.db_page_size); +} + +int ocrdma_init_hw(struct ocrdma_dev *); +void ocrdma_cleanup_hw(struct ocrdma_dev *); + +enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps); +void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed, + bool solicited, u16 cqe_popped); + +/* verbs specific mailbox commands */ +int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); +int ocrdma_query_config(struct ocrdma_dev *, + struct ocrdma_mbx_query_config *config); + +int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); +int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); + +int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr, + u32 pd_id, int addr_check); +int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey); + +int ocrdma_reg_mr(struct ocrdma_dev *, struct 
ocrdma_hw_mr *hwmr, + u32 pd_id, int acc); +int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *, + int entries, int dpp_cq, u16 pd_id); +int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *); + +int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs, + u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, + u16 *dpp_credit_lmt); +int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, + struct ib_qp_attr *attrs, int attr_mask); +int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, + struct ocrdma_qp_params *param); +int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); +int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *, + struct ib_srq_init_attr *, + struct ocrdma_pd *); +int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *); +int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *); +int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *); + +int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *); +int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *); + +int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state, + enum ib_qp_state *old_ib_state); +bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); +bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); +void ocrdma_flush_qp(struct ocrdma_qp *); +int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); + +int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); +char *port_speed_string(struct ocrdma_dev *dev); +void ocrdma_init_service_level(struct ocrdma_dev *); +void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); +void ocrdma_free_pd_range(struct ocrdma_dev *dev); + +#endif /* __OCRDMA_HW_H__ */ diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_main.c new file mode 100644 index 000000000..7a2b59aca --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -0,0 +1,682 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include + +#include "ocrdma.h" +#include "ocrdma_verbs.h" +#include "ocrdma_ah.h" +#include "be_roce.h" +#include "ocrdma_hw.h" +#include "ocrdma_stats.h" +#include "ocrdma_abi.h" + +MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); +MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); +MODULE_AUTHOR("Emulex Corporation"); +MODULE_LICENSE("GPL"); + +static LIST_HEAD(ocrdma_dev_list); +static DEFINE_SPINLOCK(ocrdma_devlist_lock); +static DEFINE_IDR(ocrdma_dev_id); + +static union ib_gid ocrdma_zero_sgid; + +void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) +{ + u8 mac_addr[6]; + + memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); + guid[0] = mac_addr[0] ^ 2; + guid[1] = mac_addr[1]; + guid[2] = mac_addr[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = mac_addr[3]; + guid[6] = mac_addr[4]; + guid[7] = mac_addr[5]; +} + +static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid) +{ + int i; + unsigned long flags; + + memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); + + + spin_lock_irqsave(&dev->sgid_lock, flags); + for (i = 0; i < OCRDMA_MAX_SGID; i++) { + if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, + sizeof(union ib_gid))) { + /* found free entry */ + memcpy(&dev->sgid_tbl[i], new_sgid, + sizeof(union ib_gid)); + spin_unlock_irqrestore(&dev->sgid_lock, flags); + return true; + } else if (!memcmp(&dev->sgid_tbl[i], new_sgid, + sizeof(union ib_gid))) { + /* entry already present, no addition is required. */ + spin_unlock_irqrestore(&dev->sgid_lock, flags); + return false; + } + } + spin_unlock_irqrestore(&dev->sgid_lock, flags); + return false; +} + +static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid) +{ + int found = false; + int i; + unsigned long flags; + + + spin_lock_irqsave(&dev->sgid_lock, flags); + /* first is default sgid, which cannot be deleted. 
*/ + for (i = 1; i < OCRDMA_MAX_SGID; i++) { + if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) { + /* found matching entry */ + memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid)); + found = true; + break; + } + } + spin_unlock_irqrestore(&dev->sgid_lock, flags); + return found; +} + +static int ocrdma_addr_event(unsigned long event, struct net_device *netdev, + union ib_gid *gid) +{ + struct ib_event gid_event; + struct ocrdma_dev *dev; + bool found = false; + bool updated = false; + bool is_vlan = false; + + is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; + if (is_vlan) + netdev = rdma_vlan_dev_real_dev(netdev); + + rcu_read_lock(); + list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { + if (dev->nic_info.netdev == netdev) { + found = true; + break; + } + } + rcu_read_unlock(); + + if (!found) + return NOTIFY_DONE; + + mutex_lock(&dev->dev_lock); + switch (event) { + case NETDEV_UP: + updated = ocrdma_add_sgid(dev, gid); + break; + case NETDEV_DOWN: + updated = ocrdma_del_sgid(dev, gid); + break; + default: + break; + } + if (updated) { + /* GID table updated, notify the consumers about it */ + gid_event.device = &dev->ibdev; + gid_event.element.port_num = 1; + gid_event.event = IB_EVENT_GID_CHANGE; + ib_dispatch_event(&gid_event); + } + mutex_unlock(&dev->dev_lock); + return NOTIFY_OK; +} + +static int ocrdma_inetaddr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + union ib_gid gid; + struct net_device *netdev = ifa->ifa_dev->dev; + + ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid); + return ocrdma_addr_event(event, netdev, &gid); +} + +static struct notifier_block ocrdma_inetaddr_notifier = { + .notifier_call = ocrdma_inetaddr_event +}; + +#if IS_ENABLED(CONFIG_IPV6) + +static int ocrdma_inet6addr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + union ib_gid *gid = (union ib_gid *)&ifa->addr; + struct net_device *netdev = ifa->idev->dev; + return ocrdma_addr_event(event, netdev, gid); +} + +static struct notifier_block ocrdma_inet6addr_notifier = { + .notifier_call = ocrdma_inet6addr_event +}; + +#endif /* IPV6 and VLAN */ + +static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, + u8 port_num) +{ + return IB_LINK_LAYER_ETHERNET; +} + +static int ocrdma_register_device(struct ocrdma_dev *dev) +{ + strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX); + ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); + memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, + sizeof(OCRDMA_NODE_DESC)); + dev->ibdev.owner = THIS_MODULE; + dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION; + dev->ibdev.uverbs_cmd_mask = + OCRDMA_UVERBS(GET_CONTEXT) | + OCRDMA_UVERBS(QUERY_DEVICE) | + OCRDMA_UVERBS(QUERY_PORT) | + OCRDMA_UVERBS(ALLOC_PD) | + OCRDMA_UVERBS(DEALLOC_PD) | + OCRDMA_UVERBS(REG_MR) | + OCRDMA_UVERBS(DEREG_MR) | + OCRDMA_UVERBS(CREATE_COMP_CHANNEL) | + OCRDMA_UVERBS(CREATE_CQ) | + OCRDMA_UVERBS(RESIZE_CQ) | + OCRDMA_UVERBS(DESTROY_CQ) | + OCRDMA_UVERBS(REQ_NOTIFY_CQ) | + OCRDMA_UVERBS(CREATE_QP) | + OCRDMA_UVERBS(MODIFY_QP) | + OCRDMA_UVERBS(QUERY_QP) | + OCRDMA_UVERBS(DESTROY_QP) | + OCRDMA_UVERBS(POLL_CQ) | + OCRDMA_UVERBS(POST_SEND) | + OCRDMA_UVERBS(POST_RECV); + + dev->ibdev.uverbs_cmd_mask |= + OCRDMA_UVERBS(CREATE_AH) | + OCRDMA_UVERBS(MODIFY_AH) | + OCRDMA_UVERBS(QUERY_AH) | + OCRDMA_UVERBS(DESTROY_AH); + + dev->ibdev.node_type = RDMA_NODE_IB_CA; + dev->ibdev.phys_port_cnt = 1; + 
dev->ibdev.num_comp_vectors = dev->eq_cnt; + + /* mandatory verbs. */ + dev->ibdev.query_device = ocrdma_query_device; + dev->ibdev.query_port = ocrdma_query_port; + dev->ibdev.modify_port = ocrdma_modify_port; + dev->ibdev.query_gid = ocrdma_query_gid; + dev->ibdev.get_link_layer = ocrdma_link_layer; + dev->ibdev.alloc_pd = ocrdma_alloc_pd; + dev->ibdev.dealloc_pd = ocrdma_dealloc_pd; + + dev->ibdev.create_cq = ocrdma_create_cq; + dev->ibdev.destroy_cq = ocrdma_destroy_cq; + dev->ibdev.resize_cq = ocrdma_resize_cq; + + dev->ibdev.create_qp = ocrdma_create_qp; + dev->ibdev.modify_qp = ocrdma_modify_qp; + dev->ibdev.query_qp = ocrdma_query_qp; + dev->ibdev.destroy_qp = ocrdma_destroy_qp; + + dev->ibdev.query_pkey = ocrdma_query_pkey; + dev->ibdev.create_ah = ocrdma_create_ah; + dev->ibdev.destroy_ah = ocrdma_destroy_ah; + dev->ibdev.query_ah = ocrdma_query_ah; + dev->ibdev.modify_ah = ocrdma_modify_ah; + + dev->ibdev.poll_cq = ocrdma_poll_cq; + dev->ibdev.post_send = ocrdma_post_send; + dev->ibdev.post_recv = ocrdma_post_recv; + dev->ibdev.req_notify_cq = ocrdma_arm_cq; + + dev->ibdev.get_dma_mr = ocrdma_get_dma_mr; + dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr; + dev->ibdev.dereg_mr = ocrdma_dereg_mr; + dev->ibdev.reg_user_mr = ocrdma_reg_user_mr; + + dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr; + dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list; + dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list; + + /* mandatory to support user space verbs consumer. */ + dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; + dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; + dev->ibdev.mmap = ocrdma_mmap; + dev->ibdev.dma_device = &dev->nic_info.pdev->dev; + + dev->ibdev.process_mad = ocrdma_process_mad; + + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { + dev->ibdev.uverbs_cmd_mask |= + OCRDMA_UVERBS(CREATE_SRQ) | + OCRDMA_UVERBS(MODIFY_SRQ) | + OCRDMA_UVERBS(QUERY_SRQ) | + OCRDMA_UVERBS(DESTROY_SRQ) | + OCRDMA_UVERBS(POST_SRQ_RECV); + + dev->ibdev.create_srq = ocrdma_create_srq; + dev->ibdev.modify_srq = ocrdma_modify_srq; + dev->ibdev.query_srq = ocrdma_query_srq; + dev->ibdev.destroy_srq = ocrdma_destroy_srq; + dev->ibdev.post_srq_recv = ocrdma_post_srq_recv; + } + return ib_register_device(&dev->ibdev, NULL); +} + +static int ocrdma_alloc_resources(struct ocrdma_dev *dev) +{ + mutex_init(&dev->dev_lock); + dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * + OCRDMA_MAX_SGID, GFP_KERNEL); + if (!dev->sgid_tbl) + goto alloc_err; + spin_lock_init(&dev->sgid_lock); + + dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) * + OCRDMA_MAX_CQ, GFP_KERNEL); + if (!dev->cq_tbl) + goto alloc_err; + + if (dev->attr.max_qp) { + dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) * + OCRDMA_MAX_QP, GFP_KERNEL); + if (!dev->qp_tbl) + goto alloc_err; + } + + dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL); + if (dev->stag_arr == NULL) + goto alloc_err; + + ocrdma_alloc_pd_pool(dev); + + spin_lock_init(&dev->av_tbl.lock); + spin_lock_init(&dev->flush_q_lock); + return 0; +alloc_err: + pr_err("%s(%d) error.\n", __func__, dev->id); + return -ENOMEM; +} + +static void ocrdma_free_resources(struct ocrdma_dev *dev) +{ + kfree(dev->stag_arr); + kfree(dev->qp_tbl); + kfree(dev->cq_tbl); + kfree(dev->sgid_tbl); +} + +/* OCRDMA sysfs interface */ +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct ocrdma_dev *dev = dev_get_drvdata(device); + + return scnprintf(buf, PAGE_SIZE, "0x%x\n", 
dev->nic_info.pdev->vendor); +} + +static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct ocrdma_dev *dev = dev_get_drvdata(device); + + return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]); +} + +static ssize_t show_hca_type(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ocrdma_dev *dev = dev_get_drvdata(device); + + return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL); + +static struct device_attribute *ocrdma_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_fw_ver, + &dev_attr_hca_type +}; + +static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) + device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); +} + +static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) +{ + /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */ + union ib_gid *sgid = &dev->sgid_tbl[0]; + + sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + ocrdma_get_guid(dev, &sgid->raw[8]); +} + +static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev, + struct net_device *net) +{ + struct in_device *in_dev; + union ib_gid gid; + in_dev = in_dev_get(net); + if (in_dev) { + for_ifa(in_dev) { + ipv6_addr_set_v4mapped(ifa->ifa_address, + (struct in6_addr *)&gid); + ocrdma_add_sgid(dev, &gid); + } + endfor_ifa(in_dev); + in_dev_put(in_dev); + } +} + +static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev, + struct net_device *net) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_dev *in6_dev; + union ib_gid *pgid; + struct inet6_ifaddr *ifp; + in6_dev = in6_dev_get(net); + if (in6_dev) { + read_lock_bh(&in6_dev->lock); + list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { + pgid = (union ib_gid *)&ifp->addr; + ocrdma_add_sgid(dev, pgid); + } + read_unlock_bh(&in6_dev->lock); + in6_dev_put(in6_dev); + } +#endif +} + +static void ocrdma_init_gid_table(struct ocrdma_dev *dev) +{ + struct net_device *net_dev; + + for_each_netdev(&init_net, net_dev) { + struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ? 
+ rdma_vlan_dev_real_dev(net_dev) : net_dev; + + if (real_dev == dev->nic_info.netdev) { + ocrdma_add_default_sgid(dev); + ocrdma_init_ipv4_gids(dev, net_dev); + ocrdma_init_ipv6_gids(dev, net_dev); + } + } +} + +static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) +{ + int status = 0, i; + struct ocrdma_dev *dev; + + dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); + if (!dev) { + pr_err("Unable to allocate ib device\n"); + return NULL; + } + dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL); + if (!dev->mbx_cmd) + goto idr_err; + + memcpy(&dev->nic_info, dev_info, sizeof(*dev_info)); + dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL); + if (dev->id < 0) + goto idr_err; + + status = ocrdma_init_hw(dev); + if (status) + goto init_err; + + status = ocrdma_alloc_resources(dev); + if (status) + goto alloc_err; + + ocrdma_init_service_level(dev); + ocrdma_init_gid_table(dev); + status = ocrdma_register_device(dev); + if (status) + goto alloc_err; + + for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) + if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) + goto sysfs_err; + spin_lock(&ocrdma_devlist_lock); + list_add_tail_rcu(&dev->entry, &ocrdma_dev_list); + spin_unlock(&ocrdma_devlist_lock); + /* Init stats */ + ocrdma_add_port_stats(dev); + /* Interrupt Moderation */ + INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task); + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); + + pr_info("%s %s: %s \"%s\" port %d\n", + dev_name(&dev->nic_info.pdev->dev), hca_name(dev), + port_speed_string(dev), dev->model_number, + dev->hba_port_num); + pr_info("%s ocrdma%d driver loaded successfully\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + return dev; + +sysfs_err: + ocrdma_remove_sysfiles(dev); +alloc_err: + ocrdma_free_resources(dev); + ocrdma_cleanup_hw(dev); +init_err: + idr_remove(&ocrdma_dev_id, dev->id); +idr_err: + kfree(dev->mbx_cmd); + ib_dealloc_device(&dev->ibdev); + pr_err("%s() leaving. ret=%d\n", __func__, status); + return NULL; +} + +static void ocrdma_remove_free(struct rcu_head *rcu) +{ + struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu); + + idr_remove(&ocrdma_dev_id, dev->id); + kfree(dev->mbx_cmd); + ib_dealloc_device(&dev->ibdev); +} + +static void ocrdma_remove(struct ocrdma_dev *dev) +{ + /* first unregister with stack to stop all the active traffic + * of the registered clients. 
+ */ + cancel_delayed_work_sync(&dev->eqd_work); + ocrdma_remove_sysfiles(dev); + ib_unregister_device(&dev->ibdev); + + ocrdma_rem_port_stats(dev); + + spin_lock(&ocrdma_devlist_lock); + list_del_rcu(&dev->entry); + spin_unlock(&ocrdma_devlist_lock); + + ocrdma_free_resources(dev); + ocrdma_cleanup_hw(dev); + + call_rcu(&dev->rcu, ocrdma_remove_free); +} + +static int ocrdma_open(struct ocrdma_dev *dev) +{ + struct ib_event port_event; + + port_event.event = IB_EVENT_PORT_ACTIVE; + port_event.element.port_num = 1; + port_event.device = &dev->ibdev; + ib_dispatch_event(&port_event); + return 0; +} + +static int ocrdma_close(struct ocrdma_dev *dev) +{ + int i; + struct ocrdma_qp *qp, **cur_qp; + struct ib_event err_event; + struct ib_qp_attr attrs; + int attr_mask = IB_QP_STATE; + + attrs.qp_state = IB_QPS_ERR; + mutex_lock(&dev->dev_lock); + if (dev->qp_tbl) { + cur_qp = dev->qp_tbl; + for (i = 0; i < OCRDMA_MAX_QP; i++) { + qp = cur_qp[i]; + if (qp && qp->ibqp.qp_type != IB_QPT_GSI) { + /* change the QP state to ERROR */ + _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); + + err_event.event = IB_EVENT_QP_FATAL; + err_event.element.qp = &qp->ibqp; + err_event.device = &dev->ibdev; + ib_dispatch_event(&err_event); + } + } + } + mutex_unlock(&dev->dev_lock); + + err_event.event = IB_EVENT_PORT_ERR; + err_event.element.port_num = 1; + err_event.device = &dev->ibdev; + ib_dispatch_event(&err_event); + return 0; +} + +static void ocrdma_shutdown(struct ocrdma_dev *dev) +{ + ocrdma_close(dev); + ocrdma_remove(dev); +} + +/* event handling via NIC driver ensures that all the NIC specific + * initialization done before RoCE driver notifies + * event to stack. + */ +static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) +{ + switch (event) { + case BE_DEV_UP: + ocrdma_open(dev); + break; + case BE_DEV_DOWN: + ocrdma_close(dev); + break; + case BE_DEV_SHUTDOWN: + ocrdma_shutdown(dev); + break; + } +} + +static struct ocrdma_driver ocrdma_drv = { + .name = "ocrdma_driver", + .add = ocrdma_add, + .remove = ocrdma_remove, + .state_change_handler = ocrdma_event_handler, + .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION, +}; + +static void ocrdma_unregister_inet6addr_notifier(void) +{ +#if IS_ENABLED(CONFIG_IPV6) + unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); +#endif +} + +static void ocrdma_unregister_inetaddr_notifier(void) +{ + unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier); +} + +static int __init ocrdma_init_module(void) +{ + int status; + + ocrdma_init_debugfs(); + + status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier); + if (status) + return status; + +#if IS_ENABLED(CONFIG_IPV6) + status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); + if (status) + goto err_notifier6; +#endif + + status = be_roce_register_driver(&ocrdma_drv); + if (status) + goto err_be_reg; + + return 0; + +err_be_reg: +#if IS_ENABLED(CONFIG_IPV6) + ocrdma_unregister_inet6addr_notifier(); +err_notifier6: +#endif + ocrdma_unregister_inetaddr_notifier(); + return status; +} + +static void __exit ocrdma_exit_module(void) +{ + be_roce_unregister_driver(&ocrdma_drv); + ocrdma_unregister_inet6addr_notifier(); + ocrdma_unregister_inetaddr_notifier(); + ocrdma_rem_debugfs(); +} + +module_init(ocrdma_init_module); +module_exit(ocrdma_exit_module); diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_sli.h new file mode 100644 index 000000000..02ad0aee9 --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -0,0 
+1,2173 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_SLI_H__ +#define __OCRDMA_SLI_H__ + +enum { + OCRDMA_ASIC_GEN_SKH_R = 0x04, + OCRDMA_ASIC_GEN_LANCER = 0x0B +}; + +enum { + OCRDMA_ASIC_REV_A0 = 0x00, + OCRDMA_ASIC_REV_B0 = 0x10, + OCRDMA_ASIC_REV_C0 = 0x20 +}; + +#define OCRDMA_SUBSYS_ROCE 10 +enum { + OCRDMA_CMD_QUERY_CONFIG = 1, + OCRDMA_CMD_ALLOC_PD = 2, + OCRDMA_CMD_DEALLOC_PD = 3, + + OCRDMA_CMD_CREATE_AH_TBL = 4, + OCRDMA_CMD_DELETE_AH_TBL = 5, + + OCRDMA_CMD_CREATE_QP = 6, + OCRDMA_CMD_QUERY_QP = 7, + OCRDMA_CMD_MODIFY_QP = 8 , + OCRDMA_CMD_DELETE_QP = 9, + + OCRDMA_CMD_RSVD1 = 10, + OCRDMA_CMD_ALLOC_LKEY = 11, + OCRDMA_CMD_DEALLOC_LKEY = 12, + OCRDMA_CMD_REGISTER_NSMR = 13, + OCRDMA_CMD_REREGISTER_NSMR = 14, + OCRDMA_CMD_REGISTER_NSMR_CONT = 15, + OCRDMA_CMD_QUERY_NSMR = 16, + OCRDMA_CMD_ALLOC_MW = 17, + OCRDMA_CMD_QUERY_MW = 18, + + OCRDMA_CMD_CREATE_SRQ = 19, + OCRDMA_CMD_QUERY_SRQ = 20, + OCRDMA_CMD_MODIFY_SRQ = 21, + OCRDMA_CMD_DELETE_SRQ = 22, + + OCRDMA_CMD_ATTACH_MCAST = 23, + OCRDMA_CMD_DETACH_MCAST = 24, + + OCRDMA_CMD_CREATE_RBQ = 25, + OCRDMA_CMD_DESTROY_RBQ = 26, + + OCRDMA_CMD_GET_RDMA_STATS = 27, + OCRDMA_CMD_ALLOC_PD_RANGE = 28, + OCRDMA_CMD_DEALLOC_PD_RANGE = 29, + + OCRDMA_CMD_MAX +}; + +#define OCRDMA_SUBSYS_COMMON 1 +enum { + OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1 = 5, + OCRDMA_CMD_CREATE_CQ = 12, + OCRDMA_CMD_CREATE_EQ = 13, + OCRDMA_CMD_CREATE_MQ = 21, + OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, + OCRDMA_CMD_GET_FW_VER = 35, + OCRDMA_CMD_MODIFY_EQ_DELAY = 41, + OCRDMA_CMD_DELETE_MQ = 53, + OCRDMA_CMD_DELETE_CQ = 54, + OCRDMA_CMD_DELETE_EQ = 55, + OCRDMA_CMD_GET_FW_CONFIG = 58, + OCRDMA_CMD_CREATE_MQ_EXT = 90, + OCRDMA_CMD_PHY_DETAILS = 102 +}; + +enum { + QTYPE_EQ = 1, + QTYPE_CQ = 2, + QTYPE_MCCQ = 3 +}; + +#define OCRDMA_MAX_SGID 16 + +#define OCRDMA_MAX_QP 2048 +#define OCRDMA_MAX_CQ 2048 +#define OCRDMA_MAX_STAG 16384 + +enum { + OCRDMA_DB_RQ_OFFSET = 0xE0, + OCRDMA_DB_GEN2_RQ_OFFSET = 0x100, + OCRDMA_DB_SQ_OFFSET = 0x60, + OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0, + OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET, + OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET, + OCRDMA_DB_CQ_OFFSET = 0x120, + OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET, + OCRDMA_DB_MQ_OFFSET = 0x140, + + OCRDMA_DB_SQ_SHIFT = 16, + OCRDMA_DB_RQ_SHIFT = 24 +}; + +#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +#define 
OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */ +/* qid #2 msbits at 12-11 */ +#define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT 0x1 +#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT 16 /* bits 16 - 28 */ +/* Rearm bit */ +#define OCRDMA_DB_CQ_REARM_SHIFT 29 /* bit 29 */ +/* solicited bit */ +#define OCRDMA_DB_CQ_SOLICIT_SHIFT 31 /* bit 31 */ + +#define OCRDMA_EQ_ID_MASK 0x1FF /* bits 0 - 8 */ +#define OCRDMA_EQ_ID_EXT_MASK 0x3e00 /* bits 9-13 */ +#define OCRDMA_EQ_ID_EXT_MASK_SHIFT 2 /* qid bits 9-13 at 11-15 */ + +/* Clear the interrupt for this eq */ +#define OCRDMA_EQ_CLR_SHIFT 9 /* bit 9 */ +/* Must be 1 */ +#define OCRDMA_EQ_TYPE_SHIFT 10 /* bit 10 */ +/* Number of event entries processed */ +#define OCRDMA_NUM_EQE_SHIFT 16 /* bits 16 - 28 */ +/* Rearm bit */ +#define OCRDMA_REARM_SHIFT 29 /* bit 29 */ + +#define OCRDMA_MQ_ID_MASK 0x7FF /* bits 0 - 10 */ +/* Number of entries posted */ +#define OCRDMA_MQ_NUM_MQE_SHIFT 16 /* bits 16 - 29 */ + +#define OCRDMA_MIN_HPAGE_SIZE 4096 + +#define OCRDMA_MIN_Q_PAGE_SIZE 4096 +#define OCRDMA_MAX_Q_PAGES 8 + +#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C +#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF +#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00 +#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08 +/* +# 0: 4K Bytes +# 1: 8K Bytes +# 2: 16K Bytes +# 3: 32K Bytes +# 4: 64K Bytes +# 5: 128K Bytes +# 6: 256K Bytes +# 7: 512K Bytes +*/ +#define OCRDMA_MAX_Q_PAGE_SIZE_CNT 8 +#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES) + +#define MAX_OCRDMA_QP_PAGES 8 +#define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_HQ_PAGE_SIZE) + +#define OCRDMA_CREATE_CQ_MAX_PAGES 4 +#define OCRDMA_DPP_CQE_SIZE 4 + +#define OCRDMA_GEN2_MAX_CQE 1024 +#define OCRDMA_GEN2_CQ_PAGE_SIZE 4096 +#define OCRDMA_GEN2_WQE_SIZE 256 +#define OCRDMA_MAX_CQE 4095 +#define OCRDMA_CQ_PAGE_SIZE 16384 +#define OCRDMA_WQE_SIZE 128 +#define OCRDMA_WQE_STRIDE 8 +#define OCRDMA_WQE_ALIGN_BYTES 16 + +#define MAX_OCRDMA_SRQ_PAGES MAX_OCRDMA_QP_PAGES + +enum { + OCRDMA_MCH_OPCODE_SHIFT = 0, + OCRDMA_MCH_OPCODE_MASK = 0xFF, + OCRDMA_MCH_SUBSYS_SHIFT = 8, + OCRDMA_MCH_SUBSYS_MASK = 0xFF00 +}; + +/* mailbox cmd header */ +struct ocrdma_mbx_hdr { + u32 subsys_op; + u32 timeout; /* in seconds */ + u32 cmd_len; + u32 rsvd_version; +}; + +enum { + OCRDMA_MBX_RSP_OPCODE_SHIFT = 0, + OCRDMA_MBX_RSP_OPCODE_MASK = 0xFF, + OCRDMA_MBX_RSP_SUBSYS_SHIFT = 8, + OCRDMA_MBX_RSP_SUBSYS_MASK = 0xFF << OCRDMA_MBX_RSP_SUBSYS_SHIFT, + + OCRDMA_MBX_RSP_STATUS_SHIFT = 0, + OCRDMA_MBX_RSP_STATUS_MASK = 0xFF, + OCRDMA_MBX_RSP_ASTATUS_SHIFT = 8, + OCRDMA_MBX_RSP_ASTATUS_MASK = 0xFF << OCRDMA_MBX_RSP_ASTATUS_SHIFT +}; + +/* mailbox cmd response */ +struct ocrdma_mbx_rsp { + u32 subsys_op; + u32 status; + u32 rsp_len; + u32 add_rsp_len; +}; + +enum { + OCRDMA_MQE_EMBEDDED = 1, + OCRDMA_MQE_NONEMBEDDED = 0 +}; + +struct ocrdma_mqe_sge { + u32 pa_lo; + u32 pa_hi; + u32 len; +}; + +enum { + OCRDMA_MQE_HDR_EMB_SHIFT = 0, + OCRDMA_MQE_HDR_EMB_MASK = BIT(0), + OCRDMA_MQE_HDR_SGE_CNT_SHIFT = 3, + OCRDMA_MQE_HDR_SGE_CNT_MASK = 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT, + OCRDMA_MQE_HDR_SPECIAL_SHIFT = 24, + OCRDMA_MQE_HDR_SPECIAL_MASK = 0xFF << OCRDMA_MQE_HDR_SPECIAL_SHIFT +}; + +struct ocrdma_mqe_hdr { + u32 spcl_sge_cnt_emb; + u32 pyld_len; + u32 tag_lo; + u32 tag_hi; + u32 rsvd3; +}; + +struct ocrdma_mqe_emb_cmd { + struct ocrdma_mbx_hdr mch; + u8 pyld[220]; +}; + +struct ocrdma_mqe { + struct ocrdma_mqe_hdr hdr; + union { + struct ocrdma_mqe_emb_cmd emb_req; + struct { + struct ocrdma_mqe_sge sge[19]; + 
} nonemb_req; + u8 cmd[236]; + struct ocrdma_mbx_rsp rsp; + } u; +}; + +#define OCRDMA_EQ_LEN 4096 +#define OCRDMA_MQ_CQ_LEN 256 +#define OCRDMA_MQ_LEN 128 + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) + +/* Returns number of pages spanned by the data starting at the given addr */ +#define PAGES_4K_SPANNED(_address, size) \ + ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ + (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) + +struct ocrdma_delete_q_req { + struct ocrdma_mbx_hdr req; + u32 id; +}; + +struct ocrdma_pa { + u32 lo; + u32 hi; +}; + +#define MAX_OCRDMA_EQ_PAGES 8 +struct ocrdma_create_eq_req { + struct ocrdma_mbx_hdr req; + u32 num_pages; + u32 valid; + u32 cnt; + u32 delay; + u32 rsvd; + struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES]; +}; + +enum { + OCRDMA_CREATE_EQ_VALID = BIT(29), + OCRDMA_CREATE_EQ_CNT_SHIFT = 26, + OCRDMA_CREATE_CQ_DELAY_SHIFT = 13, +}; + +struct ocrdma_create_eq_rsp { + struct ocrdma_mbx_rsp rsp; + u32 vector_eqid; +}; + +#define OCRDMA_EQ_MINOR_OTHER 0x1 + +struct ocrmda_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +}; + +struct ocrdma_modify_eqd_cmd { + struct ocrdma_mbx_hdr req; + u32 num_eq; + struct ocrmda_set_eqd set_eqd[8]; +} __packed; + +struct ocrdma_modify_eqd_req { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_modify_eqd_cmd cmd; +}; + + +struct ocrdma_modify_eq_delay_rsp { + struct ocrdma_mbx_rsp hdr; + u32 rsvd0; +} __packed; + +enum { + OCRDMA_MCQE_STATUS_SHIFT = 0, + OCRDMA_MCQE_STATUS_MASK = 0xFFFF, + OCRDMA_MCQE_ESTATUS_SHIFT = 16, + OCRDMA_MCQE_ESTATUS_MASK = 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT, + OCRDMA_MCQE_CONS_SHIFT = 27, + OCRDMA_MCQE_CONS_MASK = BIT(27), + OCRDMA_MCQE_CMPL_SHIFT = 28, + OCRDMA_MCQE_CMPL_MASK = BIT(28), + OCRDMA_MCQE_AE_SHIFT = 30, + OCRDMA_MCQE_AE_MASK = BIT(30), + OCRDMA_MCQE_VALID_SHIFT = 31, + OCRDMA_MCQE_VALID_MASK = BIT(31) +}; + +struct ocrdma_mcqe { + u32 status; + u32 tag_lo; + u32 tag_hi; + u32 valid_ae_cmpl_cons; +}; + +enum { + OCRDMA_AE_MCQE_QPVALID = BIT(31), + OCRDMA_AE_MCQE_QPID_MASK = 0xFFFF, + + OCRDMA_AE_MCQE_CQVALID = BIT(31), + OCRDMA_AE_MCQE_CQID_MASK = 0xFFFF, + OCRDMA_AE_MCQE_VALID = BIT(31), + OCRDMA_AE_MCQE_AE = BIT(30), + OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT = 16, + OCRDMA_AE_MCQE_EVENT_TYPE_MASK = + 0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT, + OCRDMA_AE_MCQE_EVENT_CODE_SHIFT = 8, + OCRDMA_AE_MCQE_EVENT_CODE_MASK = + 0xFF << OCRDMA_AE_MCQE_EVENT_CODE_SHIFT +}; +struct ocrdma_ae_mcqe { + u32 qpvalid_qpid; + u32 cqvalid_cqid; + u32 evt_tag; + u32 valid_ae_event; +}; + +enum { + OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT = 0, + OCRDMA_AE_PVID_MCQE_ENABLED_MASK = 0xFF, + OCRDMA_AE_PVID_MCQE_TAG_SHIFT = 16, + OCRDMA_AE_PVID_MCQE_TAG_MASK = 0xFFFF << OCRDMA_AE_PVID_MCQE_TAG_SHIFT +}; + +struct ocrdma_ae_pvid_mcqe { + u32 tag_enabled; + u32 event_tag; + u32 rsvd1; + u32 rsvd2; +}; + +enum { + OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16, + OCRDMA_AE_MPA_MCQE_REQ_ID_MASK = 0xFFFF << + OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT, + + OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT = 8, + OCRDMA_AE_MPA_MCQE_EVENT_CODE_MASK = 0xFF << + OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT, + OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT = 16, + OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK = 0xFF << + OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT, + OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT = 30, + OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = BIT(30), + OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT = 31, + OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = BIT(31) +}; + +struct ocrdma_ae_mpa_mcqe { + u32 req_id; + u32 w1; + u32 w2; + u32 valid_ae_event; +}; + +enum { + 
OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0, + OCRDMA_AE_QP_MCQE_NEW_QP_STATE_MASK = 0xFFFF, + OCRDMA_AE_QP_MCQE_QP_ID_SHIFT = 16, + OCRDMA_AE_QP_MCQE_QP_ID_MASK = 0xFFFF << + OCRDMA_AE_QP_MCQE_QP_ID_SHIFT, + + OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT = 8, + OCRDMA_AE_QP_MCQE_EVENT_CODE_MASK = 0xFF << + OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT, + OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT = 16, + OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK = 0xFF << + OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT, + OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT = 30, + OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = BIT(30), + OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT = 31, + OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = BIT(31) +}; + +struct ocrdma_ae_qp_mcqe { + u32 qp_id_state; + u32 w1; + u32 w2; + u32 valid_ae_event; +}; + +#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 +#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 + +enum ocrdma_async_grp5_events { + OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, + OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02, + OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03 +}; + +enum OCRDMA_ASYNC_EVENT_TYPE { + OCRDMA_CQ_ERROR = 0x00, + OCRDMA_CQ_OVERRUN_ERROR = 0x01, + OCRDMA_CQ_QPCAT_ERROR = 0x02, + OCRDMA_QP_ACCESS_ERROR = 0x03, + OCRDMA_QP_COMM_EST_EVENT = 0x04, + OCRDMA_SQ_DRAINED_EVENT = 0x05, + OCRDMA_DEVICE_FATAL_EVENT = 0x08, + OCRDMA_SRQCAT_ERROR = 0x0E, + OCRDMA_SRQ_LIMIT_EVENT = 0x0F, + OCRDMA_QP_LAST_WQE_EVENT = 0x10, + + OCRDMA_MAX_ASYNC_ERRORS +}; + +/* mailbox command request and responses */ +enum { + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = BIT(2), + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT = 3, + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = BIT(3), + OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT = 8, + OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK = 0xFFFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT, + + OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT = 16, + OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT, + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8, + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF << + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT, + + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT, + + OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, + OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT = 16, + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT, + + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET = 24, + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK = 0xFF << + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET = 16, + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK = 0xFF << + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET, + + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET = 16, + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET, + + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET = 16, + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET, + + 
OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET, + + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET = 16, + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET, + + OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, + OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET, + + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET = 16, + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET, + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, +}; + +struct ocrdma_mbx_query_config { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + u32 qp_srq_cq_ird_ord; + u32 max_pd_ca_ack_delay; + u32 max_write_send_sge; + u32 max_ird_ord_per_qp; + u32 max_shared_ird_ord; + u32 max_mr; + u32 max_mr_size_hi; + u32 max_mr_size_lo; + u32 max_num_mr_pbl; + u32 max_mw; + u32 max_fmr; + u32 max_pages_per_frmr; + u32 max_mcast_group; + u32 max_mcast_qp_attach; + u32 max_total_mcast_qp_attach; + u32 wqe_rqe_stride_max_dpp_cqs; + u32 max_srq_rpir_qps; + u32 max_dpp_pds_credits; + u32 max_dpp_credits_pds_per_pd; + u32 max_wqes_rqes_per_q; + u32 max_cq_cqes_per_cq; + u32 max_srq_rqe_sge; +}; + +struct ocrdma_fw_ver_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u8 running_ver[32]; +}; + +struct ocrdma_fw_conf_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 config_num; + u32 asic_revision; + u32 phy_port; + u32 fn_mode; + struct { + u32 mode; + u32 nic_wqid_base; + u32 nic_wq_tot; + u32 prot_wqid_base; + u32 prot_wq_tot; + u32 prot_rqid_base; + u32 prot_rqid_tot; + u32 rsvd[6]; + } ulp[2]; + u32 fn_capabilities; + u32 rsvd1; + u32 rsvd2; + u32 base_eqid; + u32 max_eq; + +}; + +enum { + OCRDMA_FN_MODE_RDMA = 0x4 +}; + +enum { + OCRDMA_IF_TYPE_MASK = 0xFFFF0000, + OCRDMA_IF_TYPE_SHIFT = 0x10, + OCRDMA_PHY_TYPE_MASK = 0x0000FFFF, + OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000, + OCRDMA_FUTURE_DETAILS_SHIFT = 0x10, + OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF, + OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000, + OCRDMA_FSPEED_SUPP_SHIFT = 0x10, + OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF +}; + +struct ocrdma_get_phy_info_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 ityp_ptyp; + u32 misc_params; + u32 ftrdtl_exphydtl; + u32 fspeed_aspeed; + u32 future_use[2]; +}; + +enum { + OCRDMA_PHY_SPEED_ZERO = 0x0, + OCRDMA_PHY_SPEED_10MBPS = 0x1, + OCRDMA_PHY_SPEED_100MBPS = 0x2, + OCRDMA_PHY_SPEED_1GBPS = 0x4, + OCRDMA_PHY_SPEED_10GBPS = 0x8, + OCRDMA_PHY_SPEED_40GBPS = 0x20 +}; + +enum { + OCRDMA_PORT_NUM_MASK = 0x3F, + OCRDMA_PT_MASK = 0xC0, + OCRDMA_PT_SHIFT = 0x6, + OCRDMA_LINK_DUP_MASK = 0x0000FF00, + OCRDMA_LINK_DUP_SHIFT = 0x8, + OCRDMA_PHY_PS_MASK = 0x00FF0000, + OCRDMA_PHY_PS_SHIFT = 0x10, + OCRDMA_PHY_PFLT_MASK = 0xFF000000, + OCRDMA_PHY_PFLT_SHIFT = 0x18, + OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, + OCRDMA_QOS_LNKSP_SHIFT = 0x10, + OCRDMA_LLST_MASK = 0xFF, + OCRDMA_PLFC_MASK = 0x00000400, + OCRDMA_PLFC_SHIFT = 0x8, + OCRDMA_PLRFC_MASK = 0x00000200, + OCRDMA_PLRFC_SHIFT = 0x8, + OCRDMA_PLTFC_MASK = 
0x00000100, + OCRDMA_PLTFC_SHIFT = 0x8 +}; + +struct ocrdma_get_link_speed_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 pflt_pps_ld_pnum; + u32 qos_lsp; + u32 res_lls; +}; + +enum { + OCRDMA_PHYS_LINK_SPEED_ZERO = 0x0, + OCRDMA_PHYS_LINK_SPEED_10MBPS = 0x1, + OCRDMA_PHYS_LINK_SPEED_100MBPS = 0x2, + OCRDMA_PHYS_LINK_SPEED_1GBPS = 0x3, + OCRDMA_PHYS_LINK_SPEED_10GBPS = 0x4, + OCRDMA_PHYS_LINK_SPEED_20GBPS = 0x5, + OCRDMA_PHYS_LINK_SPEED_25GBPS = 0x6, + OCRDMA_PHYS_LINK_SPEED_40GBPS = 0x7, + OCRDMA_PHYS_LINK_SPEED_100GBPS = 0x8 +}; + +enum { + OCRDMA_CREATE_CQ_VER2 = 2, + OCRDMA_CREATE_CQ_VER3 = 3, + + OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF, + OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16, + OCRDMA_CREATE_CQ_PAGE_SIZE_MASK = 0xFF, + + OCRDMA_CREATE_CQ_COALESCWM_SHIFT = 12, + OCRDMA_CREATE_CQ_COALESCWM_MASK = BIT(13) | BIT(12), + OCRDMA_CREATE_CQ_FLAGS_NODELAY = BIT(14), + OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = BIT(15), + + OCRDMA_CREATE_CQ_EQ_ID_MASK = 0xFFFF, + OCRDMA_CREATE_CQ_CQE_COUNT_MASK = 0xFFFF +}; + +enum { + OCRDMA_CREATE_CQ_VER0 = 0, + OCRDMA_CREATE_CQ_DPP = 1, + OCRDMA_CREATE_CQ_TYPE_SHIFT = 24, + OCRDMA_CREATE_CQ_EQID_SHIFT = 22, + + OCRDMA_CREATE_CQ_CNT_SHIFT = 27, + OCRDMA_CREATE_CQ_FLAGS_VALID = BIT(29), + OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = BIT(31), + OCRDMA_CREATE_CQ_DEF_FLAGS = OCRDMA_CREATE_CQ_FLAGS_VALID | + OCRDMA_CREATE_CQ_FLAGS_EVENTABLE | + OCRDMA_CREATE_CQ_FLAGS_NODELAY +}; + +struct ocrdma_create_cq_cmd { + struct ocrdma_mbx_hdr req; + u32 pgsz_pgcnt; + u32 ev_cnt_flags; + u32 eqn; + u32 pdid_cqecnt; + u32 rsvd6; + struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; +}; + +struct ocrdma_create_cq { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_create_cq_cmd cmd; +}; + +enum { + OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10 +}; + +enum { + OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF +}; + +struct ocrdma_create_cq_cmd_rsp { + struct ocrdma_mbx_rsp rsp; + u32 cq_id; +}; + +struct ocrdma_create_cq_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_create_cq_cmd_rsp rsp; +}; + +enum { + OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22, + OCRDMA_CREATE_MQ_CQ_ID_SHIFT = 16, + OCRDMA_CREATE_MQ_RING_SIZE_SHIFT = 16, + OCRDMA_CREATE_MQ_VALID = BIT(31), + OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = BIT(0) +}; + +struct ocrdma_create_mq_req { + struct ocrdma_mbx_hdr req; + u32 cqid_pages; + u32 async_event_bitmap; + u32 async_cqid_ringsize; + u32 valid; + u32 async_cqid_valid; + u32 rsvd; + struct ocrdma_pa pa[8]; +}; + +struct ocrdma_create_mq_rsp { + struct ocrdma_mbx_rsp rsp; + u32 id; +}; + +enum { + OCRDMA_DESTROY_CQ_QID_SHIFT = 0, + OCRDMA_DESTROY_CQ_QID_MASK = 0xFFFF, + OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT = 16, + OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_MASK = 0xFFFF << + OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT +}; + +struct ocrdma_destroy_cq { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 bypass_flush_qid; +}; + +struct ocrdma_destroy_cq_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +enum { + OCRDMA_QPT_GSI = 1, + OCRDMA_QPT_RC = 2, + OCRDMA_QPT_UD = 4, +}; + +enum { + OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_PD_ID_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT = 19, + OCRDMA_CREATE_QP_REQ_QPT_SHIFT = 29, + OCRDMA_CREATE_QP_REQ_QPT_MASK = BIT(31) | BIT(30) | BIT(29), + + OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK = 0xFFFF << + 
OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT, + + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT, + + OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = BIT(0), + OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT = 1, + OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = BIT(1), + OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT = 2, + OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = BIT(2), + OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT = 3, + OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = BIT(3), + OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT = 4, + OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = BIT(4), + OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT = 5, + OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = BIT(5), + OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT = 6, + OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = BIT(6), + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT = 7, + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = BIT(7), + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT = 8, + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = BIT(8), + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT, + + OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT, + + OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT, + + OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT, + + OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT, + + OCRDMA_CREATE_QP_REQ_DPP_CQPID_SHIFT = 0, + OCRDMA_CREATE_QP_REQ_DPP_CQPID_MASK = 0xFFFF, + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT = 16, + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_MASK = 0xFFFF << + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT +}; + +enum { + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT = 16, + OCRDMA_CREATE_QP_RSP_DPP_PAGE_SHIFT = 1 +}; + +#define MAX_OCRDMA_IRD_PAGES 4 + +enum ocrdma_qp_flags { + OCRDMA_QP_MW_BIND = 1, + OCRDMA_QP_LKEY0 = (1 << 1), + OCRDMA_QP_FAST_REG = (1 << 2), + OCRDMA_QP_INB_RD = (1 << 6), + OCRDMA_QP_INB_WR = (1 << 7), +}; + +enum ocrdma_qp_state { + OCRDMA_QPS_RST = 0, + OCRDMA_QPS_INIT = 1, + OCRDMA_QPS_RTR = 2, + OCRDMA_QPS_RTS = 3, + OCRDMA_QPS_SQE = 4, + OCRDMA_QPS_SQ_DRAINING = 5, + OCRDMA_QPS_ERR = 6, + OCRDMA_QPS_SQD = 7 +}; + +struct ocrdma_create_qp_req { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 type_pgsz_pdn; + u32 max_wqe_rqe; + u32 max_sge_send_write; + u32 max_sge_recv_flags; + u32 max_ord_ird; + u32 num_wq_rq_pages; + u32 wqe_rqe_size; + u32 wq_rq_cqid; + struct ocrdma_pa wq_addr[MAX_OCRDMA_QP_PAGES]; + struct ocrdma_pa rq_addr[MAX_OCRDMA_QP_PAGES]; + u32 dpp_credits_cqid; + u32 rpir_lkey; + struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES]; +}; + +enum { + OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0, + OCRDMA_CREATE_QP_RSP_QP_ID_MASK = 0xFFFF, + + OCRDMA_CREATE_QP_RSP_MAX_RQE_SHIFT = 0, + 
OCRDMA_CREATE_QP_RSP_MAX_RQE_MASK = 0xFFFF, + OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT = 16, + OCRDMA_CREATE_QP_RSP_MAX_WQE_MASK = 0xFFFF << + OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT, + + OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_SHIFT = 0, + OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_MASK = 0xFFFF, + OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT = 16, + OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_MASK = 0xFFFF << + OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT, + + OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT = 16, + OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_MASK = 0xFFFF << + OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT, + + OCRDMA_CREATE_QP_RSP_MAX_IRD_SHIFT = 0, + OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK = 0xFFFF, + OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT = 16, + OCRDMA_CREATE_QP_RSP_MAX_ORD_MASK = 0xFFFF << + OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT, + + OCRDMA_CREATE_QP_RSP_RQ_ID_SHIFT = 0, + OCRDMA_CREATE_QP_RSP_RQ_ID_MASK = 0xFFFF, + OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT = 16, + OCRDMA_CREATE_QP_RSP_SQ_ID_MASK = 0xFFFF << + OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT, + + OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = BIT(0), + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT = 1, + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK = 0x7FFF << + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT, + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT = 16, + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK = 0xFFFF << + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT, +}; + +struct ocrdma_create_qp_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 qp_id; + u32 max_wqe_rqe; + u32 max_sge_send_write; + u32 max_sge_recv; + u32 max_ord_ird; + u32 sq_rq_id; + u32 dpp_response; +}; + +struct ocrdma_destroy_qp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 qp_id; +}; + +struct ocrdma_destroy_qp_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +enum { + OCRDMA_MODIFY_QP_ID_SHIFT = 0, + OCRDMA_MODIFY_QP_ID_MASK = 0xFFFF, + + OCRDMA_QP_PARA_QPS_VALID = BIT(0), + OCRDMA_QP_PARA_SQD_ASYNC_VALID = BIT(1), + OCRDMA_QP_PARA_PKEY_VALID = BIT(2), + OCRDMA_QP_PARA_QKEY_VALID = BIT(3), + OCRDMA_QP_PARA_PMTU_VALID = BIT(4), + OCRDMA_QP_PARA_ACK_TO_VALID = BIT(5), + OCRDMA_QP_PARA_RETRY_CNT_VALID = BIT(6), + OCRDMA_QP_PARA_RRC_VALID = BIT(7), + OCRDMA_QP_PARA_RQPSN_VALID = BIT(8), + OCRDMA_QP_PARA_MAX_IRD_VALID = BIT(9), + OCRDMA_QP_PARA_MAX_ORD_VALID = BIT(10), + OCRDMA_QP_PARA_RNT_VALID = BIT(11), + OCRDMA_QP_PARA_SQPSN_VALID = BIT(12), + OCRDMA_QP_PARA_DST_QPN_VALID = BIT(13), + OCRDMA_QP_PARA_MAX_WQE_VALID = BIT(14), + OCRDMA_QP_PARA_MAX_RQE_VALID = BIT(15), + OCRDMA_QP_PARA_SGE_SEND_VALID = BIT(16), + OCRDMA_QP_PARA_SGE_RECV_VALID = BIT(17), + OCRDMA_QP_PARA_SGE_WR_VALID = BIT(18), + OCRDMA_QP_PARA_INB_RDEN_VALID = BIT(19), + OCRDMA_QP_PARA_INB_WREN_VALID = BIT(20), + OCRDMA_QP_PARA_FLOW_LBL_VALID = BIT(21), + OCRDMA_QP_PARA_BIND_EN_VALID = BIT(22), + OCRDMA_QP_PARA_ZLKEY_EN_VALID = BIT(23), + OCRDMA_QP_PARA_FMR_EN_VALID = BIT(24), + OCRDMA_QP_PARA_INBAT_EN_VALID = BIT(25), + OCRDMA_QP_PARA_VLAN_EN_VALID = BIT(26), + + OCRDMA_MODIFY_QP_FLAGS_RD = BIT(0), + OCRDMA_MODIFY_QP_FLAGS_WR = BIT(1), + OCRDMA_MODIFY_QP_FLAGS_SEND = BIT(2), + OCRDMA_MODIFY_QP_FLAGS_ATOMIC = BIT(3) +}; + +enum { + OCRDMA_QP_PARAMS_SRQ_ID_SHIFT = 0, + OCRDMA_QP_PARAMS_SRQ_ID_MASK = 0xFFFF, + + OCRDMA_QP_PARAMS_MAX_RQE_SHIFT = 0, + OCRDMA_QP_PARAMS_MAX_RQE_MASK = 0xFFFF, + OCRDMA_QP_PARAMS_MAX_WQE_SHIFT = 16, + OCRDMA_QP_PARAMS_MAX_WQE_MASK = 0xFFFF << + OCRDMA_QP_PARAMS_MAX_WQE_SHIFT, + + OCRDMA_QP_PARAMS_MAX_SGE_WRITE_SHIFT = 0, + OCRDMA_QP_PARAMS_MAX_SGE_WRITE_MASK = 0xFFFF, + OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT = 16, + 
OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK = 0xFFFF << + OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT, + + OCRDMA_QP_PARAMS_FLAGS_FMR_EN = BIT(0), + OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = BIT(1), + OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = BIT(2), + OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = BIT(3), + OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = BIT(4), + OCRDMA_QP_PARAMS_STATE_SHIFT = 5, + OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7), + OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8), + OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9), + OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16, + OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF << + OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT, + + OCRDMA_QP_PARAMS_MAX_IRD_SHIFT = 0, + OCRDMA_QP_PARAMS_MAX_IRD_MASK = 0xFFFF, + OCRDMA_QP_PARAMS_MAX_ORD_SHIFT = 16, + OCRDMA_QP_PARAMS_MAX_ORD_MASK = 0xFFFF << + OCRDMA_QP_PARAMS_MAX_ORD_SHIFT, + + OCRDMA_QP_PARAMS_RQ_CQID_SHIFT = 0, + OCRDMA_QP_PARAMS_RQ_CQID_MASK = 0xFFFF, + OCRDMA_QP_PARAMS_WQ_CQID_SHIFT = 16, + OCRDMA_QP_PARAMS_WQ_CQID_MASK = 0xFFFF << + OCRDMA_QP_PARAMS_WQ_CQID_SHIFT, + + OCRDMA_QP_PARAMS_RQ_PSN_SHIFT = 0, + OCRDMA_QP_PARAMS_RQ_PSN_MASK = 0xFFFFFF, + OCRDMA_QP_PARAMS_HOP_LMT_SHIFT = 24, + OCRDMA_QP_PARAMS_HOP_LMT_MASK = 0xFF << + OCRDMA_QP_PARAMS_HOP_LMT_SHIFT, + + OCRDMA_QP_PARAMS_SQ_PSN_SHIFT = 0, + OCRDMA_QP_PARAMS_SQ_PSN_MASK = 0xFFFFFF, + OCRDMA_QP_PARAMS_TCLASS_SHIFT = 24, + OCRDMA_QP_PARAMS_TCLASS_MASK = 0xFF << + OCRDMA_QP_PARAMS_TCLASS_SHIFT, + + OCRDMA_QP_PARAMS_DEST_QPN_SHIFT = 0, + OCRDMA_QP_PARAMS_DEST_QPN_MASK = 0xFFFFFF, + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT = 24, + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK = 0x7 << + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT, + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT = 27, + OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK = 0x1F << + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT, + + OCRDMA_QP_PARAMS_PKEY_IDNEX_SHIFT = 0, + OCRDMA_QP_PARAMS_PKEY_INDEX_MASK = 0xFFFF, + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT = 18, + OCRDMA_QP_PARAMS_PATH_MTU_MASK = 0x3FFF << + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT, + + OCRDMA_QP_PARAMS_FLOW_LABEL_SHIFT = 0, + OCRDMA_QP_PARAMS_FLOW_LABEL_MASK = 0xFFFFF, + OCRDMA_QP_PARAMS_SL_SHIFT = 20, + OCRDMA_QP_PARAMS_SL_MASK = 0xF << + OCRDMA_QP_PARAMS_SL_SHIFT, + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT = 24, + OCRDMA_QP_PARAMS_RETRY_CNT_MASK = 0x7 << + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT, + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT = 27, + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK = 0x1F << + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT, + + OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_SHIFT = 0, + OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_MASK = 0xFFFF, + OCRDMA_QP_PARAMS_VLAN_SHIFT = 16, + OCRDMA_QP_PARAMS_VLAN_MASK = 0xFFFF << + OCRDMA_QP_PARAMS_VLAN_SHIFT +}; + +struct ocrdma_qp_params { + u32 id; + u32 max_wqe_rqe; + u32 max_sge_send_write; + u32 max_sge_recv_flags; + u32 max_ord_ird; + u32 wq_rq_cqid; + u32 hop_lmt_rq_psn; + u32 tclass_sq_psn; + u32 ack_to_rnr_rtc_dest_qpn; + u32 path_mtu_pkey_indx; + u32 rnt_rc_sl_fl; + u8 sgid[16]; + u8 dgid[16]; + u32 dmac_b0_to_b3; + u32 vlan_dmac_b4_to_b5; + u32 qkey; +}; + + +struct ocrdma_modify_qp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + struct ocrdma_qp_params params; + u32 flags; + u32 rdma_flags; + u32 num_outstanding_atomic_rd; +}; + +enum { + OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0, + OCRDMA_MODIFY_QP_RSP_MAX_RQE_MASK = 0xFFFF, + OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT = 16, + OCRDMA_MODIFY_QP_RSP_MAX_WQE_MASK = 0xFFFF << + OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT, + + OCRDMA_MODIFY_QP_RSP_MAX_IRD_SHIFT = 0, + OCRDMA_MODIFY_QP_RSP_MAX_IRD_MASK = 0xFFFF, + OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT = 16, + 
OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF << + OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT +}; + +struct ocrdma_modify_qp_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 max_wqe_rqe; + u32 max_ord_ird; +}; + +struct ocrdma_query_qp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + +#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 +#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF + u32 qp_id; +}; + +struct ocrdma_query_qp_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + struct ocrdma_qp_params params; + u32 dpp_credits_cqid; + u32 rbq_id; +}; + +enum { + OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0, + OCRDMA_CREATE_SRQ_PD_ID_MASK = 0xFFFF, + OCRDMA_CREATE_SRQ_PG_SZ_SHIFT = 16, + OCRDMA_CREATE_SRQ_PG_SZ_MASK = 0x3 << + OCRDMA_CREATE_SRQ_PG_SZ_SHIFT, + + OCRDMA_CREATE_SRQ_MAX_RQE_SHIFT = 0, + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT = 16, + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_MASK = 0xFFFF << + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT, + + OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT = 0, + OCRDMA_CREATE_SRQ_RQE_SIZE_MASK = 0xFFFF, + OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT = 16, + OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_MASK = 0xFFFF << + OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT +}; + +struct ocrdma_create_srq { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 pgsz_pdid; + u32 max_sge_rqe; + u32 pages_rqe_sz; + struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES]; +}; + +enum { + OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0, + OCRDMA_CREATE_SRQ_RSP_SRQ_ID_MASK = 0xFFFFFF, + + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT = 0, + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK = 0xFFFF, + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT = 16, + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK = 0xFFFF << + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT +}; + +struct ocrdma_create_srq_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 id; + u32 max_sge_rqe_allocated; +}; + +enum { + OCRDMA_MODIFY_SRQ_ID_SHIFT = 0, + OCRDMA_MODIFY_SRQ_ID_MASK = 0xFFFFFF, + + OCRDMA_MODIFY_SRQ_MAX_RQE_SHIFT = 0, + OCRDMA_MODIFY_SRQ_MAX_RQE_MASK = 0xFFFF, + OCRDMA_MODIFY_SRQ_LIMIT_SHIFT = 16, + OCRDMA_MODIFY_SRQ__LIMIT_MASK = 0xFFFF << + OCRDMA_MODIFY_SRQ_LIMIT_SHIFT +}; + +struct ocrdma_modify_srq { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rep; + + u32 id; + u32 limit_max_rqe; +}; + +enum { + OCRDMA_QUERY_SRQ_ID_SHIFT = 0, + OCRDMA_QUERY_SRQ_ID_MASK = 0xFFFFFF +}; + +struct ocrdma_query_srq { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp req; + + u32 id; +}; + +enum { + OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0, + OCRDMA_QUERY_SRQ_RSP_PD_ID_MASK = 0xFFFF, + OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT = 16, + OCRDMA_QUERY_SRQ_RSP_MAX_RQE_MASK = 0xFFFF << + OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT, + + OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_SHIFT = 0, + OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK = 0xFFFF, + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT = 16, + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_MASK = 0xFFFF << + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT +}; + +struct ocrdma_query_srq_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp req; + + u32 max_rqe_pdid; + u32 srq_lmt_max_sge; +}; + +enum { + OCRDMA_DESTROY_SRQ_ID_SHIFT = 0, + OCRDMA_DESTROY_SRQ_ID_MASK = 0xFFFFFF +}; + +struct ocrdma_destroy_srq { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp req; + + u32 id; +}; + +enum { + OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16), + OCRDMA_DPP_PAGE_SIZE = 4096 +}; + +struct ocrdma_alloc_pd { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 enable_dpp_rsvd; +}; + +enum { + OCRDMA_ALLOC_PD_RSP_DPP = BIT(16), + 
OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT = 20, + OCRDMA_ALLOC_PD_RSP_PDID_MASK = 0xFFFF, +}; + +struct ocrdma_alloc_pd_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + u32 dpp_page_pdid; +}; + +struct ocrdma_dealloc_pd { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 id; +}; + +struct ocrdma_dealloc_pd_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +struct ocrdma_alloc_pd_range { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 enable_dpp_rsvd; + u32 pd_count; +}; + +struct ocrdma_alloc_pd_range_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + u32 dpp_page_pdid; + u32 pd_count; +}; + +enum { + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF, +}; + +struct ocrdma_dealloc_pd_range { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 start_pd_id; + u32 pd_count; +}; + +struct ocrdma_dealloc_pd_range_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 rsvd; +}; + +enum { + OCRDMA_ADDR_CHECK_ENABLE = 1, + OCRDMA_ADDR_CHECK_DISABLE = 0 +}; + +enum { + OCRDMA_ALLOC_LKEY_PD_ID_SHIFT = 0, + OCRDMA_ALLOC_LKEY_PD_ID_MASK = 0xFFFF, + + OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT = 0, + OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = BIT(0), + OCRDMA_ALLOC_LKEY_FMR_SHIFT = 1, + OCRDMA_ALLOC_LKEY_FMR_MASK = BIT(1), + OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT = 2, + OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = BIT(2), + OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT = 3, + OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = BIT(3), + OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT = 4, + OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = BIT(4), + OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT = 5, + OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = BIT(5), + OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = BIT(6), + OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT = 6, + OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT = 16, + OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK = 0xFFFF << + OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT +}; + +struct ocrdma_alloc_lkey { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 pdid; + u32 pbl_sz_flags; +}; + +struct ocrdma_alloc_lkey_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 lrkey; + u32 num_pbl_rsvd; +}; + +struct ocrdma_dealloc_lkey { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 lkey; + u32 rsvd_frmr; +}; + +struct ocrdma_dealloc_lkey_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +#define MAX_OCRDMA_NSMR_PBL (u32)22 +#define MAX_OCRDMA_PBL_SIZE 65536 +#define MAX_OCRDMA_PBL_PER_LKEY 32767 + +enum { + OCRDMA_REG_NSMR_LRKEY_INDEX_SHIFT = 0, + OCRDMA_REG_NSMR_LRKEY_INDEX_MASK = 0xFFFFFF, + OCRDMA_REG_NSMR_LRKEY_SHIFT = 24, + OCRDMA_REG_NSMR_LRKEY_MASK = 0xFF << + OCRDMA_REG_NSMR_LRKEY_SHIFT, + + OCRDMA_REG_NSMR_PD_ID_SHIFT = 0, + OCRDMA_REG_NSMR_PD_ID_MASK = 0xFFFF, + OCRDMA_REG_NSMR_NUM_PBL_SHIFT = 16, + OCRDMA_REG_NSMR_NUM_PBL_MASK = 0xFFFF << + OCRDMA_REG_NSMR_NUM_PBL_SHIFT, + + OCRDMA_REG_NSMR_PBE_SIZE_SHIFT = 0, + OCRDMA_REG_NSMR_PBE_SIZE_MASK = 0xFFFF, + OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT = 16, + OCRDMA_REG_NSMR_HPAGE_SIZE_MASK = 0xFF << + OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT, + OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT = 24, + OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = BIT(24), + OCRDMA_REG_NSMR_ZB_SHIFT = 25, + OCRDMA_REG_NSMR_ZB_SHIFT_MASK = BIT(25), + OCRDMA_REG_NSMR_REMOTE_INV_SHIFT = 26, + OCRDMA_REG_NSMR_REMOTE_INV_MASK = BIT(26), + OCRDMA_REG_NSMR_REMOTE_WR_SHIFT = 27, + OCRDMA_REG_NSMR_REMOTE_WR_MASK = BIT(27), + OCRDMA_REG_NSMR_REMOTE_RD_SHIFT = 28, + OCRDMA_REG_NSMR_REMOTE_RD_MASK = BIT(28), + OCRDMA_REG_NSMR_LOCAL_WR_SHIFT = 29, + 
OCRDMA_REG_NSMR_LOCAL_WR_MASK = BIT(29), + OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT = 30, + OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = BIT(30), + OCRDMA_REG_NSMR_LAST_SHIFT = 31, + OCRDMA_REG_NSMR_LAST_MASK = BIT(31) +}; + +struct ocrdma_reg_nsmr { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr cmd; + + u32 fr_mr; + u32 num_pbl_pdid; + u32 flags_hpage_pbe_sz; + u32 totlen_low; + u32 totlen_high; + u32 fbo_low; + u32 fbo_high; + u32 va_loaddr; + u32 va_hiaddr; + struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL]; +}; + +enum { + OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0, + OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK = 0xFFFF, + OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT = 16, + OCRDMA_REG_NSMR_CONT_NUM_PBL_MASK = 0xFFFF << + OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT, + + OCRDMA_REG_NSMR_CONT_LAST_SHIFT = 31, + OCRDMA_REG_NSMR_CONT_LAST_MASK = BIT(31) +}; + +struct ocrdma_reg_nsmr_cont { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr cmd; + + u32 lrkey; + u32 num_pbl_offset; + u32 last; + + struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL]; +}; + +struct ocrdma_pbe { + u32 pa_hi; + u32 pa_lo; +}; + +enum { + OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16, + OCRDMA_REG_NSMR_RSP_NUM_PBL_MASK = 0xFFFF0000 +}; +struct ocrdma_reg_nsmr_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 lrkey; + u32 num_pbl; +}; + +enum { + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0, + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_MASK = 0xFFFFFF, + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT = 24, + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_MASK = 0xFF << + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT, + + OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT = 16, + OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_MASK = 0xFFFF << + OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT +}; + +struct ocrdma_reg_nsmr_cont_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 lrkey_key_index; + u32 num_pbl; +}; + +enum { + OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0, + OCRDMA_ALLOC_MW_PD_ID_MASK = 0xFFFF +}; + +struct ocrdma_alloc_mw { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 pdid; +}; + +enum { + OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0, + OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_MASK = 0xFFFFFF +}; + +struct ocrdma_alloc_mw_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + + u32 lrkey_index; +}; + +struct ocrdma_attach_mcast { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 qp_id; + u8 mgid[16]; + u32 mac_b0_to_b3; + u32 vlan_mac_b4_to_b5; +}; + +struct ocrdma_attach_mcast_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +struct ocrdma_detach_mcast { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 qp_id; + u8 mgid[16]; + u32 mac_b0_to_b3; + u32 vlan_mac_b4_to_b5; +}; + +struct ocrdma_detach_mcast_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +enum { + OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19, + OCRDMA_CREATE_AH_NUM_PAGES_MASK = 0xF << + OCRDMA_CREATE_AH_NUM_PAGES_SHIFT, + + OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT = 16, + OCRDMA_CREATE_AH_PAGE_SIZE_MASK = 0x7 << + OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT, + + OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT = 23, + OCRDMA_CREATE_AH_ENTRY_SIZE_MASK = 0x1FF << + OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT, +}; + +#define OCRDMA_AH_TBL_PAGES 8 + +struct ocrdma_create_ah_tbl { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + + u32 ah_conf; + struct ocrdma_pa tbl_addr[8]; +}; + +struct ocrdma_create_ah_tbl_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + u32 ahid; +}; + +struct ocrdma_delete_ah_tbl { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 ahid; +}; + 
+struct ocrdma_delete_ah_tbl_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; +}; + +enum { + OCRDMA_EQE_VALID_SHIFT = 0, + OCRDMA_EQE_VALID_MASK = BIT(0), + OCRDMA_EQE_MAJOR_CODE_MASK = 0x0E, + OCRDMA_EQE_MAJOR_CODE_SHIFT = 0x01, + OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, + OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, + OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << + OCRDMA_EQE_RESOURCE_ID_SHIFT, +}; + +enum major_code { + OCRDMA_MAJOR_CODE_COMPLETION = 0x00, + OCRDMA_MAJOR_CODE_SENTINAL = 0x01 +}; + +struct ocrdma_eqe { + u32 id_valid; +}; + +enum OCRDMA_CQE_STATUS { + OCRDMA_CQE_SUCCESS = 0, + OCRDMA_CQE_LOC_LEN_ERR, + OCRDMA_CQE_LOC_QP_OP_ERR, + OCRDMA_CQE_LOC_EEC_OP_ERR, + OCRDMA_CQE_LOC_PROT_ERR, + OCRDMA_CQE_WR_FLUSH_ERR, + OCRDMA_CQE_MW_BIND_ERR, + OCRDMA_CQE_BAD_RESP_ERR, + OCRDMA_CQE_LOC_ACCESS_ERR, + OCRDMA_CQE_REM_INV_REQ_ERR, + OCRDMA_CQE_REM_ACCESS_ERR, + OCRDMA_CQE_REM_OP_ERR, + OCRDMA_CQE_RETRY_EXC_ERR, + OCRDMA_CQE_RNR_RETRY_EXC_ERR, + OCRDMA_CQE_LOC_RDD_VIOL_ERR, + OCRDMA_CQE_REM_INV_RD_REQ_ERR, + OCRDMA_CQE_REM_ABORT_ERR, + OCRDMA_CQE_INV_EECN_ERR, + OCRDMA_CQE_INV_EEC_STATE_ERR, + OCRDMA_CQE_FATAL_ERR, + OCRDMA_CQE_RESP_TIMEOUT_ERR, + OCRDMA_CQE_GENERAL_ERR, + + OCRDMA_MAX_CQE_ERR +}; + +enum { + /* w0 */ + OCRDMA_CQE_WQEIDX_SHIFT = 0, + OCRDMA_CQE_WQEIDX_MASK = 0xFFFF, + + /* w1 */ + OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16, + OCRDMA_CQE_PKEY_SHIFT = 0, + OCRDMA_CQE_PKEY_MASK = 0xFFFF, + + /* w2 */ + OCRDMA_CQE_QPN_SHIFT = 0, + OCRDMA_CQE_QPN_MASK = 0x0000FFFF, + + OCRDMA_CQE_BUFTAG_SHIFT = 16, + OCRDMA_CQE_BUFTAG_MASK = 0xFFFF << OCRDMA_CQE_BUFTAG_SHIFT, + + /* w3 */ + OCRDMA_CQE_UD_STATUS_SHIFT = 24, + OCRDMA_CQE_UD_STATUS_MASK = 0x7 << OCRDMA_CQE_UD_STATUS_SHIFT, + OCRDMA_CQE_STATUS_SHIFT = 16, + OCRDMA_CQE_STATUS_MASK = 0xFF << OCRDMA_CQE_STATUS_SHIFT, + OCRDMA_CQE_VALID = BIT(31), + OCRDMA_CQE_INVALIDATE = BIT(30), + OCRDMA_CQE_QTYPE = BIT(29), + OCRDMA_CQE_IMM = BIT(28), + OCRDMA_CQE_WRITE_IMM = BIT(27), + OCRDMA_CQE_QTYPE_SQ = 0, + OCRDMA_CQE_QTYPE_RQ = 1, + OCRDMA_CQE_SRCQP_MASK = 0xFFFFFF +}; + +struct ocrdma_cqe { + union { + /* w0 to w2 */ + struct { + u32 wqeidx; + u32 bytes_xfered; + u32 qpn; + } wq; + struct { + u32 lkey_immdt; + u32 rxlen; + u32 buftag_qpn; + } rq; + struct { + u32 lkey_immdt; + u32 rxlen_pkey; + u32 buftag_qpn; + } ud; + struct { + u32 word_0; + u32 word_1; + u32 qpn; + } cmn; + }; + u32 flags_status_srcqpn; /* w3 */ +}; + +struct ocrdma_sge { + u32 addr_hi; + u32 addr_lo; + u32 lrkey; + u32 len; +}; + +enum { + OCRDMA_FLAG_SIG = 0x1, + OCRDMA_FLAG_INV = 0x2, + OCRDMA_FLAG_FENCE_L = 0x4, + OCRDMA_FLAG_FENCE_R = 0x8, + OCRDMA_FLAG_SOLICIT = 0x10, + OCRDMA_FLAG_IMM = 0x20, + OCRDMA_FLAG_AH_VLAN_PR = 0x40, + + /* Stag flags */ + OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, + OCRDMA_LKEY_FLAG_REMOTE_RD = 0x2, + OCRDMA_LKEY_FLAG_REMOTE_WR = 0x4, + OCRDMA_LKEY_FLAG_VATO = 0x8, +}; + +enum OCRDMA_WQE_OPCODE { + OCRDMA_WRITE = 0x06, + OCRDMA_READ = 0x0C, + OCRDMA_RESV0 = 0x02, + OCRDMA_SEND = 0x00, + OCRDMA_CMP_SWP = 0x14, + OCRDMA_BIND_MW = 0x10, + OCRDMA_FR_MR = 0x11, + OCRDMA_RESV1 = 0x0A, + OCRDMA_LKEY_INV = 0x15, + OCRDMA_FETCH_ADD = 0x13, + OCRDMA_POST_RQ = 0x12 +}; + +enum { + OCRDMA_TYPE_INLINE = 0x0, + OCRDMA_TYPE_LKEY = 0x1, +}; + +enum { + OCRDMA_WQE_OPCODE_SHIFT = 0, + OCRDMA_WQE_OPCODE_MASK = 0x0000001F, + OCRDMA_WQE_FLAGS_SHIFT = 5, + OCRDMA_WQE_TYPE_SHIFT = 16, + OCRDMA_WQE_TYPE_MASK = 0x00030000, + OCRDMA_WQE_SIZE_SHIFT = 18, + OCRDMA_WQE_SIZE_MASK = 0xFF, + OCRDMA_WQE_NXT_WQE_SIZE_SHIFT = 25, + + OCRDMA_WQE_LKEY_FLAGS_SHIFT = 0, + 
OCRDMA_WQE_LKEY_FLAGS_MASK = 0xF +}; + +/* header WQE for all the SQ and RQ operations */ +struct ocrdma_hdr_wqe { + u32 cw; + union { + u32 rsvd_tag; + u32 rsvd_lkey_flags; + }; + union { + u32 immdt; + u32 lkey; + }; + u32 total_len; +}; + +struct ocrdma_ewqe_ud_hdr { + u32 rsvd_dest_qpn; + u32 qkey; + u32 rsvd_ahid; + u32 rsvd; +}; + +/* extended wqe followed by hdr_wqe for Fast Memory register */ +struct ocrdma_ewqe_fr { + u32 va_hi; + u32 va_lo; + u32 fbo_hi; + u32 fbo_lo; + u32 size_sge; + u32 num_sges; + u32 rsvd; + u32 rsvd2; +}; + +struct ocrdma_eth_basic { + u8 dmac[6]; + u8 smac[6]; + __be16 eth_type; +} __packed; + +struct ocrdma_eth_vlan { + u8 dmac[6]; + u8 smac[6]; + __be16 eth_type; + __be16 vlan_tag; +#define OCRDMA_ROCE_ETH_TYPE 0x8915 + __be16 roce_eth_type; +} __packed; + +struct ocrdma_grh { + __be32 tclass_flow; + __be32 pdid_hoplimit; + u8 sgid[16]; + u8 dgid[16]; + u16 rsvd; +} __packed; + +#define OCRDMA_AV_VALID BIT(7) +#define OCRDMA_AV_VLAN_VALID BIT(1) + +struct ocrdma_av { + struct ocrdma_eth_vlan eth_hdr; + struct ocrdma_grh grh; + u32 valid; +} __packed; + +struct ocrdma_rsrc_stats { + u32 dpp_pds; + u32 non_dpp_pds; + u32 rc_dpp_qps; + u32 uc_dpp_qps; + u32 ud_dpp_qps; + u32 rc_non_dpp_qps; + u32 rsvd; + u32 uc_non_dpp_qps; + u32 ud_non_dpp_qps; + u32 rsvd1; + u32 srqs; + u32 rbqs; + u32 r64K_nsmr; + u32 r64K_to_2M_nsmr; + u32 r2M_to_44M_nsmr; + u32 r44M_to_1G_nsmr; + u32 r1G_to_4G_nsmr; + u32 nsmr_count_4G_to_32G; + u32 r32G_to_64G_nsmr; + u32 r64G_to_128G_nsmr; + u32 r128G_to_higher_nsmr; + u32 embedded_nsmr; + u32 frmr; + u32 prefetch_qps; + u32 ondemand_qps; + u32 phy_mr; + u32 mw; + u32 rsvd2[7]; +}; + +struct ocrdma_db_err_stats { + u32 sq_doorbell_errors; + u32 cq_doorbell_errors; + u32 rq_srq_doorbell_errors; + u32 cq_overflow_errors; + u32 rsvd[4]; +}; + +struct ocrdma_wqe_stats { + u32 large_send_rc_wqes_lo; + u32 large_send_rc_wqes_hi; + u32 large_write_rc_wqes_lo; + u32 large_write_rc_wqes_hi; + u32 rsvd[4]; + u32 read_wqes_lo; + u32 read_wqes_hi; + u32 frmr_wqes_lo; + u32 frmr_wqes_hi; + u32 mw_bind_wqes_lo; + u32 mw_bind_wqes_hi; + u32 invalidate_wqes_lo; + u32 invalidate_wqes_hi; + u32 rsvd1[2]; + u32 dpp_wqe_drops; + u32 rsvd2[5]; +}; + +struct ocrdma_tx_stats { + u32 send_pkts_lo; + u32 send_pkts_hi; + u32 write_pkts_lo; + u32 write_pkts_hi; + u32 read_pkts_lo; + u32 read_pkts_hi; + u32 read_rsp_pkts_lo; + u32 read_rsp_pkts_hi; + u32 ack_pkts_lo; + u32 ack_pkts_hi; + u32 send_bytes_lo; + u32 send_bytes_hi; + u32 write_bytes_lo; + u32 write_bytes_hi; + u32 read_req_bytes_lo; + u32 read_req_bytes_hi; + u32 read_rsp_bytes_lo; + u32 read_rsp_bytes_hi; + u32 ack_timeouts; + u32 rsvd[5]; +}; + + +struct ocrdma_tx_qp_err_stats { + u32 local_length_errors; + u32 local_protection_errors; + u32 local_qp_operation_errors; + u32 retry_count_exceeded_errors; + u32 rnr_retry_count_exceeded_errors; + u32 rsvd[3]; +}; + +struct ocrdma_rx_stats { + u32 roce_frame_bytes_lo; + u32 roce_frame_bytes_hi; + u32 roce_frame_icrc_drops; + u32 roce_frame_payload_len_drops; + u32 ud_drops; + u32 qp1_drops; + u32 psn_error_request_packets; + u32 psn_error_resp_packets; + u32 rnr_nak_timeouts; + u32 rnr_nak_receives; + u32 roce_frame_rxmt_drops; + u32 nak_count_psn_sequence_errors; + u32 rc_drop_count_lookup_errors; + u32 rq_rnr_naks; + u32 srq_rnr_naks; + u32 roce_frames_lo; + u32 roce_frames_hi; + u32 rsvd; +}; + +struct ocrdma_rx_qp_err_stats { + u32 nak_invalid_requst_errors; + u32 nak_remote_operation_errors; + u32 nak_count_remote_access_errors; + u32 
local_length_errors; + u32 local_protection_errors; + u32 local_qp_operation_errors; + u32 rsvd[2]; +}; + +struct ocrdma_tx_dbg_stats { + u32 data[100]; +}; + +struct ocrdma_rx_dbg_stats { + u32 data[200]; +}; + +struct ocrdma_rdma_stats_req { + struct ocrdma_mbx_hdr hdr; + u8 reset_stats; + u8 rsvd[3]; +} __packed; + +struct ocrdma_rdma_stats_resp { + struct ocrdma_mbx_hdr hdr; + struct ocrdma_rsrc_stats act_rsrc_stats; + struct ocrdma_rsrc_stats th_rsrc_stats; + struct ocrdma_db_err_stats db_err_stats; + struct ocrdma_wqe_stats wqe_stats; + struct ocrdma_tx_stats tx_stats; + struct ocrdma_tx_qp_err_stats tx_qp_err_stats; + struct ocrdma_rx_stats rx_stats; + struct ocrdma_rx_qp_err_stats rx_qp_err_stats; + struct ocrdma_tx_dbg_stats tx_dbg_stats; + struct ocrdma_rx_dbg_stats rx_dbg_stats; +} __packed; + +enum { + OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF, + OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF, + OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_CV_MASK = 0xFF, + OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000, + OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000, + OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E, + OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF, + OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000, + OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000, + OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF, + OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_NETFIL_MASK =0xFF +}; + +struct mgmt_hba_attribs { + u8 flashrom_version_string[32]; + u8 manufacturer_name[32]; + u32 supported_modes; + u32 rsvd_eprom_verhi_verlo; + u32 mbx_ds_ver; + u32 epfw_ds_ver; + u8 ncsi_ver_string[12]; + u32 default_extended_timeout; + u8 controller_model_number[32]; + u8 controller_description[64]; + u8 controller_serial_number[32]; + u8 ip_version_string[32]; + u8 firmware_version_string[32]; + u8 bios_version_string[32]; + u8 redboot_version_string[32]; + u8 driver_version_string[32]; + u8 fw_on_flash_version_string[32]; + u32 functionalities_supported; + u32 guid0_asicrev_cdblen; + u8 generational_guid[12]; + u32 portcnt_guid15; + u32 mfuncdev_iscsi_ldtout; + u32 ptpnum_maxdoms_hbast_cv; + u32 firmware_post_status; + u32 hba_mtu[8]; + u32 
res_asicgen_iscsi_feaures; + u32 rsvd1[3]; +}; + +struct mgmt_controller_attrib { + struct mgmt_hba_attribs hba_attribs; + u32 pci_did_vid; + u32 pci_ssid_svid; + u32 ityp_fnum_devnum_bnum; + u32 uid_hi; + u32 uid_lo; + u32 res_nnetfil; + u32 rsvd0[4]; +}; + +struct ocrdma_get_ctrl_attribs_rsp { + struct ocrdma_mbx_hdr hdr; + struct mgmt_controller_attrib ctrl_attribs; +}; + +#define OCRDMA_SUBSYS_DCBX 0x10 + +enum OCRDMA_DCBX_OPCODE { + OCRDMA_CMD_GET_DCBX_CONFIG = 0x01 +}; + +enum OCRDMA_DCBX_PARAM_TYPE { + OCRDMA_PARAMETER_TYPE_ADMIN = 0x00, + OCRDMA_PARAMETER_TYPE_OPER = 0x01, + OCRDMA_PARAMETER_TYPE_PEER = 0x02 +}; + +enum OCRDMA_DCBX_APP_PROTO { + OCRDMA_APP_PROTO_ROCE = 0x8915 +}; + +enum OCRDMA_DCBX_PROTO { + OCRDMA_PROTO_SELECT_L2 = 0x00, + OCRDMA_PROTO_SELECT_L4 = 0x01 +}; + +enum OCRDMA_DCBX_APP_PARAM { + OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF, + OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF, + OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10, + OCRDMA_APP_PARAM_VALID_MASK = 0xFF, + OCRDMA_APP_PARAM_VALID_SHIFT = 0x18 +}; + +enum OCRDMA_DCBX_STATE_FLAGS { + OCRDMA_STATE_FLAG_ENABLED = 0x01, + OCRDMA_STATE_FLAG_ADDVERTISED = 0x02, + OCRDMA_STATE_FLAG_WILLING = 0x04, + OCRDMA_STATE_FLAG_SYNC = 0x08, + OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000, + OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000 +}; + +enum OCRDMA_TCV_AEV_OPV_ST { + OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF, + OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18, + OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10, + OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08, + OCRDMA_DCBX_STATE_MASK = 0xFF +}; + +struct ocrdma_app_parameter { + u32 valid_proto_app; + u32 oui; + u32 app_prio[2]; +}; + +struct ocrdma_dcbx_cfg { + u32 tcv_aev_opv_st; + u32 tc_state; + u32 pfc_state; + u32 qcn_state; + u32 appl_state; + u32 ll_state; + u32 tc_bw[2]; + u32 tc_prio[8]; + u32 pfc_prio[2]; + struct ocrdma_app_parameter app_param[15]; +}; + +struct ocrdma_get_dcbx_cfg_req { + struct ocrdma_mbx_hdr hdr; + u32 param_type; +} __packed; + +struct ocrdma_get_dcbx_cfg_rsp { + struct ocrdma_mbx_rsp hdr; + struct ocrdma_dcbx_cfg cfg; +} __packed; + +#endif /* __OCRDMA_SLI_H__ */ diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.c new file mode 100644 index 000000000..48d7ef51a --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -0,0 +1,857 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2014 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#include +#include +#include "ocrdma_stats.h" + +static struct dentry *ocrdma_dbgfs_dir; + +static int ocrdma_add_stat(char *start, char *pcur, + char *name, u64 count) +{ + char buff[128] = {0}; + int cpy_len = 0; + + snprintf(buff, 128, "%s: %llu\n", name, count); + cpy_len = strlen(buff); + + if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) { + pr_err("%s: No space in stats buff\n", __func__); + return 0; + } + + memcpy(pcur, buff, cpy_len); + return cpy_len; +} + +static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) +{ + struct stats_mem *mem = &dev->stats_mem; + + /* Alloc mbox command mem*/ + mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), + sizeof(struct ocrdma_rdma_stats_resp)); + + mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); + if (!mem->va) { + pr_err("%s: stats mbox allocation failed\n", __func__); + return false; + } + + memset(mem->va, 0, mem->size); + + /* Alloc debugfs mem */ + mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL); + if (!mem->debugfs_mem) { + pr_err("%s: stats debugfs mem allocation failed\n", __func__); + return false; + } + + return true; +} + +static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) +{ + struct stats_mem *mem = &dev->stats_mem; + + if (mem->va) + dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, + mem->va, mem->pa); + kfree(mem->debugfs_mem); +} + +static char *ocrdma_resource_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds", + (u64)rsrc_stats->dpp_pds); + pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds", + (u64)rsrc_stats->non_dpp_pds); + pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps", + (u64)rsrc_stats->rc_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps", + (u64)rsrc_stats->uc_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps", + (u64)rsrc_stats->ud_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps", + (u64)rsrc_stats->rc_non_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps", + (u64)rsrc_stats->uc_non_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps", + (u64)rsrc_stats->ud_non_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_srqs", + (u64)rsrc_stats->srqs); + pcur += ocrdma_add_stat(stats, pcur, "active_rbqs", + (u64)rsrc_stats->rbqs); + pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr", + (u64)rsrc_stats->r64K_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr", + (u64)rsrc_stats->r64K_to_2M_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr", + (u64)rsrc_stats->r2M_to_44M_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr", + (u64)rsrc_stats->r44M_to_1G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr", + (u64)rsrc_stats->r1G_to_4G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G", + (u64)rsrc_stats->nsmr_count_4G_to_32G); + pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr", + 
(u64)rsrc_stats->r32G_to_64G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr", + (u64)rsrc_stats->r64G_to_128G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr", + (u64)rsrc_stats->r128G_to_higher_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr", + (u64)rsrc_stats->embedded_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "active_frmr", + (u64)rsrc_stats->frmr); + pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps", + (u64)rsrc_stats->prefetch_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps", + (u64)rsrc_stats->ondemand_qps); + pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr", + (u64)rsrc_stats->phy_mr); + pcur += ocrdma_add_stat(stats, pcur, "active_mw", + (u64)rsrc_stats->mw); + + /* Print the threshold stats */ + rsrc_stats = &rdma_stats->th_rsrc_stats; + + pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds", + (u64)rsrc_stats->dpp_pds); + pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds", + (u64)rsrc_stats->non_dpp_pds); + pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps", + (u64)rsrc_stats->rc_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps", + (u64)rsrc_stats->uc_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps", + (u64)rsrc_stats->ud_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps", + (u64)rsrc_stats->rc_non_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps", + (u64)rsrc_stats->uc_non_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps", + (u64)rsrc_stats->ud_non_dpp_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs", + (u64)rsrc_stats->srqs); + pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs", + (u64)rsrc_stats->rbqs); + pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr", + (u64)rsrc_stats->r64K_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr", + (u64)rsrc_stats->r64K_to_2M_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr", + (u64)rsrc_stats->r2M_to_44M_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr", + (u64)rsrc_stats->r44M_to_1G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr", + (u64)rsrc_stats->r1G_to_4G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G", + (u64)rsrc_stats->nsmr_count_4G_to_32G); + pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr", + (u64)rsrc_stats->r32G_to_64G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr", + (u64)rsrc_stats->r64G_to_128G_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr", + (u64)rsrc_stats->r128G_to_higher_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr", + (u64)rsrc_stats->embedded_nsmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr", + (u64)rsrc_stats->frmr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps", + (u64)rsrc_stats->prefetch_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps", + (u64)rsrc_stats->ondemand_qps); + pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr", + (u64)rsrc_stats->phy_mr); + pcur += ocrdma_add_stat(stats, pcur, "threshold_mw", + (u64)rsrc_stats->mw); + return stats; +} + +static char *ocrdma_rx_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct 
ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat + (stats, pcur, "roce_frame_bytes", + convert_to_64bit(rx_stats->roce_frame_bytes_lo, + rx_stats->roce_frame_bytes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops", + (u64)rx_stats->roce_frame_icrc_drops); + pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops", + (u64)rx_stats->roce_frame_payload_len_drops); + pcur += ocrdma_add_stat(stats, pcur, "ud_drops", + (u64)rx_stats->ud_drops); + pcur += ocrdma_add_stat(stats, pcur, "qp1_drops", + (u64)rx_stats->qp1_drops); + pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets", + (u64)rx_stats->psn_error_request_packets); + pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets", + (u64)rx_stats->psn_error_resp_packets); + pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts", + (u64)rx_stats->rnr_nak_timeouts); + pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives", + (u64)rx_stats->rnr_nak_receives); + pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops", + (u64)rx_stats->roce_frame_rxmt_drops); + pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors", + (u64)rx_stats->nak_count_psn_sequence_errors); + pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors", + (u64)rx_stats->rc_drop_count_lookup_errors); + pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks", + (u64)rx_stats->rq_rnr_naks); + pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks", + (u64)rx_stats->srq_rnr_naks); + pcur += ocrdma_add_stat(stats, pcur, "roce_frames", + convert_to_64bit(rx_stats->roce_frames_lo, + rx_stats->roce_frames_hi)); + + return stats; +} + +static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + return convert_to_64bit(rx_stats->roce_frames_lo, + rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops + + (u64)rx_stats->roce_frame_payload_len_drops; +} + +static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + return (convert_to_64bit(rx_stats->roce_frame_bytes_lo, + rx_stats->roce_frame_bytes_hi))/4; +} + +static char *ocrdma_tx_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "send_pkts", + convert_to_64bit(tx_stats->send_pkts_lo, + tx_stats->send_pkts_hi)); + pcur += ocrdma_add_stat(stats, pcur, "write_pkts", + convert_to_64bit(tx_stats->write_pkts_lo, + tx_stats->write_pkts_hi)); + pcur += ocrdma_add_stat(stats, pcur, "read_pkts", + convert_to_64bit(tx_stats->read_pkts_lo, + tx_stats->read_pkts_hi)); + pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts", + convert_to_64bit(tx_stats->read_rsp_pkts_lo, + tx_stats->read_rsp_pkts_hi)); + pcur += ocrdma_add_stat(stats, pcur, "ack_pkts", + convert_to_64bit(tx_stats->ack_pkts_lo, + tx_stats->ack_pkts_hi)); + pcur += ocrdma_add_stat(stats, pcur, "send_bytes", + convert_to_64bit(tx_stats->send_bytes_lo, + tx_stats->send_bytes_hi)); + pcur += 
ocrdma_add_stat(stats, pcur, "write_bytes", + convert_to_64bit(tx_stats->write_bytes_lo, + tx_stats->write_bytes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes", + convert_to_64bit(tx_stats->read_req_bytes_lo, + tx_stats->read_req_bytes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes", + convert_to_64bit(tx_stats->read_rsp_bytes_lo, + tx_stats->read_rsp_bytes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts", + (u64)tx_stats->ack_timeouts); + + return stats; +} + +static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + return (convert_to_64bit(tx_stats->send_pkts_lo, + tx_stats->send_pkts_hi) + + convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) + + convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) + + convert_to_64bit(tx_stats->read_rsp_pkts_lo, + tx_stats->read_rsp_pkts_hi) + + convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi)); +} + +static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + return (convert_to_64bit(tx_stats->send_bytes_lo, + tx_stats->send_bytes_hi) + + convert_to_64bit(tx_stats->write_bytes_lo, + tx_stats->write_bytes_hi) + + convert_to_64bit(tx_stats->read_req_bytes_lo, + tx_stats->read_req_bytes_hi) + + convert_to_64bit(tx_stats->read_rsp_bytes_lo, + tx_stats->read_rsp_bytes_hi))/4; +} + +static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes", + convert_to_64bit(wqe_stats->large_send_rc_wqes_lo, + wqe_stats->large_send_rc_wqes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes", + convert_to_64bit(wqe_stats->large_write_rc_wqes_lo, + wqe_stats->large_write_rc_wqes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "read_wqes", + convert_to_64bit(wqe_stats->read_wqes_lo, + wqe_stats->read_wqes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes", + convert_to_64bit(wqe_stats->frmr_wqes_lo, + wqe_stats->frmr_wqes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes", + convert_to_64bit(wqe_stats->mw_bind_wqes_lo, + wqe_stats->mw_bind_wqes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes", + convert_to_64bit(wqe_stats->invalidate_wqes_lo, + wqe_stats->invalidate_wqes_hi)); + pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops", + (u64)wqe_stats->dpp_wqe_drops); + return stats; +} + +static char *ocrdma_db_errstats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors", + (u64)db_err_stats->sq_doorbell_errors); + pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors", + (u64)db_err_stats->cq_doorbell_errors); + pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors", + 
(u64)db_err_stats->rq_srq_doorbell_errors); + pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors", + (u64)db_err_stats->cq_overflow_errors); + return stats; +} + +static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_qp_err_stats *rx_qp_err_stats = + &rdma_stats->rx_qp_err_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors", + (u64)rx_qp_err_stats->nak_invalid_requst_errors); + pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors", + (u64)rx_qp_err_stats->nak_remote_operation_errors); + pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors", + (u64)rx_qp_err_stats->nak_count_remote_access_errors); + pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", + (u64)rx_qp_err_stats->local_length_errors); + pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", + (u64)rx_qp_err_stats->local_protection_errors); + pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", + (u64)rx_qp_err_stats->local_qp_operation_errors); + return stats; +} + +static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_qp_err_stats *tx_qp_err_stats = + &rdma_stats->tx_qp_err_stats; + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", + (u64)tx_qp_err_stats->local_length_errors); + pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", + (u64)tx_qp_err_stats->local_protection_errors); + pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", + (u64)tx_qp_err_stats->local_qp_operation_errors); + pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors", + (u64)tx_qp_err_stats->retry_count_exceeded_errors); + pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors", + (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors); + return stats; +} + +static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev) +{ + int i; + char *pstats = dev->stats_mem.debugfs_mem; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_dbg_stats *tx_dbg_stats = + &rdma_stats->tx_dbg_stats; + + memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + for (i = 0; i < 100; i++) + pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, + tx_dbg_stats->data[i]); + + return dev->stats_mem.debugfs_mem; +} + +static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev) +{ + int i; + char *pstats = dev->stats_mem.debugfs_mem; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_dbg_stats *rx_dbg_stats = + &rdma_stats->rx_dbg_stats; + + memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + for (i = 0; i < 200; i++) + pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, + rx_dbg_stats->data[i]); + + return dev->stats_mem.debugfs_mem; +} + +static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "async_cq_err", + (u64)(dev->async_err_stats + [OCRDMA_CQ_ERROR].counter)); + pcur += 
ocrdma_add_stat(stats, pcur, "async_cq_overrun_err", + (u64)dev->async_err_stats + [OCRDMA_CQ_OVERRUN_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err", + (u64)dev->async_err_stats + [OCRDMA_CQ_QPCAT_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err", + (u64)dev->async_err_stats + [OCRDMA_QP_ACCESS_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt", + (u64)dev->async_err_stats + [OCRDMA_QP_COMM_EST_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt", + (u64)dev->async_err_stats + [OCRDMA_SQ_DRAINED_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt", + (u64)dev->async_err_stats + [OCRDMA_DEVICE_FATAL_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err", + (u64)dev->async_err_stats + [OCRDMA_SRQCAT_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt", + (u64)dev->async_err_stats + [OCRDMA_SRQ_LIMIT_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt", + (u64)dev->async_err_stats + [OCRDMA_QP_LAST_WQE_EVENT].counter); + + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_LEN_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_QP_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_EEC_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_PROT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_WR_FLUSH_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_MW_BIND_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_BAD_RESP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_ACCESS_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_INV_REQ_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_ACCESS_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RETRY_EXC_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_ABORT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_INV_EECN_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_INV_EEC_STATE_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_FATAL_ERR].counter); + pcur += 
ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_GENERAL_ERR].counter); + return stats; +} + +static void ocrdma_update_stats(struct ocrdma_dev *dev) +{ + ulong now = jiffies, secs; + int status = 0; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; + + secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; + if (secs) { + /* update */ + status = ocrdma_mbx_rdma_stats(dev, false); + if (status) + pr_err("%s: stats mbox failed with status = %d\n", + __func__, status); + /* Update PD counters from PD resource manager */ + if (dev->pd_mgr->pd_prealloc_valid) { + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count; + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count; + /* Threshold stata*/ + rsrc_stats = &rdma_stats->th_rsrc_stats; + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh; + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh; + } + dev->last_stats_time = jiffies; + } +} + +static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + char tmp_str[32]; + long reset; + int status = 0; + struct ocrdma_stats *pstats = filp->private_data; + struct ocrdma_dev *dev = pstats->dev; + + if (count > 32) + goto err; + + if (copy_from_user(tmp_str, buffer, count)) + goto err; + + tmp_str[count-1] = '\0'; + if (kstrtol(tmp_str, 10, &reset)) + goto err; + + switch (pstats->type) { + case OCRDMA_RESET_STATS: + if (reset) { + status = ocrdma_mbx_rdma_stats(dev, true); + if (status) { + pr_err("Failed to reset stats = %d", status); + goto err; + } + } + break; + default: + goto err; + } + + return count; +err: + return -EFAULT; +} + +int ocrdma_pma_counters(struct ocrdma_dev *dev, + struct ib_mad *out_mad) +{ + struct ib_pma_portcounters *pma_cnt; + + memset(out_mad->data, 0, sizeof out_mad->data); + pma_cnt = (void *)(out_mad->data + 40); + ocrdma_update_stats(dev); + + pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev)); + pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev)); + pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev)); + pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev)); + return 0; +} + +static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, + size_t usr_buf_len, loff_t *ppos) +{ + struct ocrdma_stats *pstats = filp->private_data; + struct ocrdma_dev *dev = pstats->dev; + ssize_t status = 0; + char *data = NULL; + + /* No partial reads */ + if (*ppos != 0) + return 0; + + mutex_lock(&dev->stats_lock); + + ocrdma_update_stats(dev); + + switch (pstats->type) { + case OCRDMA_RSRC_STATS: + data = ocrdma_resource_stats(dev); + break; + case OCRDMA_RXSTATS: + data = ocrdma_rx_stats(dev); + break; + case OCRDMA_WQESTATS: + data = ocrdma_wqe_stats(dev); + break; + case OCRDMA_TXSTATS: + data = ocrdma_tx_stats(dev); + break; + case OCRDMA_DB_ERRSTATS: + data = ocrdma_db_errstats(dev); + break; + case OCRDMA_RXQP_ERRSTATS: + data = ocrdma_rxqp_errstats(dev); + break; + case OCRDMA_TXQP_ERRSTATS: + data = ocrdma_txqp_errstats(dev); + break; + case OCRDMA_TX_DBG_STATS: + data = ocrdma_tx_dbg_stats(dev); + break; + case OCRDMA_RX_DBG_STATS: + data = ocrdma_rx_dbg_stats(dev); + break; + case OCRDMA_DRV_STATS: + data = ocrdma_driver_dbg_stats(dev); + break; + + default: 
+ status = -EFAULT; + goto exit; + } + + if (usr_buf_len < strlen(data)) { + status = -ENOSPC; + goto exit; + } + + status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data, + strlen(data)); +exit: + mutex_unlock(&dev->stats_lock); + return status; +} + +static const struct file_operations ocrdma_dbg_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ocrdma_dbgfs_ops_read, + .write = ocrdma_dbgfs_ops_write, +}; + +void ocrdma_add_port_stats(struct ocrdma_dev *dev) +{ + if (!ocrdma_dbgfs_dir) + return; + + /* Create post stats base dir */ + dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir); + if (!dev->dir) + goto err; + + dev->rsrc_stats.type = OCRDMA_RSRC_STATS; + dev->rsrc_stats.dev = dev; + if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir, + &dev->rsrc_stats, &ocrdma_dbg_ops)) + goto err; + + dev->rx_stats.type = OCRDMA_RXSTATS; + dev->rx_stats.dev = dev; + if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir, + &dev->rx_stats, &ocrdma_dbg_ops)) + goto err; + + dev->wqe_stats.type = OCRDMA_WQESTATS; + dev->wqe_stats.dev = dev; + if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, + &dev->wqe_stats, &ocrdma_dbg_ops)) + goto err; + + dev->tx_stats.type = OCRDMA_TXSTATS; + dev->tx_stats.dev = dev; + if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir, + &dev->tx_stats, &ocrdma_dbg_ops)) + goto err; + + dev->db_err_stats.type = OCRDMA_DB_ERRSTATS; + dev->db_err_stats.dev = dev; + if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir, + &dev->db_err_stats, &ocrdma_dbg_ops)) + goto err; + + + dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS; + dev->tx_qp_err_stats.dev = dev; + if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir, + &dev->tx_qp_err_stats, &ocrdma_dbg_ops)) + goto err; + + dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS; + dev->rx_qp_err_stats.dev = dev; + if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir, + &dev->rx_qp_err_stats, &ocrdma_dbg_ops)) + goto err; + + + dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS; + dev->tx_dbg_stats.dev = dev; + if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir, + &dev->tx_dbg_stats, &ocrdma_dbg_ops)) + goto err; + + dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS; + dev->rx_dbg_stats.dev = dev; + if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir, + &dev->rx_dbg_stats, &ocrdma_dbg_ops)) + goto err; + + dev->driver_stats.type = OCRDMA_DRV_STATS; + dev->driver_stats.dev = dev; + if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir, + &dev->driver_stats, &ocrdma_dbg_ops)) + goto err; + + dev->reset_stats.type = OCRDMA_RESET_STATS; + dev->reset_stats.dev = dev; + if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + &dev->reset_stats, &ocrdma_dbg_ops)) + goto err; + + /* Now create dma_mem for stats mbx command */ + if (!ocrdma_alloc_stats_mem(dev)) + goto err; + + mutex_init(&dev->stats_lock); + + return; +err: + ocrdma_release_stats_mem(dev); + debugfs_remove_recursive(dev->dir); + dev->dir = NULL; +} + +void ocrdma_rem_port_stats(struct ocrdma_dev *dev) +{ + if (!dev->dir) + return; + mutex_destroy(&dev->stats_lock); + ocrdma_release_stats_mem(dev); + debugfs_remove(dev->dir); +} + +void ocrdma_init_debugfs(void) +{ + /* Create base dir in debugfs root dir */ + ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL); +} + +void ocrdma_rem_debugfs(void) +{ + debugfs_remove_recursive(ocrdma_dbgfs_dir); +} diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.h new file mode 
100644 index 000000000..091edd68a --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_stats.h @@ -0,0 +1,58 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2014 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_STATS_H__ +#define __OCRDMA_STATS_H__ + +#include +#include "ocrdma.h" +#include "ocrdma_hw.h" + +#define OCRDMA_MAX_DBGFS_MEM 4096 + +enum OCRDMA_STATS_TYPE { + OCRDMA_RSRC_STATS, + OCRDMA_RXSTATS, + OCRDMA_WQESTATS, + OCRDMA_TXSTATS, + OCRDMA_DB_ERRSTATS, + OCRDMA_RXQP_ERRSTATS, + OCRDMA_TXQP_ERRSTATS, + OCRDMA_TX_DBG_STATS, + OCRDMA_RX_DBG_STATS, + OCRDMA_DRV_STATS, + OCRDMA_RESET_STATS +}; + +void ocrdma_rem_debugfs(void); +void ocrdma_init_debugfs(void); +void ocrdma_rem_port_stats(struct ocrdma_dev *dev); +void ocrdma_add_port_stats(struct ocrdma_dev *dev); +int ocrdma_pma_counters(struct ocrdma_dev *dev, + struct ib_mad *out_mad); + +#endif /* __OCRDMA_STATS_H__ */ diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c new file mode 100644 index 000000000..9dcb66077 --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -0,0 +1,3195 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include "ocrdma.h" +#include "ocrdma_hw.h" +#include "ocrdma_verbs.h" +#include "ocrdma_abi.h" + +int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) +{ + if (index > 1) + return -EINVAL; + + *pkey = 0xffff; + return 0; +} + +int ocrdma_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *sgid) +{ + struct ocrdma_dev *dev; + + dev = get_ocrdma_dev(ibdev); + memset(sgid, 0, sizeof(*sgid)); + if (index >= OCRDMA_MAX_SGID) + return -EINVAL; + + memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); + + return 0; +} + +int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) +{ + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); + + memset(attr, 0, sizeof *attr); + memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], + min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); + ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); + attr->max_mr_size = dev->attr.max_mr_size; + attr->page_size_cap = 0xffff000; + attr->vendor_id = dev->nic_info.pdev->vendor; + attr->vendor_part_id = dev->nic_info.pdev->device; + attr->hw_ver = dev->asic_id; + attr->max_qp = dev->attr.max_qp; + attr->max_ah = OCRDMA_MAX_AH; + attr->max_qp_wr = dev->attr.max_wqe; + + attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | + IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_SHUTDOWN_PORT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_LOCAL_DMA_LKEY | + IB_DEVICE_MEM_MGT_EXTENSIONS; + attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); + attr->max_sge_rd = 0; + attr->max_cq = dev->attr.max_cq; + attr->max_cqe = dev->attr.max_cqe; + attr->max_mr = dev->attr.max_mr; + attr->max_mw = dev->attr.max_mw; + attr->max_pd = dev->attr.max_pd; + attr->atomic_cap = 0; + attr->max_fmr = 0; + attr->max_map_per_fmr = 0; + attr->max_qp_rd_atom = + min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); + attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; + attr->max_srq = dev->attr.max_srq; + attr->max_srq_sge = dev->attr.max_srq_sge; + attr->max_srq_wr = dev->attr.max_rqe; + attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; + attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr; + attr->max_pkeys = 1; + return 0; +} + +static inline void get_link_speed_and_width(struct ocrdma_dev *dev, + u8 *ib_speed, u8 *ib_width) +{ + int status; + u8 speed; + + status = ocrdma_mbx_get_link_speed(dev, &speed); + if (status) + speed = OCRDMA_PHYS_LINK_SPEED_ZERO; + + switch (speed) { + case OCRDMA_PHYS_LINK_SPEED_1GBPS: + *ib_speed = IB_SPEED_SDR; + *ib_width = IB_WIDTH_1X; + break; + + case OCRDMA_PHYS_LINK_SPEED_10GBPS: + *ib_speed = IB_SPEED_QDR; + *ib_width = IB_WIDTH_1X; + break; + + case OCRDMA_PHYS_LINK_SPEED_20GBPS: + *ib_speed = IB_SPEED_DDR; + *ib_width = IB_WIDTH_4X; + break; + + case OCRDMA_PHYS_LINK_SPEED_40GBPS: + *ib_speed = IB_SPEED_QDR; + *ib_width = IB_WIDTH_4X; + break; + + default: + /* Unsupported */ + *ib_speed = IB_SPEED_SDR; + *ib_width = IB_WIDTH_1X; + } +} + +int ocrdma_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props) +{ + enum ib_port_state port_state; + struct ocrdma_dev *dev; + struct net_device *netdev; + + dev = get_ocrdma_dev(ibdev); + if (port > 1) { + pr_err("%s(%d) invalid_port=0x%x\n", __func__, + dev->id, port); + return -EINVAL; + } + netdev = dev->nic_info.netdev; 
+ if (netif_running(netdev) && netif_oper_up(netdev)) { + port_state = IB_PORT_ACTIVE; + props->phys_state = 5; + } else { + port_state = IB_PORT_DOWN; + props->phys_state = 3; + } + props->max_mtu = IB_MTU_4096; + props->active_mtu = iboe_get_mtu(netdev->mtu); + props->lid = 0; + props->lmc = 0; + props->sm_lid = 0; + props->sm_sl = 0; + props->state = port_state; + props->port_cap_flags = + IB_PORT_CM_SUP | + IB_PORT_REINIT_SUP | + IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; + props->gid_tbl_len = OCRDMA_MAX_SGID; + props->pkey_tbl_len = 1; + props->bad_pkey_cntr = 0; + props->qkey_viol_cntr = 0; + get_link_speed_and_width(dev, &props->active_speed, + &props->active_width); + props->max_msg_sz = 0x80000000; + props->max_vl_num = 4; + return 0; +} + +int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct ocrdma_dev *dev; + + dev = get_ocrdma_dev(ibdev); + if (port > 1) { + pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port); + return -EINVAL; + } + return 0; +} + +static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, + unsigned long len) +{ + struct ocrdma_mm *mm; + + mm = kzalloc(sizeof(*mm), GFP_KERNEL); + if (mm == NULL) + return -ENOMEM; + mm->key.phy_addr = phy_addr; + mm->key.len = len; + INIT_LIST_HEAD(&mm->entry); + + mutex_lock(&uctx->mm_list_lock); + list_add_tail(&mm->entry, &uctx->mm_head); + mutex_unlock(&uctx->mm_list_lock); + return 0; +} + +static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, + unsigned long len) +{ + struct ocrdma_mm *mm, *tmp; + + mutex_lock(&uctx->mm_list_lock); + list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { + if (len != mm->key.len && phy_addr != mm->key.phy_addr) + continue; + + list_del(&mm->entry); + kfree(mm); + break; + } + mutex_unlock(&uctx->mm_list_lock); +} + +static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, + unsigned long len) +{ + bool found = false; + struct ocrdma_mm *mm; + + mutex_lock(&uctx->mm_list_lock); + list_for_each_entry(mm, &uctx->mm_head, entry) { + if (len != mm->key.len && phy_addr != mm->key.phy_addr) + continue; + + found = true; + break; + } + mutex_unlock(&uctx->mm_list_lock); + return found; +} + + +static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) +{ + u16 pd_bitmap_idx = 0; + const unsigned long *pd_bitmap; + + if (dpp_pool) { + pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, + dev->pd_mgr->max_dpp_pd); + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + dev->pd_mgr->pd_dpp_count++; + if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) + dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; + } else { + pd_bitmap = dev->pd_mgr->pd_norm_bitmap; + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, + dev->pd_mgr->max_normal_pd); + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + dev->pd_mgr->pd_norm_count++; + if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) + dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; + } + return pd_bitmap_idx; +} + +static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, + bool dpp_pool) +{ + u16 pd_count; + u16 pd_bit_index; + + pd_count = dpp_pool ? 
dev->pd_mgr->pd_dpp_count : + dev->pd_mgr->pd_norm_count; + if (pd_count == 0) + return -EINVAL; + + if (dpp_pool) { + pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start; + if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) { + return -EINVAL; + } else { + __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap); + dev->pd_mgr->pd_dpp_count--; + } + } else { + pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start; + if (pd_bit_index >= dev->pd_mgr->max_normal_pd) { + return -EINVAL; + } else { + __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap); + dev->pd_mgr->pd_norm_count--; + } + } + + return 0; +} + +static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, + bool dpp_pool) +{ + int status; + + mutex_lock(&dev->dev_lock); + status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool); + mutex_unlock(&dev->dev_lock); + return status; +} + +static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) +{ + u16 pd_idx = 0; + int status = 0; + + mutex_lock(&dev->dev_lock); + if (pd->dpp_enabled) { + /* try allocating DPP PD, if not available then normal PD */ + if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true); + pd->id = dev->pd_mgr->pd_dpp_start + pd_idx; + pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx; + } else if (dev->pd_mgr->pd_norm_count < + dev->pd_mgr->max_normal_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; + pd->dpp_enabled = false; + } else { + status = -EINVAL; + } + } else { + if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; + } else { + status = -EINVAL; + } + } + mutex_unlock(&dev->dev_lock); + return status; +} + +static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, + struct ocrdma_ucontext *uctx, + struct ib_udata *udata) +{ + struct ocrdma_pd *pd = NULL; + int status = 0; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + if (udata && uctx && dev->attr.max_dpp_pds) { + pd->dpp_enabled = + ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; + pd->num_dpp_qp = + pd->dpp_enabled ? (dev->nic_info.db_page_size / + dev->attr.wqe_size) : 0; + } + + if (dev->pd_mgr->pd_prealloc_valid) { + status = ocrdma_get_pd_num(dev, pd); + return (status == 0) ? pd : ERR_PTR(status); + } + +retry: + status = ocrdma_mbx_alloc_pd(dev, pd); + if (status) { + if (pd->dpp_enabled) { + pd->dpp_enabled = false; + pd->num_dpp_qp = 0; + goto retry; + } else { + kfree(pd); + return ERR_PTR(status); + } + } + + return pd; +} + +static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, + struct ocrdma_pd *pd) +{ + return (uctx->cntxt_pd == pd ? 
true : false); +} + +static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, + struct ocrdma_pd *pd) +{ + int status = 0; + + if (dev->pd_mgr->pd_prealloc_valid) + status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); + else + status = ocrdma_mbx_dealloc_pd(dev, pd); + + kfree(pd); + return status; +} + +static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev, + struct ocrdma_ucontext *uctx, + struct ib_udata *udata) +{ + int status = 0; + + uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata); + if (IS_ERR(uctx->cntxt_pd)) { + status = PTR_ERR(uctx->cntxt_pd); + uctx->cntxt_pd = NULL; + goto err; + } + + uctx->cntxt_pd->uctx = uctx; + uctx->cntxt_pd->ibpd.device = &dev->ibdev; +err: + return status; +} + +static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) +{ + struct ocrdma_pd *pd = uctx->cntxt_pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + + if (uctx->pd_in_use) { + pr_err("%s(%d) Freeing in use pdid=0x%x.\n", + __func__, dev->id, pd->id); + } + uctx->cntxt_pd = NULL; + (void)_ocrdma_dealloc_pd(dev, pd); + return 0; +} + +static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) +{ + struct ocrdma_pd *pd = NULL; + + mutex_lock(&uctx->mm_list_lock); + if (!uctx->pd_in_use) { + uctx->pd_in_use = true; + pd = uctx->cntxt_pd; + } + mutex_unlock(&uctx->mm_list_lock); + + return pd; +} + +static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx) +{ + mutex_lock(&uctx->mm_list_lock); + uctx->pd_in_use = false; + mutex_unlock(&uctx->mm_list_lock); +} + +struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + int status; + struct ocrdma_ucontext *ctx; + struct ocrdma_alloc_ucontext_resp resp; + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); + struct pci_dev *pdev = dev->nic_info.pdev; + u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE); + + if (!udata) + return ERR_PTR(-EFAULT); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&ctx->mm_head); + mutex_init(&ctx->mm_list_lock); + + ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, + &ctx->ah_tbl.pa, GFP_KERNEL); + if (!ctx->ah_tbl.va) { + kfree(ctx); + return ERR_PTR(-ENOMEM); + } + memset(ctx->ah_tbl.va, 0, map_len); + ctx->ah_tbl.len = map_len; + + memset(&resp, 0, sizeof(resp)); + resp.ah_tbl_len = ctx->ah_tbl.len; + resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va); + + status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); + if (status) + goto map_err; + + status = ocrdma_alloc_ucontext_pd(dev, ctx, udata); + if (status) + goto pd_err; + + resp.dev_id = dev->id; + resp.max_inline_data = dev->attr.max_inline_data; + resp.wqe_size = dev->attr.wqe_size; + resp.rqe_size = dev->attr.rqe_size; + resp.dpp_wqe_size = dev->attr.wqe_size; + + memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); + status = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (status) + goto cpy_err; + return &ctx->ibucontext; + +cpy_err: +pd_err: + ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); +map_err: + dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va, + ctx->ah_tbl.pa); + kfree(ctx); + return ERR_PTR(status); +} + +int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) +{ + int status = 0; + struct ocrdma_mm *mm, *tmp; + struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); + struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device); + struct pci_dev *pdev = dev->nic_info.pdev; + + status = ocrdma_dealloc_ucontext_pd(uctx); + + ocrdma_del_mmap(uctx, 
uctx->ah_tbl.pa, uctx->ah_tbl.len); + dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, + uctx->ah_tbl.pa); + + list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { + list_del(&mm->entry); + kfree(mm); + } + kfree(uctx); + return status; +} + +int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context); + struct ocrdma_dev *dev = get_ocrdma_dev(context->device); + unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; + u64 unmapped_db = (u64) dev->nic_info.unmapped_db; + unsigned long len = (vma->vm_end - vma->vm_start); + int status = 0; + bool found; + + if (vma->vm_start & (PAGE_SIZE - 1)) + return -EINVAL; + found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len); + if (!found) + return -EINVAL; + + if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + + dev->nic_info.db_total_size)) && + (len <= dev->nic_info.db_page_size)) { + if (vma->vm_flags & VM_READ) + return -EPERM; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + len, vma->vm_page_prot); + } else if (dev->nic_info.dpp_unmapped_len && + (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) && + (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr + + dev->nic_info.dpp_unmapped_len)) && + (len <= dev->nic_info.dpp_unmapped_len)) { + if (vma->vm_flags & VM_READ) + return -EPERM; + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + len, vma->vm_page_prot); + } else { + status = remap_pfn_range(vma, vma->vm_start, + vma->vm_pgoff, len, vma->vm_page_prot); + } + return status; +} + +static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, + struct ib_ucontext *ib_ctx, + struct ib_udata *udata) +{ + int status; + u64 db_page_addr; + u64 dpp_page_addr = 0; + u32 db_page_size; + struct ocrdma_alloc_pd_uresp rsp; + struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); + + memset(&rsp, 0, sizeof(rsp)); + rsp.id = pd->id; + rsp.dpp_enabled = pd->dpp_enabled; + db_page_addr = ocrdma_get_db_addr(dev, pd->id); + db_page_size = dev->nic_info.db_page_size; + + status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); + if (status) + return status; + + if (pd->dpp_enabled) { + dpp_page_addr = dev->nic_info.dpp_unmapped_addr + + (pd->id * PAGE_SIZE); + status = ocrdma_add_mmap(uctx, dpp_page_addr, + PAGE_SIZE); + if (status) + goto dpp_map_err; + rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr); + rsp.dpp_page_addr_lo = dpp_page_addr; + } + + status = ib_copy_to_udata(udata, &rsp, sizeof(rsp)); + if (status) + goto ucopy_err; + + pd->uctx = uctx; + return 0; + +ucopy_err: + if (pd->dpp_enabled) + ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE); +dpp_map_err: + ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size); + return status; +} + +struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); + struct ocrdma_pd *pd; + struct ocrdma_ucontext *uctx = NULL; + int status; + u8 is_uctx_pd = false; + + if (udata && context) { + uctx = get_ocrdma_ucontext(context); + pd = ocrdma_get_ucontext_pd(uctx); + if (pd) { + is_uctx_pd = true; + goto pd_mapping; + } + } + + pd = _ocrdma_alloc_pd(dev, uctx, udata); + if (IS_ERR(pd)) { + status = PTR_ERR(pd); + goto exit; + } + +pd_mapping: + if (udata && context) { + status = ocrdma_copy_pd_uresp(dev, 
pd, context, udata); + if (status) + goto err; + } + return &pd->ibpd; + +err: + if (is_uctx_pd) { + ocrdma_release_ucontext_pd(uctx); + } else { + status = _ocrdma_dealloc_pd(dev, pd); + kfree(pd); + } +exit: + return ERR_PTR(status); +} + +int ocrdma_dealloc_pd(struct ib_pd *ibpd) +{ + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + struct ocrdma_ucontext *uctx = NULL; + int status = 0; + u64 usr_db; + + uctx = pd->uctx; + if (uctx) { + u64 dpp_db = dev->nic_info.dpp_unmapped_addr + + (pd->id * PAGE_SIZE); + if (pd->dpp_enabled) + ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE); + usr_db = ocrdma_get_db_addr(dev, pd->id); + ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); + + if (is_ucontext_pd(uctx, pd)) { + ocrdma_release_ucontext_pd(uctx); + return status; + } + } + status = _ocrdma_dealloc_pd(dev, pd); + return status; +} + +static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, + u32 pdid, int acc, u32 num_pbls, u32 addr_check) +{ + int status; + + mr->hwmr.fr_mr = 0; + mr->hwmr.local_rd = 1; + mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; + mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; + mr->hwmr.num_pbls = num_pbls; + + status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check); + if (status) + return status; + + mr->ibmr.lkey = mr->hwmr.lkey; + if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) + mr->ibmr.rkey = mr->hwmr.lkey; + return 0; +} + +struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc) +{ + int status; + struct ocrdma_mr *mr; + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + + if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { + pr_err("%s err, invalid access rights\n", __func__); + return ERR_PTR(-EINVAL); + } + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0, + OCRDMA_ADDR_CHECK_DISABLE); + if (status) { + kfree(mr); + return ERR_PTR(status); + } + + return &mr->ibmr; +} + +static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev, + struct ocrdma_hw_mr *mr) +{ + struct pci_dev *pdev = dev->nic_info.pdev; + int i = 0; + + if (mr->pbl_table) { + for (i = 0; i < mr->num_pbls; i++) { + if (!mr->pbl_table[i].va) + continue; + dma_free_coherent(&pdev->dev, mr->pbl_size, + mr->pbl_table[i].va, + mr->pbl_table[i].pa); + } + kfree(mr->pbl_table); + mr->pbl_table = NULL; + } +} + +static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr, + u32 num_pbes) +{ + u32 num_pbls = 0; + u32 idx = 0; + int status = 0; + u32 pbl_size; + + do { + pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx); + if (pbl_size > MAX_OCRDMA_PBL_SIZE) { + status = -EFAULT; + break; + } + num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64))); + num_pbls = num_pbls / (pbl_size / sizeof(u64)); + idx++; + } while (num_pbls >= dev->attr.max_num_mr_pbl); + + mr->hwmr.num_pbes = num_pbes; + mr->hwmr.num_pbls = num_pbls; + mr->hwmr.pbl_size = pbl_size; + return status; +} + +static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) +{ + int status = 0; + int i; + u32 dma_len = mr->pbl_size; + struct pci_dev *pdev = dev->nic_info.pdev; + void *va; + dma_addr_t pa; + + mr->pbl_table = kzalloc(sizeof(struct 
ocrdma_pbl) * + mr->num_pbls, GFP_KERNEL); + + if (!mr->pbl_table) + return -ENOMEM; + + for (i = 0; i < mr->num_pbls; i++) { + va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); + if (!va) { + ocrdma_free_mr_pbl_tbl(dev, mr); + status = -ENOMEM; + break; + } + memset(va, 0, dma_len); + mr->pbl_table[i].va = va; + mr->pbl_table[i].pa = pa; + } + return status; +} + +static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, + u32 num_pbes) +{ + struct ocrdma_pbe *pbe; + struct scatterlist *sg; + struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; + struct ib_umem *umem = mr->umem; + int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0; + + if (!mr->hwmr.num_pbes) + return; + + pbe = (struct ocrdma_pbe *)pbl_tbl->va; + pbe_cnt = 0; + + shift = ilog2(umem->page_size); + + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { + pages = sg_dma_len(sg) >> shift; + for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { + /* store the page address in pbe */ + pbe->pa_lo = + cpu_to_le32(sg_dma_address + (sg) + + (umem->page_size * pg_cnt)); + pbe->pa_hi = + cpu_to_le32(upper_32_bits + ((sg_dma_address + (sg) + + umem->page_size * pg_cnt))); + pbe_cnt += 1; + total_num_pbes += 1; + pbe++; + + /* if done building pbes, issue the mbx cmd. */ + if (total_num_pbes == num_pbes) + return; + + /* if the given pbl is full storing the pbes, + * move to next pbl. + */ + if (pbe_cnt == + (mr->hwmr.pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct ocrdma_pbe *)pbl_tbl->va; + pbe_cnt = 0; + } + + } + } +} + +struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, + u64 usr_addr, int acc, struct ib_udata *udata) +{ + int status = -ENOMEM; + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + struct ocrdma_mr *mr; + struct ocrdma_pd *pd; + u32 num_pbes; + + pd = get_ocrdma_pd(ibpd); + + if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(status); + mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); + if (IS_ERR(mr->umem)) { + status = -EFAULT; + goto umem_err; + } + num_pbes = ib_umem_page_count(mr->umem); + status = ocrdma_get_pbl_info(dev, mr, num_pbes); + if (status) + goto umem_err; + + mr->hwmr.pbe_size = mr->umem->page_size; + mr->hwmr.fbo = ib_umem_offset(mr->umem); + mr->hwmr.va = usr_addr; + mr->hwmr.len = len; + mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hwmr.local_rd = 1; + mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; + status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); + if (status) + goto umem_err; + build_user_pbes(dev, mr, num_pbes); + status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); + if (status) + goto mbx_err; + mr->ibmr.lkey = mr->hwmr.lkey; + if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) + mr->ibmr.rkey = mr->hwmr.lkey; + + return &mr->ibmr; + +mbx_err: + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); +umem_err: + kfree(mr); + return ERR_PTR(status); +} + +int ocrdma_dereg_mr(struct ib_mr *ib_mr) +{ + struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); + struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); + + (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); + + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); + + /* it could be user registered memory. 
*/ + if (mr->umem) + ib_umem_release(mr->umem); + kfree(mr); + + /* Don't stop cleanup, in case FW is unresponsive */ + if (dev->mqe_ctx.fw_error_state) { + pr_err("%s(%d) fw not responding.\n", + __func__, dev->id); + } + return 0; +} + +static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, + struct ib_udata *udata, + struct ib_ucontext *ib_ctx) +{ + int status; + struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); + struct ocrdma_create_cq_uresp uresp; + + memset(&uresp, 0, sizeof(uresp)); + uresp.cq_id = cq->id; + uresp.page_size = PAGE_ALIGN(cq->len); + uresp.num_pages = 1; + uresp.max_hw_cqe = cq->max_hw_cqe; + uresp.page_addr[0] = virt_to_phys(cq->va); + uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id); + uresp.db_page_size = dev->nic_info.db_page_size; + uresp.phase_change = cq->phase_change ? 1 : 0; + status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (status) { + pr_err("%s(%d) copy error cqid=0x%x.\n", + __func__, dev->id, cq->id); + goto err; + } + status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); + if (status) + goto err; + status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size); + if (status) { + ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); + goto err; + } + cq->ucontext = uctx; +err: + return status; +} + +struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, + struct ib_ucontext *ib_ctx, + struct ib_udata *udata) +{ + struct ocrdma_cq *cq; + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); + struct ocrdma_ucontext *uctx = NULL; + u16 pd_id = 0; + int status; + struct ocrdma_create_cq_ureq ureq; + + if (udata) { + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) + return ERR_PTR(-EFAULT); + } else + ureq.dpp_cq = 0; + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&cq->cq_lock); + spin_lock_init(&cq->comp_handler_lock); + INIT_LIST_HEAD(&cq->sq_head); + INIT_LIST_HEAD(&cq->rq_head); + cq->first_arm = true; + + if (ib_ctx) { + uctx = get_ocrdma_ucontext(ib_ctx); + pd_id = uctx->cntxt_pd->id; + } + + status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id); + if (status) { + kfree(cq); + return ERR_PTR(status); + } + if (ib_ctx) { + status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx); + if (status) + goto ctx_err; + } + cq->phase = OCRDMA_CQE_VALID; + dev->cq_tbl[cq->id] = cq; + return &cq->ibcq; + +ctx_err: + ocrdma_mbx_destroy_cq(dev, cq); + kfree(cq); + return ERR_PTR(status); +} + +int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, + struct ib_udata *udata) +{ + int status = 0; + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); + + if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) { + status = -EINVAL; + return status; + } + ibcq->cqe = new_cnt; + return status; +} + +static void ocrdma_flush_cq(struct ocrdma_cq *cq) +{ + int cqe_cnt; + int valid_count = 0; + unsigned long flags; + + struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); + struct ocrdma_cqe *cqe = NULL; + + cqe = cq->va; + cqe_cnt = cq->cqe_cnt; + + /* Last irq might have scheduled a polling thread + * sync-up with it before hard flushing. 
+ */ + spin_lock_irqsave(&cq->cq_lock, flags); + while (cqe_cnt) { + if (is_cqe_valid(cq, cqe)) + valid_count++; + cqe++; + cqe_cnt--; + } + ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count); + spin_unlock_irqrestore(&cq->cq_lock, flags); +} + +int ocrdma_destroy_cq(struct ib_cq *ibcq) +{ + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); + struct ocrdma_eq *eq = NULL; + struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); + int pdid = 0; + u32 irq, indx; + + dev->cq_tbl[cq->id] = NULL; + indx = ocrdma_get_eq_table_index(dev, cq->eqn); + if (indx == -EINVAL) + BUG(); + + eq = &dev->eq_tbl[indx]; + irq = ocrdma_get_irq(dev, eq); + synchronize_irq(irq); + ocrdma_flush_cq(cq); + + (void)ocrdma_mbx_destroy_cq(dev, cq); + if (cq->ucontext) { + pdid = cq->ucontext->cntxt_pd->id; + ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, + PAGE_ALIGN(cq->len)); + ocrdma_del_mmap(cq->ucontext, + ocrdma_get_db_addr(dev, pdid), + dev->nic_info.db_page_size); + } + + kfree(cq); + return 0; +} + +static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) +{ + int status = -EINVAL; + + if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { + dev->qp_tbl[qp->id] = qp; + status = 0; + } + return status; +} + +static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) +{ + dev->qp_tbl[qp->id] = NULL; +} + +static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, + struct ib_qp_init_attr *attrs) +{ + if ((attrs->qp_type != IB_QPT_GSI) && + (attrs->qp_type != IB_QPT_RC) && + (attrs->qp_type != IB_QPT_UC) && + (attrs->qp_type != IB_QPT_UD)) { + pr_err("%s(%d) unsupported qp type=0x%x requested\n", + __func__, dev->id, attrs->qp_type); + return -EINVAL; + } + /* Skip the check for QP1 to support CM size of 128 */ + if ((attrs->qp_type != IB_QPT_GSI) && + (attrs->cap.max_send_wr > dev->attr.max_wqe)) { + pr_err("%s(%d) unsupported send_wr=0x%x requested\n", + __func__, dev->id, attrs->cap.max_send_wr); + pr_err("%s(%d) supported send_wr=0x%x\n", + __func__, dev->id, dev->attr.max_wqe); + return -EINVAL; + } + if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { + pr_err("%s(%d) unsupported recv_wr=0x%x requested\n", + __func__, dev->id, attrs->cap.max_recv_wr); + pr_err("%s(%d) supported recv_wr=0x%x\n", + __func__, dev->id, dev->attr.max_rqe); + return -EINVAL; + } + if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { + pr_err("%s(%d) unsupported inline data size=0x%x requested\n", + __func__, dev->id, attrs->cap.max_inline_data); + pr_err("%s(%d) supported inline data size=0x%x\n", + __func__, dev->id, dev->attr.max_inline_data); + return -EINVAL; + } + if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { + pr_err("%s(%d) unsupported send_sge=0x%x requested\n", + __func__, dev->id, attrs->cap.max_send_sge); + pr_err("%s(%d) supported send_sge=0x%x\n", + __func__, dev->id, dev->attr.max_send_sge); + return -EINVAL; + } + if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { + pr_err("%s(%d) unsupported recv_sge=0x%x requested\n", + __func__, dev->id, attrs->cap.max_recv_sge); + pr_err("%s(%d) supported recv_sge=0x%x\n", + __func__, dev->id, dev->attr.max_recv_sge); + return -EINVAL; + } + /* unprivileged user space cannot create special QP */ + if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { + pr_err + ("%s(%d) Userspace can't create special QPs of type=0x%x\n", + __func__, dev->id, attrs->qp_type); + return -EINVAL; + } + /* allow creating only one GSI type of QP */ + if (attrs->qp_type == IB_QPT_GSI && 
dev->gsi_qp_created) { + pr_err("%s(%d) GSI special QPs already created.\n", + __func__, dev->id); + return -EINVAL; + } + /* verify consumer QPs are not trying to use GSI QP's CQ */ + if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) { + if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) || + (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { + pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n", + __func__, dev->id); + return -EINVAL; + } + } + return 0; +} + +static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, + struct ib_udata *udata, int dpp_offset, + int dpp_credit_lmt, int srq) +{ + int status = 0; + u64 usr_db; + struct ocrdma_create_qp_uresp uresp; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); + + memset(&uresp, 0, sizeof(uresp)); + usr_db = dev->nic_info.unmapped_db + + (pd->id * dev->nic_info.db_page_size); + uresp.qp_id = qp->id; + uresp.sq_dbid = qp->sq.dbid; + uresp.num_sq_pages = 1; + uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); + uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); + uresp.num_wqe_allocated = qp->sq.max_cnt; + if (!srq) { + uresp.rq_dbid = qp->rq.dbid; + uresp.num_rq_pages = 1; + uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); + uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); + uresp.num_rqe_allocated = qp->rq.max_cnt; + } + uresp.db_page_addr = usr_db; + uresp.db_page_size = dev->nic_info.db_page_size; + uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; + uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; + uresp.db_shift = OCRDMA_DB_RQ_SHIFT; + + if (qp->dpp_enabled) { + uresp.dpp_credit = dpp_credit_lmt; + uresp.dpp_offset = dpp_offset; + } + status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (status) { + pr_err("%s(%d) user copy error.\n", __func__, dev->id); + goto err; + } + status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], + uresp.sq_page_size); + if (status) + goto err; + + if (!srq) { + status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0], + uresp.rq_page_size); + if (status) + goto rq_map_err; + } + return status; +rq_map_err: + ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size); +err: + return status; +} + +static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, + struct ocrdma_pd *pd) +{ + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { + qp->sq_db = dev->nic_info.db + + (pd->id * dev->nic_info.db_page_size) + + OCRDMA_DB_GEN2_SQ_OFFSET; + qp->rq_db = dev->nic_info.db + + (pd->id * dev->nic_info.db_page_size) + + OCRDMA_DB_GEN2_RQ_OFFSET; + } else { + qp->sq_db = dev->nic_info.db + + (pd->id * dev->nic_info.db_page_size) + + OCRDMA_DB_SQ_OFFSET; + qp->rq_db = dev->nic_info.db + + (pd->id * dev->nic_info.db_page_size) + + OCRDMA_DB_RQ_OFFSET; + } +} + +static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) +{ + qp->wqe_wr_id_tbl = + kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt, + GFP_KERNEL); + if (qp->wqe_wr_id_tbl == NULL) + return -ENOMEM; + qp->rqe_wr_id_tbl = + kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); + if (qp->rqe_wr_id_tbl == NULL) + return -ENOMEM; + + return 0; +} + +static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, + struct ocrdma_pd *pd, + struct ib_qp_init_attr *attrs) +{ + qp->pd = pd; + spin_lock_init(&qp->q_lock); + INIT_LIST_HEAD(&qp->sq_entry); + INIT_LIST_HEAD(&qp->rq_entry); + + qp->qp_type = attrs->qp_type; + qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR; + qp->max_inline_data = attrs->cap.max_inline_data; + qp->sq.max_sges = attrs->cap.max_send_sge; + qp->rq.max_sges = 
attrs->cap.max_recv_sge; + qp->state = OCRDMA_QPS_RST; + qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; +} + +static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, + struct ib_qp_init_attr *attrs) +{ + if (attrs->qp_type == IB_QPT_GSI) { + dev->gsi_qp_created = 1; + dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq); + dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq); + } +} + +struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *attrs, + struct ib_udata *udata) +{ + int status; + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_qp *qp; + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + struct ocrdma_create_qp_ureq ureq; + u16 dpp_credit_lmt, dpp_offset; + + status = ocrdma_check_qp_params(ibpd, dev, attrs); + if (status) + goto gen_err; + + memset(&ureq, 0, sizeof(ureq)); + if (udata) { + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) + return ERR_PTR(-EFAULT); + } + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) { + status = -ENOMEM; + goto gen_err; + } + ocrdma_set_qp_init_params(qp, pd, attrs); + if (udata == NULL) + qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | + OCRDMA_QP_FAST_REG); + + mutex_lock(&dev->dev_lock); + status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq, + ureq.dpp_cq_id, + &dpp_offset, &dpp_credit_lmt); + if (status) + goto mbx_err; + + /* user space QP's wr_id table are managed in library */ + if (udata == NULL) { + status = ocrdma_alloc_wr_id_tbl(qp); + if (status) + goto map_err; + } + + status = ocrdma_add_qpn_map(dev, qp); + if (status) + goto map_err; + ocrdma_set_qp_db(dev, qp, pd); + if (udata) { + status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset, + dpp_credit_lmt, + (attrs->srq != NULL)); + if (status) + goto cpy_err; + } + ocrdma_store_gsi_qp_cq(dev, attrs); + qp->ibqp.qp_num = qp->id; + mutex_unlock(&dev->dev_lock); + return &qp->ibqp; + +cpy_err: + ocrdma_del_qpn_map(dev, qp); +map_err: + ocrdma_mbx_destroy_qp(dev, qp); +mbx_err: + mutex_unlock(&dev->dev_lock); + kfree(qp->wqe_wr_id_tbl); + kfree(qp->rqe_wr_id_tbl); + kfree(qp); + pr_err("%s(%d) error=%d\n", __func__, dev->id, status); +gen_err: + return ERR_PTR(status); +} + +int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask) +{ + int status = 0; + struct ocrdma_qp *qp; + struct ocrdma_dev *dev; + enum ib_qp_state old_qps; + + qp = get_ocrdma_qp(ibqp); + dev = get_ocrdma_dev(ibqp->device); + if (attr_mask & IB_QP_STATE) + status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); + /* if new and previous states are same hw doesn't need to + * know about it. 
+ */ + if (status < 0) + return status; + status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); + + return status; +} + +int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + unsigned long flags; + int status = -EINVAL; + struct ocrdma_qp *qp; + struct ocrdma_dev *dev; + enum ib_qp_state old_qps, new_qps; + + qp = get_ocrdma_qp(ibqp); + dev = get_ocrdma_dev(ibqp->device); + + /* syncronize with multiple context trying to change, retrive qps */ + mutex_lock(&dev->dev_lock); + /* syncronize with wqe, rqe posting and cqe processing contexts */ + spin_lock_irqsave(&qp->q_lock, flags); + old_qps = get_ibqp_state(qp->state); + if (attr_mask & IB_QP_STATE) + new_qps = attr->qp_state; + else + new_qps = old_qps; + spin_unlock_irqrestore(&qp->q_lock, flags); + + if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { + pr_err("%s(%d) invalid attribute mask=0x%x specified for\n" + "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", + __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, + old_qps, new_qps); + goto param_err; + } + + status = _ocrdma_modify_qp(ibqp, attr, attr_mask); + if (status > 0) + status = 0; +param_err: + mutex_unlock(&dev->dev_lock); + return status; +} + +static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu) +{ + switch (mtu) { + case 256: + return IB_MTU_256; + case 512: + return IB_MTU_512; + case 1024: + return IB_MTU_1024; + case 2048: + return IB_MTU_2048; + case 4096: + return IB_MTU_4096; + default: + return IB_MTU_1024; + } +} + +static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags) +{ + int ib_qp_acc_flags = 0; + + if (qp_cap_flags & OCRDMA_QP_INB_WR) + ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; + if (qp_cap_flags & OCRDMA_QP_INB_RD) + ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; + return ib_qp_acc_flags; +} + +int ocrdma_query_qp(struct ib_qp *ibqp, + struct ib_qp_attr *qp_attr, + int attr_mask, struct ib_qp_init_attr *qp_init_attr) +{ + int status; + u32 qp_state; + struct ocrdma_qp_params params; + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); + struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device); + + memset(¶ms, 0, sizeof(params)); + mutex_lock(&dev->dev_lock); + status = ocrdma_mbx_query_qp(dev, qp, ¶ms); + mutex_unlock(&dev->dev_lock); + if (status) + goto mbx_err; + if (qp->qp_type == IB_QPT_UD) + qp_attr->qkey = params.qkey; + qp_attr->path_mtu = + ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & + OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT; + qp_attr->path_mig_state = IB_MIG_MIGRATED; + qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK; + qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK; + qp_attr->dest_qp_num = + params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK; + + qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags); + qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; + qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; + qp_attr->cap.max_send_sge = qp->sq.max_sges; + qp_attr->cap.max_recv_sge = qp->rq.max_sges; + qp_attr->cap.max_inline_data = qp->max_inline_data; + qp_init_attr->cap = qp_attr->cap; + memcpy(&qp_attr->ah_attr.grh.dgid, ¶ms.dgid[0], + sizeof(params.dgid)); + qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl & + OCRDMA_QP_PARAMS_FLOW_LABEL_MASK; + qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; + qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn & + OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> + OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; + 
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & + OCRDMA_QP_PARAMS_TCLASS_MASK) >> + OCRDMA_QP_PARAMS_TCLASS_SHIFT; + + qp_attr->ah_attr.ah_flags = IB_AH_GRH; + qp_attr->ah_attr.port_num = 1; + qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl & + OCRDMA_QP_PARAMS_SL_MASK) >> + OCRDMA_QP_PARAMS_SL_SHIFT; + qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn & + OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >> + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; + qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn & + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >> + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT; + qp_attr->retry_cnt = + (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >> + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT; + qp_attr->min_rnr_timer = 0; + qp_attr->pkey_index = 0; + qp_attr->port_num = 1; + qp_attr->ah_attr.src_path_bits = 0; + qp_attr->ah_attr.static_rate = 0; + qp_attr->alt_pkey_index = 0; + qp_attr->alt_port_num = 0; + qp_attr->alt_timeout = 0; + memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); + qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> + OCRDMA_QP_PARAMS_STATE_SHIFT; + qp_attr->qp_state = get_ibqp_state(qp_state); + qp_attr->cur_qp_state = qp_attr->qp_state; + qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; + qp_attr->max_dest_rd_atomic = + params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; + qp_attr->max_rd_atomic = + params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; + qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & + OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; + /* Sync driver QP state with FW */ + ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); +mbx_err: + return status; +} + +static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) +{ + unsigned int i = idx / 32; + u32 mask = (1U << (idx % 32)); + + srq->idx_bit_fields[i] ^= mask; +} + +static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) +{ + return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt; +} + +static int is_hw_sq_empty(struct ocrdma_qp *qp) +{ + return (qp->sq.tail == qp->sq.head); +} + +static int is_hw_rq_empty(struct ocrdma_qp *qp) +{ + return (qp->rq.tail == qp->rq.head); +} + +static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q) +{ + return q->va + (q->head * q->entry_size); +} + +static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q, + u32 idx) +{ + return q->va + (idx * q->entry_size); +} + +static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q) +{ + q->head = (q->head + 1) & q->max_wqe_idx; +} + +static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q) +{ + q->tail = (q->tail + 1) & q->max_wqe_idx; +} + +/* discard the cqe for a given QP */ +static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) +{ + unsigned long cq_flags; + unsigned long flags; + int discard_cnt = 0; + u32 cur_getp, stop_getp; + struct ocrdma_cqe *cqe; + u32 qpn = 0, wqe_idx = 0; + + spin_lock_irqsave(&cq->cq_lock, cq_flags); + + /* traverse through the CQEs in the hw CQ, + * find the matching CQE for a given qp, + * mark the matching one discarded by clearing qpn. + * ring the doorbell in the poll_cq() as + * we don't complete out of order cqe. + */ + + cur_getp = cq->getp; + /* find upto when do we reap the cq. */ + stop_getp = cur_getp; + do { + if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) + break; + + cqe = cq->va + cur_getp; + /* if (a) done reaping whole hw cq, or + * (b) qp_xq becomes empty. 
+ * then exit + */ + qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; + /* if previously discarded cqe found, skip that too. */ + /* check for matching qp */ + if (qpn == 0 || qpn != qp->id) + goto skip_cqe; + + if (is_cqe_for_sq(cqe)) { + ocrdma_hwq_inc_tail(&qp->sq); + } else { + if (qp->srq) { + wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> + OCRDMA_CQE_BUFTAG_SHIFT) & + qp->srq->rq.max_wqe_idx; + if (wqe_idx < 1) + BUG(); + spin_lock_irqsave(&qp->srq->q_lock, flags); + ocrdma_hwq_inc_tail(&qp->srq->rq); + ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); + spin_unlock_irqrestore(&qp->srq->q_lock, flags); + + } else { + ocrdma_hwq_inc_tail(&qp->rq); + } + } + /* mark cqe discarded so that it is not picked up later + * in the poll_cq(). + */ + discard_cnt += 1; + cqe->cmn.qpn = 0; +skip_cqe: + cur_getp = (cur_getp + 1) % cq->max_hw_cqe; + } while (cur_getp != stop_getp); + spin_unlock_irqrestore(&cq->cq_lock, cq_flags); +} + +void ocrdma_del_flush_qp(struct ocrdma_qp *qp) +{ + int found = false; + unsigned long flags; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + /* sync with any active CQ poll */ + + spin_lock_irqsave(&dev->flush_q_lock, flags); + found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); + if (found) + list_del(&qp->sq_entry); + if (!qp->srq) { + found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); + if (found) + list_del(&qp->rq_entry); + } + spin_unlock_irqrestore(&dev->flush_q_lock, flags); +} + +int ocrdma_destroy_qp(struct ib_qp *ibqp) +{ + struct ocrdma_pd *pd; + struct ocrdma_qp *qp; + struct ocrdma_dev *dev; + struct ib_qp_attr attrs; + int attr_mask; + unsigned long flags; + + qp = get_ocrdma_qp(ibqp); + dev = get_ocrdma_dev(ibqp->device); + + pd = qp->pd; + + /* change the QP state to ERROR */ + if (qp->state != OCRDMA_QPS_RST) { + attrs.qp_state = IB_QPS_ERR; + attr_mask = IB_QP_STATE; + _ocrdma_modify_qp(ibqp, &attrs, attr_mask); + } + /* ensure that CQEs for newly created QP (whose id may be same with + * one which just getting destroyed are same), dont get + * discarded until the old CQEs are discarded. + */ + mutex_lock(&dev->dev_lock); + (void) ocrdma_mbx_destroy_qp(dev, qp); + + /* + * acquire CQ lock while destroy is in progress, in order to + * protect against proessing in-flight CQEs for this QP. 
+ */ + spin_lock_irqsave(&qp->sq_cq->cq_lock, flags); + if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) + spin_lock(&qp->rq_cq->cq_lock); + + ocrdma_del_qpn_map(dev, qp); + + if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) + spin_unlock(&qp->rq_cq->cq_lock); + spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags); + + if (!pd->uctx) { + ocrdma_discard_cqes(qp, qp->sq_cq); + ocrdma_discard_cqes(qp, qp->rq_cq); + } + mutex_unlock(&dev->dev_lock); + + if (pd->uctx) { + ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, + PAGE_ALIGN(qp->sq.len)); + if (!qp->srq) + ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, + PAGE_ALIGN(qp->rq.len)); + } + + ocrdma_del_flush_qp(qp); + + kfree(qp->wqe_wr_id_tbl); + kfree(qp->rqe_wr_id_tbl); + kfree(qp); + return 0; +} + +static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, + struct ib_udata *udata) +{ + int status; + struct ocrdma_create_srq_uresp uresp; + + memset(&uresp, 0, sizeof(uresp)); + uresp.rq_dbid = srq->rq.dbid; + uresp.num_rq_pages = 1; + uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va); + uresp.rq_page_size = srq->rq.len; + uresp.db_page_addr = dev->nic_info.unmapped_db + + (srq->pd->id * dev->nic_info.db_page_size); + uresp.db_page_size = dev->nic_info.db_page_size; + uresp.num_rqe_allocated = srq->rq.max_cnt; + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { + uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; + uresp.db_shift = 24; + } else { + uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; + uresp.db_shift = 16; + } + + status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (status) + return status; + status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0], + uresp.rq_page_size); + if (status) + return status; + return status; +} + +struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + int status = -ENOMEM; + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + struct ocrdma_srq *srq; + + if (init_attr->attr.max_sge > dev->attr.max_recv_sge) + return ERR_PTR(-EINVAL); + if (init_attr->attr.max_wr > dev->attr.max_rqe) + return ERR_PTR(-EINVAL); + + srq = kzalloc(sizeof(*srq), GFP_KERNEL); + if (!srq) + return ERR_PTR(status); + + spin_lock_init(&srq->q_lock); + srq->pd = pd; + srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); + status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd); + if (status) + goto err; + + if (udata == NULL) { + srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, + GFP_KERNEL); + if (srq->rqe_wr_id_tbl == NULL) + goto arm_err; + + srq->bit_fields_len = (srq->rq.max_cnt / 32) + + (srq->rq.max_cnt % 32 ? 
1 : 0); + srq->idx_bit_fields = + kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL); + if (srq->idx_bit_fields == NULL) + goto arm_err; + memset(srq->idx_bit_fields, 0xff, + srq->bit_fields_len * sizeof(u32)); + } + + if (init_attr->attr.srq_limit) { + status = ocrdma_mbx_modify_srq(srq, &init_attr->attr); + if (status) + goto arm_err; + } + + if (udata) { + status = ocrdma_copy_srq_uresp(dev, srq, udata); + if (status) + goto arm_err; + } + + return &srq->ibsrq; + +arm_err: + ocrdma_mbx_destroy_srq(dev, srq); +err: + kfree(srq->rqe_wr_id_tbl); + kfree(srq->idx_bit_fields); + kfree(srq); + return ERR_PTR(status); +} + +int ocrdma_modify_srq(struct ib_srq *ibsrq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata) +{ + int status = 0; + struct ocrdma_srq *srq; + + srq = get_ocrdma_srq(ibsrq); + if (srq_attr_mask & IB_SRQ_MAX_WR) + status = -EINVAL; + else + status = ocrdma_mbx_modify_srq(srq, srq_attr); + return status; +} + +int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) +{ + int status; + struct ocrdma_srq *srq; + + srq = get_ocrdma_srq(ibsrq); + status = ocrdma_mbx_query_srq(srq, srq_attr); + return status; +} + +int ocrdma_destroy_srq(struct ib_srq *ibsrq) +{ + int status; + struct ocrdma_srq *srq; + struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device); + + srq = get_ocrdma_srq(ibsrq); + + status = ocrdma_mbx_destroy_srq(dev, srq); + + if (srq->pd->uctx) + ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, + PAGE_ALIGN(srq->rq.len)); + + kfree(srq->idx_bit_fields); + kfree(srq->rqe_wr_id_tbl); + kfree(srq); + return status; +} + +/* unprivileged verbs and their support functions. */ +static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, + struct ocrdma_hdr_wqe *hdr, + struct ib_send_wr *wr) +{ + struct ocrdma_ewqe_ud_hdr *ud_hdr = + (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); + struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); + + ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; + if (qp->qp_type == IB_QPT_GSI) + ud_hdr->qkey = qp->qkey; + else + ud_hdr->qkey = wr->wr.ud.remote_qkey; + ud_hdr->rsvd_ahid = ah->id; + if (ah->av->valid & OCRDMA_AV_VLAN_VALID) + hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); +} + +static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, + struct ocrdma_sge *sge, int num_sge, + struct ib_sge *sg_list) +{ + int i; + + for (i = 0; i < num_sge; i++) { + sge[i].lrkey = sg_list[i].lkey; + sge[i].addr_lo = sg_list[i].addr; + sge[i].addr_hi = upper_32_bits(sg_list[i].addr); + sge[i].len = sg_list[i].length; + hdr->total_len += sg_list[i].length; + } + if (num_sge == 0) + memset(sge, 0, sizeof(*sge)); +} + +static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge) +{ + uint32_t total_len = 0, i; + + for (i = 0; i < num_sge; i++) + total_len += sg_list[i].length; + return total_len; +} + + +static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, + struct ocrdma_hdr_wqe *hdr, + struct ocrdma_sge *sge, + struct ib_send_wr *wr, u32 wqe_size) +{ + int i; + char *dpp_addr; + + if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { + hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); + if (unlikely(hdr->total_len > qp->max_inline_data)) { + pr_err("%s() supported_len=0x%x,\n" + " unsupported len req=0x%x\n", __func__, + qp->max_inline_data, hdr->total_len); + return -EINVAL; + } + dpp_addr = (char *)sge; + for (i = 0; i < wr->num_sge; i++) { + memcpy(dpp_addr, + (void *)(unsigned long)wr->sg_list[i].addr, + wr->sg_list[i].length); + dpp_addr 
+= wr->sg_list[i].length; + } + + wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES); + if (0 == hdr->total_len) + wqe_size += sizeof(struct ocrdma_sge); + hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT); + } else { + ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); + if (wr->num_sge) + wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge)); + else + wqe_size += sizeof(struct ocrdma_sge); + hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); + } + hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); + return 0; +} + +static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, + struct ib_send_wr *wr) +{ + int status; + struct ocrdma_sge *sge; + u32 wqe_size = sizeof(*hdr); + + if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { + ocrdma_build_ud_hdr(qp, hdr, wr); + sge = (struct ocrdma_sge *)(hdr + 2); + wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr); + } else { + sge = (struct ocrdma_sge *)(hdr + 1); + } + + status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); + return status; +} + +static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, + struct ib_send_wr *wr) +{ + int status; + struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); + struct ocrdma_sge *sge = ext_rw + 1; + u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw); + + status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); + if (status) + return status; + ext_rw->addr_lo = wr->wr.rdma.remote_addr; + ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); + ext_rw->lrkey = wr->wr.rdma.rkey; + ext_rw->len = hdr->total_len; + return 0; +} + +static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, + struct ib_send_wr *wr) +{ + struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); + struct ocrdma_sge *sge = ext_rw + 1; + u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) + + sizeof(struct ocrdma_hdr_wqe); + + ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); + hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); + hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); + hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); + + ext_rw->addr_lo = wr->wr.rdma.remote_addr; + ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); + ext_rw->lrkey = wr->wr.rdma.rkey; + ext_rw->len = hdr->total_len; +} + +static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, + struct ocrdma_hw_mr *hwmr) +{ + int i; + u64 buf_addr = 0; + int num_pbes; + struct ocrdma_pbe *pbe; + + pbe = (struct ocrdma_pbe *)pbl_tbl->va; + num_pbes = 0; + + /* go through the OS phy regions & fill hw pbe entries into pbls. */ + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { + /* number of pbes can be more for one OS buf, when + * buffers are of different sizes. + * split the ib_buf to one or more pbes. + */ + buf_addr = wr->wr.fast_reg.page_list->page_list[i]; + pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK)); + pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr)); + num_pbes += 1; + pbe++; + + /* if the pbl is full storing the pbes, + * move to next pbl. 
+ */ + if (num_pbes == (hwmr->pbl_size/sizeof(u64))) { + pbl_tbl++; + pbe = (struct ocrdma_pbe *)pbl_tbl->va; + } + } + return; +} + +static int get_encoded_page_size(int pg_sz) +{ + /* Max size is 256M 4096 << 16 */ + int i = 0; + for (; i < 17; i++) + if (pg_sz == (4096 << i)) + break; + return i; +} + + +static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, + struct ib_send_wr *wr) +{ + u64 fbo; + struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); + struct ocrdma_mr *mr; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); + + wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); + + if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) + return -EINVAL; + + hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); + hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); + + if (wr->wr.fast_reg.page_list_len == 0) + BUG(); + if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE) + hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR; + if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE) + hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR; + if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ) + hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD; + hdr->lkey = wr->wr.fast_reg.rkey; + hdr->total_len = wr->wr.fast_reg.length; + + fbo = wr->wr.fast_reg.iova_start - + (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK); + + fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start); + fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff); + fast_reg->fbo_hi = upper_32_bits(fbo); + fast_reg->fbo_lo = (u32) fbo & 0xffffffff; + fast_reg->num_sges = wr->wr.fast_reg.page_list_len; + fast_reg->size_sge = + get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); + mr = (struct ocrdma_mr *) (unsigned long) + dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; + build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); + return 0; +} + +static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) +{ + u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); + + iowrite32(val, qp->sq_db); +} + +int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + int status = 0; + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); + struct ocrdma_hdr_wqe *hdr; + unsigned long flags; + + spin_lock_irqsave(&qp->q_lock, flags); + if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + return -EINVAL; + } + + while (wr) { + if (qp->qp_type == IB_QPT_UD && + (wr->opcode != IB_WR_SEND && + wr->opcode != IB_WR_SEND_WITH_IMM)) { + *bad_wr = wr; + status = -EINVAL; + break; + } + if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || + wr->num_sge > qp->sq.max_sges) { + *bad_wr = wr; + status = -ENOMEM; + break; + } + hdr = ocrdma_hwq_head(&qp->sq); + hdr->cw = 0; + if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) + hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); + if (wr->send_flags & IB_SEND_FENCE) + hdr->cw |= + (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT); + if (wr->send_flags & IB_SEND_SOLICITED) + hdr->cw |= + (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT); + hdr->total_len = 0; + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); + hdr->immdt = ntohl(wr->ex.imm_data); + case IB_WR_SEND: + hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); + ocrdma_build_send(qp, hdr, wr); + break; + case IB_WR_SEND_WITH_INV: + 
hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); + hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); + hdr->lkey = wr->ex.invalidate_rkey; + status = ocrdma_build_send(qp, hdr, wr); + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); + hdr->immdt = ntohl(wr->ex.imm_data); + case IB_WR_RDMA_WRITE: + hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); + status = ocrdma_build_write(qp, hdr, wr); + break; + case IB_WR_RDMA_READ: + ocrdma_build_read(qp, hdr, wr); + break; + case IB_WR_LOCAL_INV: + hdr->cw |= + (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT); + hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) + + sizeof(struct ocrdma_sge)) / + OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT; + hdr->lkey = wr->ex.invalidate_rkey; + break; + case IB_WR_FAST_REG_MR: + status = ocrdma_build_fr(qp, hdr, wr); + break; + default: + status = -EINVAL; + break; + } + if (status) { + *bad_wr = wr; + break; + } + if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) + qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; + else + qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; + qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; + ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) & + OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE); + /* make sure wqe is written before adapter can access it */ + wmb(); + /* inform hw to start processing it */ + ocrdma_ring_sq_db(qp); + + /* update pointer, counter for next wr */ + ocrdma_hwq_inc_head(&qp->sq); + wr = wr->next; + } + spin_unlock_irqrestore(&qp->q_lock, flags); + return status; +} + +static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) +{ + u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT); + + iowrite32(val, qp->rq_db); +} + +static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, + u16 tag) +{ + u32 wqe_size = 0; + struct ocrdma_sge *sge; + if (wr->num_sge) + wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe); + else + wqe_size = sizeof(*sge) + sizeof(*rqe); + + rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << + OCRDMA_WQE_SIZE_SHIFT); + rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); + rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); + rqe->total_len = 0; + rqe->rsvd_tag = tag; + sge = (struct ocrdma_sge *)(rqe + 1); + ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list); + ocrdma_cpu_to_le32(rqe, wqe_size); +} + +int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + int status = 0; + unsigned long flags; + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); + struct ocrdma_hdr_wqe *rqe; + + spin_lock_irqsave(&qp->q_lock, flags); + if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + return -EINVAL; + } + while (wr) { + if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || + wr->num_sge > qp->rq.max_sges) { + *bad_wr = wr; + status = -ENOMEM; + break; + } + rqe = ocrdma_hwq_head(&qp->rq); + ocrdma_build_rqe(rqe, wr, 0); + + qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; + /* make sure rqe is written before adapter can access it */ + wmb(); + + /* inform hw to start processing it */ + ocrdma_ring_rq_db(qp); + + /* update pointer, counter for next wr */ + ocrdma_hwq_inc_head(&qp->rq); + wr = wr->next; + } + spin_unlock_irqrestore(&qp->q_lock, flags); + return status; +} + +/* cqe for srq's rqe can potentially arrive out of order. + * index gives the entry in the shadow table where to store + * the wr_id. tag/index is returned in cqe to reference back + * for a given rqe. 
+ */ +static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) +{ + int row = 0; + int indx = 0; + + for (row = 0; row < srq->bit_fields_len; row++) { + if (srq->idx_bit_fields[row]) { + indx = ffs(srq->idx_bit_fields[row]); + indx = (row * 32) + (indx - 1); + if (indx >= srq->rq.max_cnt) + BUG(); + ocrdma_srq_toggle_bit(srq, indx); + break; + } + } + + if (row == srq->bit_fields_len) + BUG(); + return indx + 1; /* Use from index 1 */ +} + +static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) +{ + u32 val = srq->rq.dbid | (1 << 16); + + iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET); +} + +int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + int status = 0; + unsigned long flags; + struct ocrdma_srq *srq; + struct ocrdma_hdr_wqe *rqe; + u16 tag; + + srq = get_ocrdma_srq(ibsrq); + + spin_lock_irqsave(&srq->q_lock, flags); + while (wr) { + if (ocrdma_hwq_free_cnt(&srq->rq) == 0 || + wr->num_sge > srq->rq.max_sges) { + status = -ENOMEM; + *bad_wr = wr; + break; + } + tag = ocrdma_srq_get_idx(srq); + rqe = ocrdma_hwq_head(&srq->rq); + ocrdma_build_rqe(rqe, wr, tag); + + srq->rqe_wr_id_tbl[tag] = wr->wr_id; + /* make sure rqe is written before adapter can perform DMA */ + wmb(); + /* inform hw to start processing it */ + ocrdma_ring_srq_db(srq); + /* update pointer, counter for next wr */ + ocrdma_hwq_inc_head(&srq->rq); + wr = wr->next; + } + spin_unlock_irqrestore(&srq->q_lock, flags); + return status; +} + +static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) +{ + enum ib_wc_status ibwc_status; + + switch (status) { + case OCRDMA_CQE_GENERAL_ERR: + ibwc_status = IB_WC_GENERAL_ERR; + break; + case OCRDMA_CQE_LOC_LEN_ERR: + ibwc_status = IB_WC_LOC_LEN_ERR; + break; + case OCRDMA_CQE_LOC_QP_OP_ERR: + ibwc_status = IB_WC_LOC_QP_OP_ERR; + break; + case OCRDMA_CQE_LOC_EEC_OP_ERR: + ibwc_status = IB_WC_LOC_EEC_OP_ERR; + break; + case OCRDMA_CQE_LOC_PROT_ERR: + ibwc_status = IB_WC_LOC_PROT_ERR; + break; + case OCRDMA_CQE_WR_FLUSH_ERR: + ibwc_status = IB_WC_WR_FLUSH_ERR; + break; + case OCRDMA_CQE_MW_BIND_ERR: + ibwc_status = IB_WC_MW_BIND_ERR; + break; + case OCRDMA_CQE_BAD_RESP_ERR: + ibwc_status = IB_WC_BAD_RESP_ERR; + break; + case OCRDMA_CQE_LOC_ACCESS_ERR: + ibwc_status = IB_WC_LOC_ACCESS_ERR; + break; + case OCRDMA_CQE_REM_INV_REQ_ERR: + ibwc_status = IB_WC_REM_INV_REQ_ERR; + break; + case OCRDMA_CQE_REM_ACCESS_ERR: + ibwc_status = IB_WC_REM_ACCESS_ERR; + break; + case OCRDMA_CQE_REM_OP_ERR: + ibwc_status = IB_WC_REM_OP_ERR; + break; + case OCRDMA_CQE_RETRY_EXC_ERR: + ibwc_status = IB_WC_RETRY_EXC_ERR; + break; + case OCRDMA_CQE_RNR_RETRY_EXC_ERR: + ibwc_status = IB_WC_RNR_RETRY_EXC_ERR; + break; + case OCRDMA_CQE_LOC_RDD_VIOL_ERR: + ibwc_status = IB_WC_LOC_RDD_VIOL_ERR; + break; + case OCRDMA_CQE_REM_INV_RD_REQ_ERR: + ibwc_status = IB_WC_REM_INV_RD_REQ_ERR; + break; + case OCRDMA_CQE_REM_ABORT_ERR: + ibwc_status = IB_WC_REM_ABORT_ERR; + break; + case OCRDMA_CQE_INV_EECN_ERR: + ibwc_status = IB_WC_INV_EECN_ERR; + break; + case OCRDMA_CQE_INV_EEC_STATE_ERR: + ibwc_status = IB_WC_INV_EEC_STATE_ERR; + break; + case OCRDMA_CQE_FATAL_ERR: + ibwc_status = IB_WC_FATAL_ERR; + break; + case OCRDMA_CQE_RESP_TIMEOUT_ERR: + ibwc_status = IB_WC_RESP_TIMEOUT_ERR; + break; + default: + ibwc_status = IB_WC_GENERAL_ERR; + break; + } + return ibwc_status; +} + +static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, + u32 wqe_idx) +{ + struct ocrdma_hdr_wqe *hdr; + struct ocrdma_sge *rw; + int opcode; + + hdr = 
ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); + + ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; + /* Undo the hdr->cw swap */ + opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK; + switch (opcode) { + case OCRDMA_WRITE: + ibwc->opcode = IB_WC_RDMA_WRITE; + break; + case OCRDMA_READ: + rw = (struct ocrdma_sge *)(hdr + 1); + ibwc->opcode = IB_WC_RDMA_READ; + ibwc->byte_len = rw->len; + break; + case OCRDMA_SEND: + ibwc->opcode = IB_WC_SEND; + break; + case OCRDMA_FR_MR: + ibwc->opcode = IB_WC_FAST_REG_MR; + break; + case OCRDMA_LKEY_INV: + ibwc->opcode = IB_WC_LOCAL_INV; + break; + default: + ibwc->status = IB_WC_GENERAL_ERR; + pr_err("%s() invalid opcode received = 0x%x\n", + __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); + break; + } +} + +static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, + struct ocrdma_cqe *cqe) +{ + if (is_cqe_for_sq(cqe)) { + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( + cqe->flags_status_srcqpn) & + ~OCRDMA_CQE_STATUS_MASK); + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( + cqe->flags_status_srcqpn) | + (OCRDMA_CQE_WR_FLUSH_ERR << + OCRDMA_CQE_STATUS_SHIFT)); + } else { + if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( + cqe->flags_status_srcqpn) & + ~OCRDMA_CQE_UD_STATUS_MASK); + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( + cqe->flags_status_srcqpn) | + (OCRDMA_CQE_WR_FLUSH_ERR << + OCRDMA_CQE_UD_STATUS_SHIFT)); + } else { + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( + cqe->flags_status_srcqpn) & + ~OCRDMA_CQE_STATUS_MASK); + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( + cqe->flags_status_srcqpn) | + (OCRDMA_CQE_WR_FLUSH_ERR << + OCRDMA_CQE_STATUS_SHIFT)); + } + } +} + +static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, + struct ocrdma_qp *qp, int status) +{ + bool expand = false; + + ibwc->byte_len = 0; + ibwc->qp = &qp->ibqp; + ibwc->status = ocrdma_to_ibwc_err(status); + + ocrdma_flush_qp(qp); + ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL); + + /* if wqe/rqe pending for which cqe needs to be returned, + * trigger inflating it. + */ + if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { + expand = true; + ocrdma_set_cqe_status_flushed(qp, cqe); + } + return expand; +} + +static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, + struct ocrdma_qp *qp, int status) +{ + ibwc->opcode = IB_WC_RECV; + ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; + ocrdma_hwq_inc_tail(&qp->rq); + + return ocrdma_update_err_cqe(ibwc, cqe, qp, status); +} + +static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, + struct ocrdma_qp *qp, int status) +{ + ocrdma_update_wc(qp, ibwc, qp->sq.tail); + ocrdma_hwq_inc_tail(&qp->sq); + + return ocrdma_update_err_cqe(ibwc, cqe, qp, status); +} + + +static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, + struct ocrdma_cqe *cqe, struct ib_wc *ibwc, + bool *polled, bool *stop) +{ + bool expand; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + int status = (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; + if (status < OCRDMA_MAX_CQE_ERR) + atomic_inc(&dev->cqe_err_stats[status]); + + /* when hw sq is empty, but rq is not empty, so we continue + * to keep the cqe in order to get the cq event again. + */ + if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { + /* when cq for rq and sq is same, it is safe to return + * flush cqe for RQEs. 
+ */ + if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { + *polled = true; + status = OCRDMA_CQE_WR_FLUSH_ERR; + expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); + } else { + /* stop processing further cqe as this cqe is used for + * triggering cq event on buddy cq of RQ. + * When QP is destroyed, this cqe will be removed + * from the cq's hardware q. + */ + *polled = false; + *stop = true; + expand = false; + } + } else if (is_hw_sq_empty(qp)) { + /* Do nothing */ + expand = false; + *polled = false; + *stop = false; + } else { + *polled = true; + expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); + } + return expand; +} + +static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, + struct ocrdma_cqe *cqe, + struct ib_wc *ibwc, bool *polled) +{ + bool expand = false; + int tail = qp->sq.tail; + u32 wqe_idx; + + if (!qp->wqe_wr_id_tbl[tail].signaled) { + *polled = false; /* WC cannot be consumed yet */ + } else { + ibwc->status = IB_WC_SUCCESS; + ibwc->wc_flags = 0; + ibwc->qp = &qp->ibqp; + ocrdma_update_wc(qp, ibwc, tail); + *polled = true; + } + wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) & + OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; + if (tail != wqe_idx) + expand = true; /* Coalesced CQE can't be consumed yet */ + + ocrdma_hwq_inc_tail(&qp->sq); + return expand; +} + +static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, + struct ib_wc *ibwc, bool *polled, bool *stop) +{ + int status; + bool expand; + + status = (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; + + if (status == OCRDMA_CQE_SUCCESS) + expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); + else + expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); + return expand; +} + +static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) +{ + int status; + + status = (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; + ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_SRCQP_MASK; + ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & + OCRDMA_CQE_PKEY_MASK; + ibwc->wc_flags = IB_WC_GRH; + ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> + OCRDMA_CQE_UD_XFER_LEN_SHIFT); + return status; +} + +static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, + struct ocrdma_cqe *cqe, + struct ocrdma_qp *qp) +{ + unsigned long flags; + struct ocrdma_srq *srq; + u32 wqe_idx; + + srq = get_ocrdma_srq(qp->ibqp.srq); + wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> + OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; + if (wqe_idx < 1) + BUG(); + + ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; + spin_lock_irqsave(&srq->q_lock, flags); + ocrdma_srq_toggle_bit(srq, wqe_idx - 1); + spin_unlock_irqrestore(&srq->q_lock, flags); + ocrdma_hwq_inc_tail(&srq->rq); +} + +static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, + struct ib_wc *ibwc, bool *polled, bool *stop, + int status) +{ + bool expand; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + + if (status < OCRDMA_MAX_CQE_ERR) + atomic_inc(&dev->cqe_err_stats[status]); + + /* when hw_rq is empty, but wq is not empty, so continue + * to keep the cqe to get the cq event again. 
+ */ + if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { + if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { + *polled = true; + status = OCRDMA_CQE_WR_FLUSH_ERR; + expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); + } else { + *polled = false; + *stop = true; + expand = false; + } + } else if (is_hw_rq_empty(qp)) { + /* Do nothing */ + expand = false; + *polled = false; + *stop = false; + } else { + *polled = true; + expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); + } + return expand; +} + +static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, + struct ocrdma_cqe *cqe, struct ib_wc *ibwc) +{ + ibwc->opcode = IB_WC_RECV; + ibwc->qp = &qp->ibqp; + ibwc->status = IB_WC_SUCCESS; + + if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) + ocrdma_update_ud_rcqe(ibwc, cqe); + else + ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen); + + if (is_cqe_imm(cqe)) { + ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); + ibwc->wc_flags |= IB_WC_WITH_IMM; + } else if (is_cqe_wr_imm(cqe)) { + ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); + ibwc->wc_flags |= IB_WC_WITH_IMM; + } else if (is_cqe_invalidated(cqe)) { + ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt); + ibwc->wc_flags |= IB_WC_WITH_INVALIDATE; + } + if (qp->ibqp.srq) { + ocrdma_update_free_srq_cqe(ibwc, cqe, qp); + } else { + ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; + ocrdma_hwq_inc_tail(&qp->rq); + } +} + +static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, + struct ib_wc *ibwc, bool *polled, bool *stop) +{ + int status; + bool expand = false; + + ibwc->wc_flags = 0; + if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { + status = (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_UD_STATUS_MASK) >> + OCRDMA_CQE_UD_STATUS_SHIFT; + } else { + status = (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; + } + + if (status == OCRDMA_CQE_SUCCESS) { + *polled = true; + ocrdma_poll_success_rcqe(qp, cqe, ibwc); + } else { + expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, + status); + } + return expand; +} + +static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, + u16 cur_getp) +{ + if (cq->phase_change) { + if (cur_getp == 0) + cq->phase = (~cq->phase & OCRDMA_CQE_VALID); + } else { + /* clear valid bit */ + cqe->flags_status_srcqpn = 0; + } +} + +static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, + struct ib_wc *ibwc) +{ + u16 qpn = 0; + int i = 0; + bool expand = false; + int polled_hw_cqes = 0; + struct ocrdma_qp *qp = NULL; + struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); + struct ocrdma_cqe *cqe; + u16 cur_getp; bool polled = false; bool stop = false; + + cur_getp = cq->getp; + while (num_entries) { + cqe = cq->va + cur_getp; + /* check whether valid cqe or not */ + if (!is_cqe_valid(cq, cqe)) + break; + qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); + /* ignore discarded cqe */ + if (qpn == 0) + goto skip_cqe; + qp = dev->qp_tbl[qpn]; + BUG_ON(qp == NULL); + + if (is_cqe_for_sq(cqe)) { + expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled, + &stop); + } else { + expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled, + &stop); + } + if (expand) + goto expand_cqe; + if (stop) + goto stop_cqe; + /* clear qpn to avoid duplicate processing by discard_cqe() */ + cqe->cmn.qpn = 0; +skip_cqe: + polled_hw_cqes += 1; + cur_getp = (cur_getp + 1) % cq->max_hw_cqe; + ocrdma_change_cq_phase(cq, cqe, cur_getp); +expand_cqe: + 
if (polled) { + num_entries -= 1; + i += 1; + ibwc = ibwc + 1; + polled = false; + } + } +stop_cqe: + cq->getp = cur_getp; + if (cq->deferred_arm) { + ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol, + polled_hw_cqes); + cq->deferred_arm = false; + cq->deferred_sol = false; + } else { + /* We need to pop the CQE. No need to arm */ + ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol, + polled_hw_cqes); + cq->deferred_sol = false; + } + + return i; +} + +/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */ +static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, + struct ocrdma_qp *qp, struct ib_wc *ibwc) +{ + int err_cqes = 0; + + while (num_entries) { + if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp)) + break; + if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { + ocrdma_update_wc(qp, ibwc, qp->sq.tail); + ocrdma_hwq_inc_tail(&qp->sq); + } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { + ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; + ocrdma_hwq_inc_tail(&qp->rq); + } else { + return err_cqes; + } + ibwc->byte_len = 0; + ibwc->status = IB_WC_WR_FLUSH_ERR; + ibwc = ibwc + 1; + err_cqes += 1; + num_entries -= 1; + } + return err_cqes; +} + +int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + int cqes_to_poll = num_entries; + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); + struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); + int num_os_cqe = 0, err_cqes = 0; + struct ocrdma_qp *qp; + unsigned long flags; + + /* poll cqes from adapter CQ */ + spin_lock_irqsave(&cq->cq_lock, flags); + num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc); + spin_unlock_irqrestore(&cq->cq_lock, flags); + cqes_to_poll -= num_os_cqe; + + if (cqes_to_poll) { + wc = wc + num_os_cqe; + /* adapter returns single error cqe when qp moves to + * error state. So insert error cqes with wc_status as + * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ + * respectively which uses this CQ. 
+ */ + spin_lock_irqsave(&dev->flush_q_lock, flags); + list_for_each_entry(qp, &cq->sq_head, sq_entry) { + if (cqes_to_poll == 0) + break; + err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); + cqes_to_poll -= err_cqes; + num_os_cqe += err_cqes; + wc = wc + err_cqes; + } + spin_unlock_irqrestore(&dev->flush_q_lock, flags); + } + return num_os_cqe; +} + +int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) +{ + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); + struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); + u16 cq_id; + unsigned long flags; + bool arm_needed = false, sol_needed = false; + + cq_id = cq->id; + + spin_lock_irqsave(&cq->cq_lock, flags); + if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) + arm_needed = true; + if (cq_flags & IB_CQ_SOLICITED) + sol_needed = true; + + if (cq->first_arm) { + ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); + cq->first_arm = false; + } + + cq->deferred_arm = true; + cq->deferred_sol = sol_needed; + spin_unlock_irqrestore(&cq->cq_lock, flags); + + return 0; +} + +struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len) +{ + int status; + struct ocrdma_mr *mr; + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + + if (max_page_list_len > dev->attr.max_pages_per_frmr) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + status = ocrdma_get_pbl_info(dev, mr, max_page_list_len); + if (status) + goto pbl_err; + mr->hwmr.fr_mr = 1; + mr->hwmr.remote_rd = 0; + mr->hwmr.remote_wr = 0; + mr->hwmr.local_rd = 0; + mr->hwmr.local_wr = 0; + mr->hwmr.mw_bind = 0; + status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); + if (status) + goto pbl_err; + status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0); + if (status) + goto mbx_err; + mr->ibmr.rkey = mr->hwmr.lkey; + mr->ibmr.lkey = mr->hwmr.lkey; + dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = + (unsigned long) mr; + return &mr->ibmr; +mbx_err: + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); +pbl_err: + kfree(mr); + return ERR_PTR(-ENOMEM); +} + +struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device + *ibdev, + int page_list_len) +{ + struct ib_fast_reg_page_list *frmr_list; + int size; + + size = sizeof(*frmr_list) + (page_list_len * sizeof(u64)); + frmr_list = kzalloc(size, GFP_KERNEL); + if (!frmr_list) + return ERR_PTR(-ENOMEM); + frmr_list->page_list = (u64 *)(frmr_list + 1); + return frmr_list; +} + +void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list) +{ + kfree(page_list); +} + +#define MAX_KERNEL_PBE_SIZE 65536 +static inline int count_kernel_pbes(struct ib_phys_buf *buf_list, + int buf_cnt, u32 *pbe_size) +{ + u64 total_size = 0; + u64 buf_size = 0; + int i; + *pbe_size = roundup(buf_list[0].size, PAGE_SIZE); + *pbe_size = roundup_pow_of_two(*pbe_size); + + /* find the smallest PBE size that we can have */ + for (i = 0; i < buf_cnt; i++) { + /* first addr may not be page aligned, so ignore checking */ + if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) || + (buf_list[i].size & ~PAGE_MASK))) { + return 0; + } + + /* if configured PBE size is greater then the chosen one, + * reduce the PBE size. + */ + buf_size = roundup(buf_list[i].size, PAGE_SIZE); + /* pbe_size has to be even multiple of 4K 1,2,4,8...*/ + buf_size = roundup_pow_of_two(buf_size); + if (*pbe_size > buf_size) + *pbe_size = buf_size; + + total_size += buf_size; + } + *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ? 
+ (MAX_KERNEL_PBE_SIZE) : (*pbe_size); + + /* num_pbes = total_size / (*pbe_size); this is implemented below. */ + + return total_size >> ilog2(*pbe_size); +} + +static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt, + u32 pbe_size, struct ocrdma_pbl *pbl_tbl, + struct ocrdma_hw_mr *hwmr) +{ + int i; + int idx; + int pbes_per_buf = 0; + u64 buf_addr = 0; + int num_pbes; + struct ocrdma_pbe *pbe; + int total_num_pbes = 0; + + if (!hwmr->num_pbes) + return; + + pbe = (struct ocrdma_pbe *)pbl_tbl->va; + num_pbes = 0; + + /* go through the OS phy regions & fill hw pbe entries into pbls. */ + for (i = 0; i < ib_buf_cnt; i++) { + buf_addr = buf_list[i].addr; + pbes_per_buf = + roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) / + pbe_size; + hwmr->len += buf_list[i].size; + /* number of pbes can be more for one OS buf, when + * buffers are of different sizes. + * split the ib_buf to one or more pbes. + */ + for (idx = 0; idx < pbes_per_buf; idx++) { + /* we program always page aligned addresses, + * first unaligned address is taken care by fbo. + */ + if (i == 0) { + /* for non zero fbo, assign the + * start of the page. + */ + pbe->pa_lo = + cpu_to_le32((u32) (buf_addr & PAGE_MASK)); + pbe->pa_hi = + cpu_to_le32((u32) upper_32_bits(buf_addr)); + } else { + pbe->pa_lo = + cpu_to_le32((u32) (buf_addr & 0xffffffff)); + pbe->pa_hi = + cpu_to_le32((u32) upper_32_bits(buf_addr)); + } + buf_addr += pbe_size; + num_pbes += 1; + total_num_pbes += 1; + pbe++; + + if (total_num_pbes == hwmr->num_pbes) + goto mr_tbl_done; + /* if the pbl is full storing the pbes, + * move to next pbl. + */ + if (num_pbes == (hwmr->pbl_size/sizeof(u64))) { + pbl_tbl++; + pbe = (struct ocrdma_pbe *)pbl_tbl->va; + num_pbes = 0; + } + } + } +mr_tbl_done: + return; +} + +struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd, + struct ib_phys_buf *buf_list, + int buf_cnt, int acc, u64 *iova_start) +{ + int status = -ENOMEM; + struct ocrdma_mr *mr; + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); + struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + u32 num_pbes; + u32 pbe_size = 0; + + if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE)) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(status); + + num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size); + if (num_pbes == 0) { + status = -EINVAL; + goto pbl_err; + } + status = ocrdma_get_pbl_info(dev, mr, num_pbes); + if (status) + goto pbl_err; + + mr->hwmr.pbe_size = pbe_size; + mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK); + mr->hwmr.va = *iova_start; + mr->hwmr.local_rd = 1; + mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; + mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 
1 : 0; + + status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); + if (status) + goto pbl_err; + build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table, + &mr->hwmr); + status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); + if (status) + goto mbx_err; + + mr->ibmr.lkey = mr->hwmr.lkey; + if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) + mr->ibmr.rkey = mr->hwmr.lkey; + return &mr->ibmr; + +mbx_err: + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); +pbl_err: + kfree(mr); + return ERR_PTR(status); +} diff --git a/kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h new file mode 100644 index 000000000..b8f7853fd --- /dev/null +++ b/kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -0,0 +1,99 @@ +/******************************************************************* + * This file is part of the Emulex RoCE Device Driver for * + * RoCE (RDMA over Converged Ethernet) adapters. * + * Copyright (C) 2008-2012 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + *******************************************************************/ + +#ifndef __OCRDMA_VERBS_H__ +#define __OCRDMA_VERBS_H__ + +int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, + struct ib_send_wr **bad_wr); +int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, + struct ib_recv_wr **bad_wr); + +int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); +int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags); + +int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props); +int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); +int ocrdma_modify_port(struct ib_device *, u8 port, int mask, + struct ib_port_modify *props); + +void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); +int ocrdma_query_gid(struct ib_device *, u8 port, + int index, union ib_gid *gid); +int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey); + +struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *, + struct ib_udata *); +int ocrdma_dealloc_ucontext(struct ib_ucontext *); + +int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); + +struct ib_pd *ocrdma_alloc_pd(struct ib_device *, + struct ib_ucontext *, struct ib_udata *); +int ocrdma_dealloc_pd(struct ib_pd *pd); + +struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector, + struct ib_ucontext *, struct ib_udata *); +int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); +int ocrdma_destroy_cq(struct ib_cq *); + +struct ib_qp *ocrdma_create_qp(struct ib_pd *, + struct ib_qp_init_attr *attrs, + struct ib_udata *); +int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr 
*attr, + int attr_mask); +int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int ocrdma_query_qp(struct ib_qp *, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *); +int ocrdma_destroy_qp(struct ib_qp *); +void ocrdma_del_flush_qp(struct ocrdma_qp *qp); + +struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *, + struct ib_udata *); +int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *, + enum ib_srq_attr_mask, struct ib_udata *); +int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *); +int ocrdma_destroy_srq(struct ib_srq *); +int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *, + struct ib_recv_wr **bad_recv_wr); + +int ocrdma_dereg_mr(struct ib_mr *); +struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc); +struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *, + struct ib_phys_buf *buffer_list, + int num_phys_buf, int acc, u64 *iova_start); +struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, + u64 virt, int acc, struct ib_udata *); +struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len); +struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device + *ibdev, + int page_list_len); +void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list); + +#endif /* __OCRDMA_VERBS_H__ */ -- cgit 1.2.3-korg
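
For reference, a minimal user-space sketch of the SRQ shadow-index bitmap scheme that ocrdma_srq_get_idx() and ocrdma_srq_toggle_bit() implement above. It is not part of the imported driver; the names shadow_init/shadow_alloc/shadow_free and the MAX_RQE value are hypothetical. The idea, as the driver's own comment states, is that SRQ completions can arrive out of order, so each posted RQE carries a 1-based tag taken from a free-slot bitmap; the tag comes back in the CQE, the wr_id is looked up in the shadow table, and the slot is returned to the bitmap.

/*
 * Illustrative sketch only, assuming a 64-entry SRQ.
 * Bit set in idx_bits[]  => slot is free.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>                    /* ffs() */

#define MAX_RQE 64U
#define WORDS   ((MAX_RQE + 31) / 32)

static uint32_t idx_bits[WORDS];

static void shadow_init(void)
{
	for (unsigned i = 0; i < WORDS; i++)
		idx_bits[i] = 0xffffffffU;      /* all slots free */
}

static int shadow_alloc(void)           /* returns a 1-based tag, 0 if full */
{
	for (unsigned row = 0; row < WORDS; row++) {
		if (idx_bits[row]) {
			int bit = ffs((int)idx_bits[row]) - 1;
			unsigned idx = row * 32 + (unsigned)bit;

			if (idx >= MAX_RQE)
				return 0;
			idx_bits[row] ^= 1U << bit;     /* mark slot in use */
			return (int)idx + 1;            /* tag is 1-based */
		}
	}
	return 0;
}

static void shadow_free(int tag)        /* tag as returned in the CQE */
{
	unsigned idx = (unsigned)tag - 1;

	idx_bits[idx / 32] ^= 1U << (idx % 32); /* mark slot free again */
}

int main(void)
{
	shadow_init();
	int t1 = shadow_alloc();        /* 1 */
	int t2 = shadow_alloc();        /* 2 */
	shadow_free(t1);                /* completion for t1 arrives first...  */
	int t3 = shadow_alloc();        /* ...so slot 1 is reused: t3 == 1     */
	printf("tags: %d %d %d\n", t1, t2, t3);
	return 0;
}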