Diffstat (limited to 'kernel/drivers/infiniband/hw/mlx4')
 kernel/drivers/infiniband/hw/mlx4/ah.c         |   19
 kernel/drivers/infiniband/hw/mlx4/alias_GUID.c |    7
 kernel/drivers/infiniband/hw/mlx4/cq.c         |   17
 kernel/drivers/infiniband/hw/mlx4/mad.c        |  217
 kernel/drivers/infiniband/hw/mlx4/main.c       | 1166
 kernel/drivers/infiniband/hw/mlx4/mcg.c        |   10
 kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h    |  112
 kernel/drivers/infiniband/hw/mlx4/mr.c         |  180
 kernel/drivers/infiniband/hw/mlx4/qp.c         |  362
 kernel/drivers/infiniband/hw/mlx4/srq.c        |   13
10 files changed, 1196 insertions, 907 deletions
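Much of the mad.c churn below converts the proxy/tunnel MAD send paths from the old `wr.wr.ud.*` union inside `struct ib_send_wr` to the dedicated `struct ib_ud_wr` wrapper: the UD-specific fields move to the wrapper, the generic fields are reached through the embedded `wr.wr`, and the post is done on `&wr.wr`. The following is a minimal sketch of that pattern only, assuming a UD QP, an address handle and a DMA-mapped buffer set up elsewhere; the function and parameter names are illustrative, not from the patch.

```c
#include <rdma/ib_verbs.h>

/* Sketch only: post one signaled UD send the way the reworked mad.c
 * paths do it -- the UD metadata lives in struct ib_ud_wr and the
 * generic work-request fields are reached through the embedded wr.wr.
 */
static int post_ud_send_sketch(struct ib_qp *qp, struct ib_ah *ah,
			       u64 buf_dma, u32 len, u32 lkey,
			       u32 remote_qpn, u32 remote_qkey, u8 port)
{
	struct ib_sge list = {
		.addr	= buf_dma,
		.length	= len,
		.lkey	= lkey,		/* e.g. pd->local_dma_lkey */
	};
	struct ib_ud_wr wr = {
		.wr = {
			.sg_list	= &list,
			.num_sge	= 1,
			.opcode		= IB_WR_SEND,
			.send_flags	= IB_SEND_SIGNALED,
		},
		.ah		= ah,
		.remote_qpn	= remote_qpn,
		.remote_qkey	= remote_qkey,
		.port_num	= port,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
```

The same shape appears in mlx4_ib_send_to_slave() and mlx4_ib_send_to_wire() in the mad.c hunks, where list.lkey also switches from the now-removed per-context MR to pd->local_dma_lkey.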
diff --git a/kernel/drivers/infiniband/hw/mlx4/ah.c b/kernel/drivers/infiniband/hw/mlx4/ah.c index 33fdd5012..86af71351 100644 --- a/kernel/drivers/infiniband/hw/mlx4/ah.c +++ b/kernel/drivers/infiniband/hw/mlx4/ah.c @@ -76,7 +76,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr struct mlx4_dev *dev = ibdev->dev; int is_mcast = 0; struct in6_addr in6; - u16 vlan_tag; + u16 vlan_tag = 0xffff; + union ib_gid sgid; + struct ib_gid_attr gid_attr; + int ret; memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); if (rdma_is_multicast_addr(&in6)) { @@ -85,11 +88,21 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr } else { memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN); } - vlan_tag = ah_attr->vlan_id; + ret = ib_get_cached_gid(pd->device, ah_attr->port_num, + ah_attr->grh.sgid_index, &sgid, &gid_attr); + if (ret) + return ERR_PTR(ret); + memset(ah->av.eth.s_mac, 0, ETH_ALEN); + if (gid_attr.ndev) { + if (is_vlan_dev(gid_attr.ndev)) + vlan_tag = vlan_dev_vlan_id(gid_attr.ndev); + memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN); + dev_put(gid_attr.ndev); + } if (vlan_tag < 0x1000) vlan_tag |= (ah_attr->sl & 7) << 13; ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); - ah->av.eth.gid_index = ah_attr->grh.sgid_index; + ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); ah->av.eth.vlan = cpu_to_be16(vlan_tag); if (ah_attr->static_rate) { ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; diff --git a/kernel/drivers/infiniband/hw/mlx4/alias_GUID.c b/kernel/drivers/infiniband/hw/mlx4/alias_GUID.c index 0f00204d2..21cb41a60 100644 --- a/kernel/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/kernel/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -189,7 +189,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, { int i; u64 guid_indexes; - int slave_id; + int slave_id, slave_port; enum slave_port_state new_state; enum slave_port_state prev_state; __be64 tmp_cur_ag, form_cache_ag; @@ -217,6 +217,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; if (slave_id >= dev->dev->persist->num_vfs + 1) return; + + slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num); + if (slave_port < 0) /* this port isn't available for the VF */ + continue; + tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; form_cache_ag = get_cached_alias_guid(dev, port_num, (NUM_ALIAS_GUID_IN_REC * block_num) + i); diff --git a/kernel/drivers/infiniband/hw/mlx4/cq.c b/kernel/drivers/infiniband/hw/mlx4/cq.c index 2857ed897..b88fc8f5a 100644 --- a/kernel/drivers/infiniband/hw/mlx4/cq.c +++ b/kernel/drivers/infiniband/hw/mlx4/cq.c @@ -166,10 +166,14 @@ err_buf: return err; } -struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, +#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION +struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; + int vector = attr->comp_vector; struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_cq *cq; struct mlx4_uar *uar; @@ -178,6 +182,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector if (entries < 1 || entries > dev->dev->caps.max_cqes) return ERR_PTR(-EINVAL); + if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED) + return ERR_PTR(-EINVAL); + 
cq = kmalloc(sizeof *cq, GFP_KERNEL); if (!cq) return ERR_PTR(-ENOMEM); @@ -188,6 +195,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector spin_lock_init(&cq->lock); cq->resize_buf = NULL; cq->resize_umem = NULL; + cq->create_flags = attr->flags; INIT_LIST_HEAD(&cq->send_qp_list); INIT_LIST_HEAD(&cq->recv_qp_list); @@ -231,7 +239,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector vector = dev->eq_table[vector % ibdev->num_comp_vectors]; err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, - cq->db.dma, &cq->mcq, vector, 0, 0); + cq->db.dma, &cq->mcq, vector, 0, + !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION)); if (err) goto err_dbmap; @@ -809,7 +818,7 @@ repoll: wc->opcode = IB_WC_LSO; break; case MLX4_OPCODE_FMR: - wc->opcode = IB_WC_FAST_REG_MR; + wc->opcode = IB_WC_REG_MR; break; case MLX4_OPCODE_LOCAL_INVAL: wc->opcode = IB_WC_LOCAL_INV; @@ -862,7 +871,7 @@ repoll: if (is_eth) { wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_VLAN_PRESENT_MASK) { + MLX4_CQE_CVLAN_PRESENT_MASK) { wc->vlan_id = be16_to_cpu(cqe->sl_vid) & MLX4_CQE_VID_MASK; } else { diff --git a/kernel/drivers/infiniband/hw/mlx4/mad.c b/kernel/drivers/infiniband/hw/mlx4/mad.c index 9cd2b002d..870e56b6b 100644 --- a/kernel/drivers/infiniband/hw/mlx4/mad.c +++ b/kernel/drivers/infiniband/hw/mlx4/mad.c @@ -64,14 +64,6 @@ enum { #define GUID_TBL_BLK_NUM_ENTRIES 8 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) -/* Counters should be saturate once they reach their maximum value */ -#define ASSIGN_32BIT_COUNTER(counter, value) do {\ - if ((value) > U32_MAX) \ - counter = cpu_to_be32(U32_MAX); \ - else \ - counter = cpu_to_be32(value); \ -} while (0) - struct mlx4_mad_rcv_buf { struct ib_grh grh; u8 payload[256]; @@ -111,8 +103,9 @@ __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) } int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad) + int port, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const void *in_mad, void *response_mad) { struct mlx4_cmd_mailbox *inmailbox, *outmailbox; void *inbox; @@ -220,7 +213,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can * synthesize LID change, Client-Rereg, GID change, and P_Key change events. 
*/ -static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, +static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad, u16 prev_lid) { struct ib_port_info *pinfo; @@ -356,7 +349,7 @@ static void node_desc_override(struct ib_device *dev, } } -static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad) +static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad) { int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; struct ib_mad_send_buf *send_buf; @@ -366,7 +359,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, - IB_MGMT_MAD_DATA, GFP_ATOMIC); + IB_MGMT_MAD_DATA, GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; /* @@ -463,7 +457,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, struct ib_grh *grh, struct ib_mad *mad) { struct ib_sge list; - struct ib_send_wr wr, *bad_wr; + struct ib_ud_wr wr; + struct ib_send_wr *bad_wr; struct mlx4_ib_demux_pv_ctx *tun_ctx; struct mlx4_ib_demux_pv_qp *tun_qp; struct mlx4_rcv_tunnel_mad *tun_mad; @@ -586,20 +581,20 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map; list.length = sizeof (struct mlx4_rcv_tunnel_mad); - list.lkey = tun_ctx->mr->lkey; - - wr.wr.ud.ah = ah; - wr.wr.ud.port_num = port; - wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; - wr.wr.ud.remote_qpn = dqpn; - wr.next = NULL; - wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt); - wr.sg_list = &list; - wr.num_sge = 1; - wr.opcode = IB_WR_SEND; - wr.send_flags = IB_SEND_SIGNALED; - - ret = ib_post_send(src_qp, &wr, &bad_wr); + list.lkey = tun_ctx->pd->local_dma_lkey; + + wr.ah = ah; + wr.port_num = port; + wr.remote_qkey = IB_QP_SET_QKEY; + wr.remote_qpn = dqpn; + wr.wr.next = NULL; + wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt); + wr.wr.sg_list = &list; + wr.wr.num_sge = 1; + wr.wr.opcode = IB_WR_SEND; + wr.wr.send_flags = IB_SEND_SIGNALED; + + ret = ib_post_send(src_qp, &wr.wr, &bad_wr); out: if (ret) ib_destroy_ah(ah); @@ -722,8 +717,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, } static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { u16 slid, prev_lid = 0; int err; @@ -825,34 +820,39 @@ static void edit_counter(struct mlx4_counter *cnt, } static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { - struct mlx4_cmd_mailbox *mailbox; + struct mlx4_counter counter_stats; struct mlx4_ib_dev *dev = to_mdev(ibdev); - int err; - u32 inmod = dev->counters[port_num - 1] & 0xffff; - u8 mode; + struct counter_index *tmp_counter; + int err = IB_MAD_RESULT_FAILURE, stats_avail = 0; if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) return -EINVAL; - mailbox = mlx4_alloc_cmd_mailbox(dev->dev); - if (IS_ERR(mailbox)) - return IB_MAD_RESULT_FAILURE; - - err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0, - MLX4_CMD_QUERY_IF_STAT, 
MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_WRAPPED); - if (err) - err = IB_MAD_RESULT_FAILURE; - else { + memset(&counter_stats, 0, sizeof(counter_stats)); + mutex_lock(&dev->counters_table[port_num - 1].mutex); + list_for_each_entry(tmp_counter, + &dev->counters_table[port_num - 1].counters_list, + list) { + err = mlx4_get_counter_stats(dev->dev, + tmp_counter->index, + &counter_stats, 0); + if (err) { + err = IB_MAD_RESULT_FAILURE; + stats_avail = 0; + break; + } + stats_avail = 1; + } + mutex_unlock(&dev->counters_table[port_num - 1].mutex); + if (stats_avail) { memset(out_mad->data, 0, sizeof out_mad->data); - mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode; - switch (mode & 0xf) { + switch (counter_stats.counter_mode & 0xf) { case 0: - edit_counter(mailbox->buf, - (void *)(out_mad->data + 40)); + edit_counter(&counter_stats, + (void *)(out_mad->data + 40)); err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; break; default: @@ -860,25 +860,43 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, } } - mlx4_free_cmd_mailbox(dev->dev, mailbox); - return err; } int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { - switch (rdma_port_get_link_layer(ibdev, port_num)) { - case IB_LINK_LAYER_INFINIBAND: + struct mlx4_ib_dev *dev = to_mdev(ibdev); + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num); + + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; + + /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA + * queries, should be called only by VFs and for that specific purpose + */ + if (link == IB_LINK_LAYER_INFINIBAND) { + if (mlx4_is_slave(dev->dev) && + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && + in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); + return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in_mad, out_mad); - case IB_LINK_LAYER_ETHERNET: - return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); - default: - return -EINVAL; } + + if (link == IB_LINK_LAYER_ETHERNET) + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); + + return -EINVAL; } static void send_handler(struct ib_mad_agent *agent, @@ -1127,7 +1145,7 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, sg_list.addr = tun_qp->ring[index].map; sg_list.length = size; - sg_list.lkey = ctx->mr->lkey; + sg_list.lkey = ctx->pd->local_dma_lkey; recv_wr.next = NULL; recv_wr.sg_list = &sg_list; @@ -1166,10 +1184,11 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave) int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr, - u8 *s_mac, struct ib_mad *mad) + u8 *s_mac, u16 vlan_id, struct ib_mad *mad) { struct ib_sge list; - struct ib_send_wr wr, *bad_wr; + struct ib_ud_wr wr; + struct ib_send_wr *bad_wr; struct mlx4_ib_demux_pv_ctx *sqp_ctx; struct 
mlx4_ib_demux_pv_qp *sqp; struct mlx4_mad_snd_buf *sqp_mad; @@ -1238,24 +1257,27 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, list.addr = sqp->tx_ring[wire_tx_ix].buf.map; list.length = sizeof (struct mlx4_mad_snd_buf); - list.lkey = sqp_ctx->mr->lkey; - - wr.wr.ud.ah = ah; - wr.wr.ud.port_num = port; - wr.wr.ud.pkey_index = wire_pkey_ix; - wr.wr.ud.remote_qkey = qkey; - wr.wr.ud.remote_qpn = remote_qpn; - wr.next = NULL; - wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum); - wr.sg_list = &list; - wr.num_sge = 1; - wr.opcode = IB_WR_SEND; - wr.send_flags = IB_SEND_SIGNALED; + list.lkey = sqp_ctx->pd->local_dma_lkey; + + wr.ah = ah; + wr.port_num = port; + wr.pkey_index = wire_pkey_ix; + wr.remote_qkey = qkey; + wr.remote_qpn = remote_qpn; + wr.wr.next = NULL; + wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum); + wr.wr.sg_list = &list; + wr.wr.num_sge = 1; + wr.wr.opcode = IB_WR_SEND; + wr.wr.send_flags = IB_SEND_SIGNALED; if (s_mac) memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6); + if (vlan_id < 0x1000) + vlan_id |= (attr->sl & 7) << 13; + to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id); - ret = ib_post_send(send_qp, &wr, &bad_wr); + ret = ib_post_send(send_qp, &wr.wr, &bad_wr); out: if (ret) ib_destroy_ah(ah); @@ -1289,6 +1311,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc u8 *slave_id; int slave; int port; + u16 vlan_id; /* Get slave that sent this packet */ if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn || @@ -1365,19 +1388,22 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc * stadard address handle by decoding the tunnelled mlx4_ah fields */ memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av)); ah.ibah.device = ctx->ib_dev; + + port = be32_to_cpu(ah.av.ib.port_pd) >> 24; + port = mlx4_slave_convert_port(dev->dev, slave, port); + if (port < 0) + return; + ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); + mlx4_ib_query_ah(&ah.ibah, &ah_attr); if (ah_attr.ah_flags & IB_AH_GRH) fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); - port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num); - if (port < 0) - return; - ah_attr.port_num = port; memcpy(ah_attr.dmac, tunnel->hdr.mac, 6); - ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan); + vlan_id = be16_to_cpu(tunnel->hdr.vlan); /* if slave have default vlan use it */ mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, - &ah_attr.vlan_id, &ah_attr.sl); + &vlan_id, &ah_attr.sl); mlx4_ib_send_to_wire(dev, slave, ctx->port, is_proxy_qp0(dev, wc->src_qp, slave) ? 
@@ -1385,7 +1411,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc be16_to_cpu(tunnel->hdr.pkey_index), be32_to_cpu(tunnel->hdr.remote_qpn), be32_to_cpu(tunnel->hdr.qkey), - &ah_attr, wc->smac, &tunnel->mad); + &ah_attr, wc->smac, vlan_id, &tunnel->mad); } static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, @@ -1773,6 +1799,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) { int ret, cq_size; + struct ib_cq_init_attr cq_attr = {}; if (ctx->state != DEMUX_PV_STATE_DOWN) return -EEXIST; @@ -1801,8 +1828,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, if (ctx->has_smi) cq_size *= 2; + cq_attr.cqe = cq_size; ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, - NULL, ctx, cq_size, 0); + NULL, ctx, &cq_attr); if (IS_ERR(ctx->cq)) { ret = PTR_ERR(ctx->cq); pr_err("Couldn't create tunnel CQ (%d)\n", ret); @@ -1816,19 +1844,12 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, goto err_cq; } - ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(ctx->mr)) { - ret = PTR_ERR(ctx->mr); - pr_err("Couldn't get tunnel DMA MR (%d)\n", ret); - goto err_pd; - } - if (ctx->has_smi) { ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); if (ret) { pr_err("Couldn't create %s QP0 (%d)\n", create_tun ? "tunnel for" : "", ret); - goto err_mr; + goto err_pd; } } @@ -1865,10 +1886,6 @@ err_qp0: ib_destroy_qp(ctx->qp[0].qp); ctx->qp[0].qp = NULL; -err_mr: - ib_dereg_mr(ctx->mr); - ctx->mr = NULL; - err_pd: ib_dealloc_pd(ctx->pd); ctx->pd = NULL; @@ -1905,8 +1922,6 @@ static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, ib_destroy_qp(ctx->qp[1].qp); ctx->qp[1].qp = NULL; mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); - ib_dereg_mr(ctx->mr); - ctx->mr = NULL; ib_dealloc_pd(ctx->pd); ctx->pd = NULL; ib_destroy_cq(ctx->cq); @@ -2039,8 +2054,6 @@ static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx) ib_destroy_qp(sqp_ctx->qp[1].qp); sqp_ctx->qp[1].qp = NULL; mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0); - ib_dereg_mr(sqp_ctx->mr); - sqp_ctx->mr = NULL; ib_dealloc_pd(sqp_ctx->pd); sqp_ctx->pd = NULL; ib_destroy_cq(sqp_ctx->cq); diff --git a/kernel/drivers/infiniband/hw/mlx4/main.c b/kernel/drivers/infiniband/hw/mlx4/main.c index cc64400d4..97d6878f9 100644 --- a/kernel/drivers/infiniband/hw/mlx4/main.c +++ b/kernel/drivers/infiniband/hw/mlx4/main.c @@ -45,6 +45,9 @@ #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> +#include <rdma/ib_cache.h> + +#include <net/bonding.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/cmd.h> @@ -74,13 +77,6 @@ static const char mlx4_ib_version[] = DRV_NAME ": Mellanox ConnectX InfiniBand driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; -struct update_gid_work { - struct work_struct work; - union ib_gid gids[128]; - struct mlx4_ib_dev *dev; - int port; -}; - static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); static struct workqueue_struct *wq; @@ -93,8 +89,6 @@ static void init_query_mad(struct ib_smp *mad) mad->method = IB_MGMT_METHOD_GET; } -static union ib_gid zgid; - static int check_flow_steering_support(struct mlx4_dev *dev) { int eth_num_ports = 0; @@ -131,15 +125,267 @@ static int num_ib_ports(struct mlx4_dev *dev) return ib_ports; } +static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num) +{ + struct mlx4_ib_dev *ibdev = 
to_mdev(device); + struct net_device *dev; + + rcu_read_lock(); + dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); + + if (dev) { + if (mlx4_is_bonded(ibdev->dev)) { + struct net_device *upper = NULL; + + upper = netdev_master_upper_dev_get_rcu(dev); + if (upper) { + struct net_device *active; + + active = bond_option_active_slave_get_rcu(netdev_priv(upper)); + if (active) + dev = active; + } + } + } + if (dev) + dev_hold(dev); + + rcu_read_unlock(); + return dev; +} + +static int mlx4_ib_update_gids(struct gid_entry *gids, + struct mlx4_ib_dev *ibdev, + u8 port_num) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + struct mlx4_dev *dev = ibdev->dev; + int i; + union ib_gid *gid_tbl; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + + gid_tbl = mailbox->buf; + + for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) + memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid)); + + err = mlx4_cmd(dev, mailbox->dma, + MLX4_SET_PORT_GID_TABLE << 8 | port_num, + 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (mlx4_is_bonded(dev)) + err += mlx4_cmd(dev, mailbox->dma, + MLX4_SET_PORT_GID_TABLE << 8 | 2, + 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +static int mlx4_ib_add_gid(struct ib_device *device, + u8 port_num, + unsigned int index, + const union ib_gid *gid, + const struct ib_gid_attr *attr, + void **context) +{ + struct mlx4_ib_dev *ibdev = to_mdev(device); + struct mlx4_ib_iboe *iboe = &ibdev->iboe; + struct mlx4_port_gid_table *port_gid_table; + int free = -1, found = -1; + int ret = 0; + int hw_update = 0; + int i; + struct gid_entry *gids = NULL; + + if (!rdma_cap_roce_gid_table(device, port_num)) + return -EINVAL; + + if (port_num > MLX4_MAX_PORTS) + return -EINVAL; + + if (!context) + return -EINVAL; + + port_gid_table = &iboe->gids[port_num - 1]; + spin_lock_bh(&iboe->lock); + for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) { + if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid))) { + found = i; + break; + } + if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid))) + free = i; /* HW has space */ + } + + if (found < 0) { + if (free < 0) { + ret = -ENOSPC; + } else { + port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC); + if (!port_gid_table->gids[free].ctx) { + ret = -ENOMEM; + } else { + *context = port_gid_table->gids[free].ctx; + memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid)); + port_gid_table->gids[free].ctx->real_index = free; + port_gid_table->gids[free].ctx->refcount = 1; + hw_update = 1; + } + } + } else { + struct gid_cache_context *ctx = port_gid_table->gids[found].ctx; + *context = ctx; + ctx->refcount++; + } + if (!ret && hw_update) { + gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC); + if (!gids) { + ret = -ENOMEM; + } else { + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) + memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); + } + } + spin_unlock_bh(&iboe->lock); + + if (!ret && hw_update) { + ret = mlx4_ib_update_gids(gids, ibdev, port_num); + kfree(gids); + } + + return ret; +} + +static int mlx4_ib_del_gid(struct ib_device *device, + u8 port_num, + unsigned int index, + void **context) +{ + struct gid_cache_context *ctx = *context; + struct mlx4_ib_dev *ibdev = to_mdev(device); + struct mlx4_ib_iboe *iboe = &ibdev->iboe; + struct mlx4_port_gid_table *port_gid_table; + int ret = 0; + int hw_update = 0; + struct 
gid_entry *gids = NULL; + + if (!rdma_cap_roce_gid_table(device, port_num)) + return -EINVAL; + + if (port_num > MLX4_MAX_PORTS) + return -EINVAL; + + port_gid_table = &iboe->gids[port_num - 1]; + spin_lock_bh(&iboe->lock); + if (ctx) { + ctx->refcount--; + if (!ctx->refcount) { + unsigned int real_index = ctx->real_index; + + memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid)); + kfree(port_gid_table->gids[real_index].ctx); + port_gid_table->gids[real_index].ctx = NULL; + hw_update = 1; + } + } + if (!ret && hw_update) { + int i; + + gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC); + if (!gids) { + ret = -ENOMEM; + } else { + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) + memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); + } + } + spin_unlock_bh(&iboe->lock); + + if (!ret && hw_update) { + ret = mlx4_ib_update_gids(gids, ibdev, port_num); + kfree(gids); + } + return ret; +} + +int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, + u8 port_num, int index) +{ + struct mlx4_ib_iboe *iboe = &ibdev->iboe; + struct gid_cache_context *ctx = NULL; + union ib_gid gid; + struct mlx4_port_gid_table *port_gid_table; + int real_index = -EINVAL; + int i; + int ret; + unsigned long flags; + + if (port_num > MLX4_MAX_PORTS) + return -EINVAL; + + if (mlx4_is_bonded(ibdev->dev)) + port_num = 1; + + if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) + return index; + + ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, NULL); + if (ret) + return ret; + + if (!memcmp(&gid, &zgid, sizeof(gid))) + return -EINVAL; + + spin_lock_irqsave(&iboe->lock, flags); + port_gid_table = &iboe->gids[port_num - 1]; + + for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) + if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid))) { + ctx = port_gid_table->gids[i].ctx; + break; + } + if (ctx) + real_index = ctx->real_index; + spin_unlock_irqrestore(&iboe->lock, flags); + return real_index; +} + static int mlx4_ib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) + struct ib_device_attr *props, + struct ib_udata *uhw) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; int have_ib_ports; + struct mlx4_uverbs_ex_query_device cmd; + struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0}; + struct mlx4_clock_params clock_params; + + if (uhw->inlen) { + if (uhw->inlen < sizeof(cmd)) + return -EINVAL; + + err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd)); + if (err) + return err; + + if (cmd.comp_mask) + return -EINVAL; + if (cmd.reserved) + return -EINVAL; + } + + resp.response_length = offsetof(typeof(resp), response_length) + + sizeof(resp.response_length); in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) @@ -196,6 +442,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; } + props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; + props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; props->vendor_part_id = dev->dev->persist->pdev->device; @@ -208,6 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; props->max_sge = min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); + props->max_sge_rd = MLX4_MAX_SGE_RD; props->max_cq = dev->dev->quotas.cq; props->max_cqe = dev->dev->caps.max_cqes; props->max_mr = dev->dev->quotas.mpt; 
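The mlx4_ib_query_device() hunks above and below follow the usual extended-verb udata pattern: validate the inbound length, reject any unknown comp_mask/reserved bits, then grow resp.response_length one optional field at a time and copy back only as much as the caller left room for. Below is a condensed sketch of that flow, using hypothetical request/response structs in place of mlx4_uverbs_ex_query_device and mlx4_uverbs_ex_query_device_resp.

```c
#include <rdma/ib_verbs.h>

/* Hypothetical extended-verb payloads, for illustration only. */
struct ex_query_cmd {
	__u32 comp_mask;
	__u32 reserved;
};

struct ex_query_resp {
	__u32 comp_mask;
	__u32 response_length;
	__u64 optional_offset;		/* reported only when supported */
};

#define EX_QUERY_RESP_MASK_OPTIONAL	(1 << 0)	/* illustrative mask bit */

static int ex_query_sketch(struct ib_udata *uhw, bool have_optional,
			   __u64 optional_offset)
{
	struct ex_query_cmd cmd = {};
	struct ex_query_resp resp = {};
	int err;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;
		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;
		/* No extensions understood yet: any set bit is an error. */
		if (cmd.comp_mask || cmd.reserved)
			return -EINVAL;
	}

	/* Mandatory part of the response is everything up to and
	 * including response_length itself.
	 */
	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	/* Optional field: report it only if the caller left room for it. */
	if (uhw->outlen >= resp.response_length + sizeof(resp.optional_offset)) {
		resp.response_length += sizeof(resp.optional_offset);
		if (have_optional) {
			resp.comp_mask |= EX_QUERY_RESP_MASK_OPTIONAL;
			resp.optional_offset = optional_offset;
		}
	}

	if (uhw->outlen)
		return ib_copy_to_udata(uhw, &resp, resp.response_length);

	return 0;
}
```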
@@ -229,7 +478,25 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_map_per_fmr = dev->dev->caps.max_fmr_maps; + props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; + props->timestamp_mask = 0xFFFFFFFFFFFFULL; + if (!mlx4_is_slave(dev->dev)) + err = mlx4_get_internal_clock_params(dev->dev, &clock_params); + + if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { + resp.response_length += sizeof(resp.hca_core_clock_offset); + if (!err && !mlx4_is_slave(dev->dev)) { + resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; + resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; + } + } + + if (uhw->outlen) { + err = ib_copy_to_udata(uhw, &resp, resp.response_length); + if (err) + goto out; + } out: kfree(in_mad); kfree(out_mad); @@ -375,12 +642,13 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->state = IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); props->active_mtu = IB_MTU_256; - if (is_bonded) - rtnl_lock(); /* required to get upper dev */ spin_lock_bh(&iboe->lock); ndev = iboe->netdevs[port - 1]; - if (ndev && is_bonded) - ndev = netdev_master_upper_dev_get(ndev); + if (ndev && is_bonded) { + rcu_read_lock(); /* required to get upper dev */ + ndev = netdev_master_upper_dev_get_rcu(ndev); + rcu_read_unlock(); + } if (!ndev) goto out_unlock; @@ -392,8 +660,6 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->phys_state = state_to_phys_state(props->state); out_unlock: spin_unlock_bh(&iboe->lock); - if (is_bonded) - rtnl_unlock(); out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); return err; @@ -476,23 +742,27 @@ out: return err; } -static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index, - union ib_gid *gid) -{ - struct mlx4_ib_dev *dev = to_mdev(ibdev); - - *gid = dev->iboe.gid_table[port - 1][index]; - - return 0; -} - static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { - if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND) + int ret; + + if (rdma_protocol_ib(ibdev, port)) return __mlx4_ib_query_gid(ibdev, port, index, gid, 0); - else - return iboe_query_gid(ibdev, port, index, gid); + + if (!rdma_protocol_roce(ibdev, port)) + return -ENODEV; + + if (!rdma_cap_roce_gid_table(ibdev, port)) + return -ENODEV; + + ret = ib_get_cached_gid(ibdev, port, index, gid, NULL); + if (ret == -EAGAIN) { + memcpy(gid, &zgid, sizeof(*gid)); + return 0; + } + + return ret; } int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, @@ -653,7 +923,7 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev, resp.cqe_size = dev->dev->caps.cqe_size; } - context = kmalloc(sizeof *context, GFP_KERNEL); + context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); @@ -690,21 +960,143 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) return 0; } +static void mlx4_ib_vma_open(struct vm_area_struct *area) +{ + /* vma_open is called when a new VMA is created on top of our VMA. + * This is done through either mremap flow or split_vma (usually due + * to mlock, madvise, munmap, etc.). We do not support a clone of the + * vma, as this VMA is strongly hardware related. Therefore we set the + * vm_ops of the newly created/cloned VMA to NULL, to prevent it from + * calling us again and trying to do incorrect actions. 
We assume that + * the original vma size is exactly a single page that there will be no + * "splitting" operations on. + */ + area->vm_ops = NULL; +} + +static void mlx4_ib_vma_close(struct vm_area_struct *area) +{ + struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data; + + /* It's guaranteed that all VMAs opened on a FD are closed before the + * file itself is closed, therefore no sync is needed with the regular + * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync + * with accessing the vma as part of mlx4_ib_disassociate_ucontext. + * The close operation is usually called under mm->mmap_sem except when + * process is exiting. The exiting case is handled explicitly as part + * of mlx4_ib_disassociate_ucontext. + */ + mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *) + area->vm_private_data; + + /* set the vma context pointer to null in the mlx4_ib driver's private + * data to protect against a race condition in mlx4_ib_dissassociate_ucontext(). + */ + mlx4_ib_vma_priv_data->vma = NULL; +} + +static const struct vm_operations_struct mlx4_ib_vm_ops = { + .open = mlx4_ib_vma_open, + .close = mlx4_ib_vma_close +}; + +static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) +{ + int i; + int ret = 0; + struct vm_area_struct *vma; + struct mlx4_ib_ucontext *context = to_mucontext(ibcontext); + struct task_struct *owning_process = NULL; + struct mm_struct *owning_mm = NULL; + + owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); + if (!owning_process) + return; + + owning_mm = get_task_mm(owning_process); + if (!owning_mm) { + pr_info("no mm, disassociate ucontext is pending task termination\n"); + while (1) { + /* make sure that task is dead before returning, it may + * prevent a rare case of module down in parallel to a + * call to mlx4_ib_vma_close. + */ + put_task_struct(owning_process); + msleep(1); + owning_process = get_pid_task(ibcontext->tgid, + PIDTYPE_PID); + if (!owning_process || + owning_process->state == TASK_DEAD) { + pr_info("disassociate ucontext done, task was terminated\n"); + /* in case task was dead need to release the task struct */ + if (owning_process) + put_task_struct(owning_process); + return; + } + } + } + + /* need to protect from a race on closing the vma as part of + * mlx4_ib_vma_close(). 
+ */ + down_read(&owning_mm->mmap_sem); + for (i = 0; i < HW_BAR_COUNT; i++) { + vma = context->hw_bar_info[i].vma; + if (!vma) + continue; + + ret = zap_vma_ptes(context->hw_bar_info[i].vma, + context->hw_bar_info[i].vma->vm_start, + PAGE_SIZE); + if (ret) { + pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret); + BUG_ON(1); + } + + /* context going to be destroyed, should not access ops any more */ + context->hw_bar_info[i].vma->vm_ops = NULL; + } + + up_read(&owning_mm->mmap_sem); + mmput(owning_mm); + put_task_struct(owning_process); +} + +static void mlx4_ib_set_vma_data(struct vm_area_struct *vma, + struct mlx4_ib_vma_private_data *vma_private_data) +{ + vma_private_data->vma = vma; + vma->vm_private_data = vma_private_data; + vma->vm_ops = &mlx4_ib_vm_ops; +} + static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct mlx4_ib_dev *dev = to_mdev(context->device); + struct mlx4_ib_ucontext *mucontext = to_mucontext(context); if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_pgoff == 0) { + /* We prevent double mmaping on same context */ + if (mucontext->hw_bar_info[HW_BAR_DB].vma) + return -EINVAL; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, to_mucontext(context)->uar.pfn, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; + + mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]); + } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { + /* We prevent double mmaping on same context */ + if (mucontext->hw_bar_info[HW_BAR_BF].vma) + return -EINVAL; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, @@ -712,8 +1104,36 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) dev->dev->caps.num_uars, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; - } else + + mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]); + + } else if (vma->vm_pgoff == 3) { + struct mlx4_clock_params params; + int ret; + + /* We prevent double mmaping on same context */ + if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma) + return -EINVAL; + + ret = mlx4_get_internal_clock_params(dev->dev, ¶ms); + + if (ret) + return ret; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, + (pci_resource_start(dev->dev->persist->pdev, + params.bar) + + params.offset) + >> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + + mlx4_ib_set_vma_data(vma, + &mucontext->hw_bar_info[HW_BAR_CLOCK]); + } else { return -EINVAL; + } return 0; } @@ -758,6 +1178,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, struct ib_udata *udata) { struct mlx4_ib_xrcd *xrcd; + struct ib_cq_init_attr cq_attr = {}; int err; if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) @@ -777,7 +1198,8 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, goto err2; } - xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0); + cq_attr.cqe = 1; + xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); if (IS_ERR(xrcd->cq)) { err = PTR_ERR(xrcd->cq); goto err3; @@ -827,6 +1249,22 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) return 0; } +static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev, + struct mlx4_ib_counters *ctr_table) +{ + struct counter_index *counter, *tmp_count; + + mutex_lock(&ctr_table->mutex); + list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list, + 
list) { + if (counter->allocated) + mlx4_counter_free(ibdev->dev, counter->index); + list_del(&counter->list); + kfree(counter); + } + mutex_unlock(&ctr_table->mutex); +} + int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, union ib_gid *gid) { @@ -1090,7 +1528,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRAPPED); if (ret == -ENOMEM) pr_err("mcg table is full. Fail to register network rule.\n"); else if (ret == -ENXIO) @@ -1107,7 +1545,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) int err; err = mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRAPPED); if (err) pr_err("Fail to detach network rule. registration id = 0x%llx\n", reg_id); @@ -1185,7 +1623,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, &mflow->reg_id[i].id); if (err) goto err_create_flow; - i++; if (is_bonded) { /* Application always sees one port so the mirror rule * must be on port #2 @@ -1200,6 +1637,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, j++; } + i++; } if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { @@ -1207,7 +1645,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, &mflow->reg_id[i].id); if (err) goto err_create_flow; - i++; + if (is_bonded) { flow_attr->port = 2; err = mlx4_ib_tunnel_steer_add(qp, flow_attr, @@ -1218,6 +1656,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, j++; } /* function to create mirror rule */ + i++; } return &mflow->ibflow; @@ -1489,272 +1928,6 @@ static struct device_attribute *mlx4_class_attributes[] = { &dev_attr_board_id }; -static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, - struct net_device *dev) -{ - memcpy(eui, dev->dev_addr, 3); - memcpy(eui + 5, dev->dev_addr + 3, 3); - if (vlan_id < 0x1000) { - eui[3] = vlan_id >> 8; - eui[4] = vlan_id & 0xff; - } else { - eui[3] = 0xff; - eui[4] = 0xfe; - } - eui[0] ^= 2; -} - -static void update_gids_task(struct work_struct *work) -{ - struct update_gid_work *gw = container_of(work, struct update_gid_work, work); - struct mlx4_cmd_mailbox *mailbox; - union ib_gid *gids; - int err; - struct mlx4_dev *dev = gw->dev->dev; - int is_bonded = mlx4_is_bonded(dev); - - if (!gw->dev->ib_active) - return; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); - return; - } - - gids = mailbox->buf; - memcpy(gids, gw->gids, sizeof gw->gids); - - err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port, - MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); - if (err) - pr_warn("set port command failed\n"); - else - if ((gw->port == 1) || !is_bonded) - mlx4_ib_dispatch_event(gw->dev, - is_bonded ? 
1 : gw->port, - IB_EVENT_GID_CHANGE); - - mlx4_free_cmd_mailbox(dev, mailbox); - kfree(gw); -} - -static void reset_gids_task(struct work_struct *work) -{ - struct update_gid_work *gw = - container_of(work, struct update_gid_work, work); - struct mlx4_cmd_mailbox *mailbox; - union ib_gid *gids; - int err; - struct mlx4_dev *dev = gw->dev->dev; - - if (!gw->dev->ib_active) - return; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - pr_warn("reset gid table failed\n"); - goto free; - } - - gids = mailbox->buf; - memcpy(gids, gw->gids, sizeof(gw->gids)); - - if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) == - IB_LINK_LAYER_ETHERNET) { - err = mlx4_cmd(dev, mailbox->dma, - MLX4_SET_PORT_GID_TABLE << 8 | gw->port, - MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); - if (err) - pr_warn("set port %d command failed\n", gw->port); - } - - mlx4_free_cmd_mailbox(dev, mailbox); -free: - kfree(gw); -} - -static int update_gid_table(struct mlx4_ib_dev *dev, int port, - union ib_gid *gid, int clear, - int default_gid) -{ - struct update_gid_work *work; - int i; - int need_update = 0; - int free = -1; - int found = -1; - int max_gids; - - if (default_gid) { - free = 0; - } else { - max_gids = dev->dev->caps.gid_table_len[port]; - for (i = 1; i < max_gids; ++i) { - if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, - sizeof(*gid))) - found = i; - - if (clear) { - if (found >= 0) { - need_update = 1; - dev->iboe.gid_table[port - 1][found] = - zgid; - break; - } - } else { - if (found >= 0) - break; - - if (free < 0 && - !memcmp(&dev->iboe.gid_table[port - 1][i], - &zgid, sizeof(*gid))) - free = i; - } - } - } - - if (found == -1 && !clear && free >= 0) { - dev->iboe.gid_table[port - 1][free] = *gid; - need_update = 1; - } - - if (!need_update) - return 0; - - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; - - memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids)); - INIT_WORK(&work->work, update_gids_task); - work->port = port; - work->dev = dev; - queue_work(wq, &work->work); - - return 0; -} - -static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid) -{ - gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev); -} - - -static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port) -{ - struct update_gid_work *work; - - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; - - memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids)); - memset(work->gids, 0, sizeof(work->gids)); - INIT_WORK(&work->work, reset_gids_task); - work->dev = dev; - work->port = port; - queue_work(wq, &work->work); - return 0; -} - -static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, - struct mlx4_ib_dev *ibdev, union ib_gid *gid) -{ - struct mlx4_ib_iboe *iboe; - int port = 0; - struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? 
- rdma_vlan_dev_real_dev(event_netdev) : - event_netdev; - union ib_gid default_gid; - - mlx4_make_default_gid(real_dev, &default_gid); - - if (!memcmp(gid, &default_gid, sizeof(*gid))) - return 0; - - if (event != NETDEV_DOWN && event != NETDEV_UP) - return 0; - - if ((real_dev != event_netdev) && - (event == NETDEV_DOWN) && - rdma_link_local_addr((struct in6_addr *)gid)) - return 0; - - iboe = &ibdev->iboe; - spin_lock_bh(&iboe->lock); - - for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) - if ((netif_is_bond_master(real_dev) && - (real_dev == iboe->masters[port - 1])) || - (!netif_is_bond_master(real_dev) && - (real_dev == iboe->netdevs[port - 1]))) - update_gid_table(ibdev, port, gid, - event == NETDEV_DOWN, 0); - - spin_unlock_bh(&iboe->lock); - return 0; - -} - -static u8 mlx4_ib_get_dev_port(struct net_device *dev, - struct mlx4_ib_dev *ibdev) -{ - u8 port = 0; - struct mlx4_ib_iboe *iboe; - struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ? - rdma_vlan_dev_real_dev(dev) : dev; - - iboe = &ibdev->iboe; - - for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) - if ((netif_is_bond_master(real_dev) && - (real_dev == iboe->masters[port - 1])) || - (!netif_is_bond_master(real_dev) && - (real_dev == iboe->netdevs[port - 1]))) - break; - - if ((port == 0) || (port > ibdev->dev->caps.num_ports)) - return 0; - else - return port; -} - -static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - struct mlx4_ib_dev *ibdev; - struct in_ifaddr *ifa = ptr; - union ib_gid gid; - struct net_device *event_netdev = ifa->ifa_dev->dev; - - ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid); - - ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet); - - mlx4_ib_addr_event(event, event_netdev, ibdev, &gid); - return NOTIFY_DONE; -} - -#if IS_ENABLED(CONFIG_IPV6) -static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - struct mlx4_ib_dev *ibdev; - struct inet6_ifaddr *ifa = ptr; - union ib_gid *gid = (union ib_gid *)&ifa->addr; - struct net_device *event_netdev = ifa->idev->dev; - - ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6); - - mlx4_ib_addr_event(event, event_netdev, ibdev, gid); - return NOTIFY_DONE; -} -#endif - #define MLX4_IB_INVALID_MAC ((u64)-1) static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, struct net_device *dev, @@ -1813,94 +1986,6 @@ unlock: mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); } -static void mlx4_ib_get_dev_addr(struct net_device *dev, - struct mlx4_ib_dev *ibdev, u8 port) -{ - struct in_device *in_dev; -#if IS_ENABLED(CONFIG_IPV6) - struct inet6_dev *in6_dev; - union ib_gid *pgid; - struct inet6_ifaddr *ifp; - union ib_gid default_gid; -#endif - union ib_gid gid; - - - if ((port == 0) || (port > ibdev->dev->caps.num_ports)) - return; - - /* IPv4 gids */ - in_dev = in_dev_get(dev); - if (in_dev) { - for_ifa(in_dev) { - /*ifa->ifa_address;*/ - ipv6_addr_set_v4mapped(ifa->ifa_address, - (struct in6_addr *)&gid); - update_gid_table(ibdev, port, &gid, 0, 0); - } - endfor_ifa(in_dev); - in_dev_put(in_dev); - } -#if IS_ENABLED(CONFIG_IPV6) - mlx4_make_default_gid(dev, &default_gid); - /* IPv6 gids */ - in6_dev = in6_dev_get(dev); - if (in6_dev) { - read_lock_bh(&in6_dev->lock); - list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { - pgid = (union ib_gid *)&ifp->addr; - if (!memcmp(pgid, &default_gid, sizeof(*pgid))) - continue; - update_gid_table(ibdev, port, pgid, 0, 0); - } - read_unlock_bh(&in6_dev->lock); - 
in6_dev_put(in6_dev); - } -#endif -} - -static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev, - struct net_device *dev, u8 port) -{ - union ib_gid gid; - mlx4_make_default_gid(dev, &gid); - update_gid_table(ibdev, port, &gid, 0, 1); -} - -static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) -{ - struct net_device *dev; - struct mlx4_ib_iboe *iboe = &ibdev->iboe; - int i; - int err = 0; - - for (i = 1; i <= ibdev->num_ports; ++i) { - if (rdma_port_get_link_layer(&ibdev->ib_dev, i) == - IB_LINK_LAYER_ETHERNET) { - err = reset_gid_table(ibdev, i); - if (err) - goto out; - } - } - - read_lock(&dev_base_lock); - spin_lock_bh(&iboe->lock); - - for_each_netdev(&init_net, dev) { - u8 port = mlx4_ib_get_dev_port(dev, ibdev); - /* port will be non-zero only for ETH ports */ - if (port) { - mlx4_ib_set_default_gid(ibdev, dev, port); - mlx4_ib_get_dev_addr(dev, ibdev, port); - } - } - - spin_unlock_bh(&iboe->lock); - read_unlock(&dev_base_lock); -out: - return err; -} - static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, struct net_device *dev, unsigned long event) @@ -1910,81 +1995,22 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, int update_qps_port = -1; int port; + ASSERT_RTNL(); + iboe = &ibdev->iboe; spin_lock_bh(&iboe->lock); mlx4_foreach_ib_transport_port(port, ibdev->dev) { - enum ib_port_state port_state = IB_PORT_NOP; - struct net_device *old_master = iboe->masters[port - 1]; - struct net_device *curr_netdev; - struct net_device *curr_master; iboe->netdevs[port - 1] = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); - if (iboe->netdevs[port - 1]) - mlx4_ib_set_default_gid(ibdev, - iboe->netdevs[port - 1], port); - curr_netdev = iboe->netdevs[port - 1]; - - if (iboe->netdevs[port - 1] && - netif_is_bond_slave(iboe->netdevs[port - 1])) { - iboe->masters[port - 1] = netdev_master_upper_dev_get( - iboe->netdevs[port - 1]); - } else { - iboe->masters[port - 1] = NULL; - } - curr_master = iboe->masters[port - 1]; if (dev == iboe->netdevs[port - 1] && (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || event == NETDEV_UP || event == NETDEV_CHANGE)) update_qps_port = port; - if (curr_netdev) { - port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? - IB_PORT_ACTIVE : IB_PORT_DOWN; - mlx4_ib_set_default_gid(ibdev, curr_netdev, port); - if (curr_master) { - /* if using bonding/team and a slave port is down, we - * don't want the bond IP based gids in the table since - * flows that select port by gid may get the down port. - */ - if (port_state == IB_PORT_DOWN && - !mlx4_is_bonded(ibdev->dev)) { - reset_gid_table(ibdev, port); - mlx4_ib_set_default_gid(ibdev, - curr_netdev, - port); - } else { - /* gids from the upper dev (bond/team) - * should appear in port's gid table - */ - mlx4_ib_get_dev_addr(curr_master, - ibdev, port); - } - } - /* if bonding is used it is possible that we add it to - * masters only after IP address is assigned to the - * net bonding interface. 
- */ - if (curr_master && (old_master != curr_master)) { - reset_gid_table(ibdev, port); - mlx4_ib_set_default_gid(ibdev, - curr_netdev, port); - mlx4_ib_get_dev_addr(curr_master, ibdev, port); - } - - if (!curr_master && (old_master != curr_master)) { - reset_gid_table(ibdev, port); - mlx4_ib_set_default_gid(ibdev, - curr_netdev, port); - mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); - } - } else { - reset_gid_table(ibdev, port); - } } - spin_unlock_bh(&iboe->lock); if (update_qps_port > 0) @@ -2041,77 +2067,75 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { - char name[80]; - int eq_per_port = 0; - int added_eqs = 0; - int total_eqs = 0; - int i, j, eq; + int i, j, eq = 0, total_eqs = 0; - /* Legacy mode or comp_pool is not large enough */ - if (dev->caps.comp_pool == 0 || - dev->caps.num_ports > dev->caps.comp_pool) - return; - - eq_per_port = dev->caps.comp_pool / dev->caps.num_ports; - - /* Init eq table */ - added_eqs = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) - added_eqs += eq_per_port; - - total_eqs = dev->caps.num_comp_vectors + added_eqs; - - ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL); + ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, + sizeof(ibdev->eq_table[0]), GFP_KERNEL); if (!ibdev->eq_table) return; - ibdev->eq_added = added_eqs; - - eq = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { - for (j = 0; j < eq_per_port; j++) { - snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", - i, j, dev->persist->pdev->bus->name); - /* Set IRQ for specific name (per ring) */ - if (mlx4_assign_eq(dev, name, NULL, - &ibdev->eq_table[eq])) { - /* Use legacy (same as mlx4_en driver) */ - pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq); - ibdev->eq_table[eq] = - (eq % dev->caps.num_comp_vectors); - } - eq++; + for (i = 1; i <= dev->caps.num_ports; i++) { + for (j = 0; j < mlx4_get_eqs_per_port(dev, i); + j++, total_eqs++) { + if (i > 1 && mlx4_is_eq_shared(dev, total_eqs)) + continue; + ibdev->eq_table[eq] = total_eqs; + if (!mlx4_assign_eq(dev, i, + &ibdev->eq_table[eq])) + eq++; + else + ibdev->eq_table[eq] = -1; } } - /* Fill the reset of the vector with legacy EQ */ - for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++) - ibdev->eq_table[eq++] = i; + for (i = eq; i < dev->caps.num_comp_vectors; + ibdev->eq_table[i++] = -1) + ; /* Advertise the new number of EQs to clients */ - ibdev->ib_dev.num_comp_vectors = total_eqs; + ibdev->ib_dev.num_comp_vectors = eq; } static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i; + int total_eqs = ibdev->ib_dev.num_comp_vectors; - /* no additional eqs were added */ + /* no eqs were allocated */ if (!ibdev->eq_table) return; /* Reset the advertised EQ number */ - ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; + ibdev->ib_dev.num_comp_vectors = 0; - /* Free only the added eqs */ - for (i = 0; i < ibdev->eq_added; i++) { - /* Don't free legacy eqs if used */ - if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors) - continue; + for (i = 0; i < total_eqs; i++) mlx4_release_eq(dev, ibdev->eq_table[i]); - } kfree(ibdev->eq_table); + ibdev->eq_table = NULL; +} + +static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx4_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = 
attr.gid_tbl_len; + + if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + else + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; } static void *mlx4_ib_add(struct mlx4_dev *dev) @@ -2123,6 +2147,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) struct mlx4_ib_iboe *iboe; int ib_num_ports = 0; int num_req_counters; + int allocated; + u32 counter_index; + struct counter_index *new_counter_index = NULL; pr_info_once("%s", mlx4_ib_version); @@ -2167,6 +2194,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) 1 : ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; + ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; + ibdev->ib_dev.add_gid = mlx4_ib_add_gid; + ibdev->ib_dev.del_gid = mlx4_ib_del_gid; if (dev->caps.userspace_caps) ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; @@ -2235,12 +2265,13 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr; ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; - ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr; - ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list; - ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list; + ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr; + ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg; ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; ibdev->ib_dev.process_mad = mlx4_ib_process_mad; + ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; + ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext; if (!mlx4_is_slave(ibdev->dev)) { ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; @@ -2278,6 +2309,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); } + ibdev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); + mlx4_ib_alloc_eqs(dev, ibdev); spin_lock_init(&iboe->lock); @@ -2285,22 +2321,58 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (init_node_data(ibdev)) goto err_map; + for (i = 0; i < ibdev->num_ports; ++i) { + mutex_init(&ibdev->counters_table[i].mutex); + INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); + } + num_req_counters = mlx4_is_bonded(dev) ? 
1 : ibdev->num_ports; for (i = 0; i < num_req_counters; ++i) { mutex_init(&ibdev->qp1_proxy_lock[i]); + allocated = 0; if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { - err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); + err = mlx4_counter_alloc(ibdev->dev, &counter_index); + /* if failed to allocate a new counter, use default */ if (err) - ibdev->counters[i] = -1; - } else { - ibdev->counters[i] = -1; + counter_index = + mlx4_get_default_counter_index(dev, + i + 1); + else + allocated = 1; + } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */ + counter_index = mlx4_get_default_counter_index(dev, + i + 1); } + new_counter_index = kmalloc(sizeof(*new_counter_index), + GFP_KERNEL); + if (!new_counter_index) { + if (allocated) + mlx4_counter_free(ibdev->dev, counter_index); + goto err_counter; + } + new_counter_index->index = counter_index; + new_counter_index->allocated = allocated; + list_add_tail(&new_counter_index->list, + &ibdev->counters_table[i].counters_list); + ibdev->counters_table[i].default_counter = counter_index; + pr_info("counter index %d for port %d allocated %d\n", + counter_index, i + 1, allocated); } if (mlx4_is_bonded(dev)) - for (i = 1; i < ibdev->num_ports ; ++i) - ibdev->counters[i] = ibdev->counters[0]; - + for (i = 1; i < ibdev->num_ports ; ++i) { + new_counter_index = + kmalloc(sizeof(struct counter_index), + GFP_KERNEL); + if (!new_counter_index) + goto err_counter; + new_counter_index->index = counter_index; + new_counter_index->allocated = 0; + list_add_tail(&new_counter_index->list, + &ibdev->counters_table[i].counters_list); + ibdev->counters_table[i].default_counter = + counter_index; + } mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ib_num_ports++; @@ -2360,26 +2432,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_notif; } } - if (!iboe->nb_inet.notifier_call) { - iboe->nb_inet.notifier_call = mlx4_ib_inet_event; - err = register_inetaddr_notifier(&iboe->nb_inet); - if (err) { - iboe->nb_inet.notifier_call = NULL; - goto err_notif; - } - } -#if IS_ENABLED(CONFIG_IPV6) - if (!iboe->nb_inet6.notifier_call) { - iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event; - err = register_inet6addr_notifier(&iboe->nb_inet6); - if (err) { - iboe->nb_inet6.notifier_call = NULL; - goto err_notif; - } - } -#endif - if (mlx4_ib_init_gid_table(ibdev)) - goto err_notif; } for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { @@ -2410,18 +2462,6 @@ err_notif: pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } - if (ibdev->iboe.nb_inet.notifier_call) { - if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet)) - pr_warn("failure unregistering notifier\n"); - ibdev->iboe.nb_inet.notifier_call = NULL; - } -#if IS_ENABLED(CONFIG_IPV6) - if (ibdev->iboe.nb_inet6.notifier_call) { - if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6)) - pr_warn("failure unregistering notifier\n"); - ibdev->iboe.nb_inet6.notifier_call = NULL; - } -#endif flush_workqueue(wq); mlx4_ib_close_sriov(ibdev); @@ -2440,9 +2480,8 @@ err_steer_qp_release: mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); err_counter: - for (; i; --i) - if (ibdev->counters[i - 1] != -1) - mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]); + for (i = 0; i < ibdev->num_ports; ++i) + mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); err_map: iounmap(ibdev->uar_map); @@ -2545,23 +2584,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) kfree(ibdev->ib_uc_qpns_bitmap); } 
- if (ibdev->iboe.nb_inet.notifier_call) { - if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet)) - pr_warn("failure unregistering notifier\n"); - ibdev->iboe.nb_inet.notifier_call = NULL; - } -#if IS_ENABLED(CONFIG_IPV6) - if (ibdev->iboe.nb_inet6.notifier_call) { - if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6)) - pr_warn("failure unregistering notifier\n"); - ibdev->iboe.nb_inet6.notifier_call = NULL; - } -#endif - iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) - if (ibdev->counters[p] != -1) - mlx4_counter_free(ibdev->dev, ibdev->counters[p]); + mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); + mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) mlx4_CLOSE_PORT(dev, p); @@ -2592,31 +2618,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); if (!dm) { pr_err("failed to allocate memory for tunneling qp update\n"); - goto out; + return; } for (i = 0; i < ports; i++) { dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); if (!dm[i]) { pr_err("failed to allocate memory for tunneling qp update work struct\n"); - for (i = 0; i < dev->caps.num_ports; i++) { - if (dm[i]) - kfree(dm[i]); - } + while (--i >= 0) + kfree(dm[i]); goto out; } - } - /* initialize or tear down tunnel QPs for the slave */ - for (i = 0; i < ports; i++) { INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); dm[i]->port = first_port + i + 1; dm[i]->slave = slave; dm[i]->do_init = do_init; dm[i]->dev = ibdev; - spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); - if (!ibdev->sriov.is_going_down) + } + /* initialize or tear down tunnel QPs for the slave */ + spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); + if (!ibdev->sriov.is_going_down) { + for (i = 0; i < ports; i++) queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); + } else { + spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); + for (i = 0; i < ports; i++) + kfree(dm[i]); } out: kfree(dm); diff --git a/kernel/drivers/infiniband/hw/mlx4/mcg.c b/kernel/drivers/infiniband/hw/mlx4/mcg.c index a0559a8af..99451d887 100644 --- a/kernel/drivers/infiniband/hw/mlx4/mcg.c +++ b/kernel/drivers/infiniband/hw/mlx4/mcg.c @@ -51,6 +51,10 @@ pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\ (group)->name, group->demux->port, ## arg) +#define mcg_debug_group(group, format, arg...) \ + pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\ + (group)->name, (group)->demux->port, ## arg) + #define mcg_error_group(group, format, arg...) 
\ pr_err(" %16s: " format, (group)->name, ## arg) @@ -218,7 +222,7 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) spin_unlock_irqrestore(&dev->sm_lock, flags); return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, - &ah_attr, NULL, mad); + &ah_attr, NULL, 0xffff, mad); } static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx, @@ -962,8 +966,8 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, mutex_lock(&group->lock); if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { mutex_unlock(&group->lock); - mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", - port, slave, MAX_PEND_REQS_PER_FUNC); + mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", + port, slave, MAX_PEND_REQS_PER_FUNC); release_group(group, 0); kfree(req); return -ENOMEM; diff --git a/kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h b/kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h index fce393437..1caa11eda 100644 --- a/kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -70,11 +70,24 @@ extern int mlx4_ib_sm_guid_assign; #define MLX4_IB_UC_STEER_QPN_ALIGN 1 #define MLX4_IB_UC_MAX_NUM_QPS 256 + +enum hw_bar_type { + HW_BAR_BF, + HW_BAR_DB, + HW_BAR_CLOCK, + HW_BAR_COUNT +}; + +struct mlx4_ib_vma_private_data { + struct vm_area_struct *vma; +}; + struct mlx4_ib_ucontext { struct ib_ucontext ibucontext; struct mlx4_uar uar; struct list_head db_page_list; struct mutex db_page_mutex; + struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT]; }; struct mlx4_ib_pd { @@ -110,15 +123,23 @@ struct mlx4_ib_cq { struct mutex resize_mutex; struct ib_umem *umem; struct ib_umem *resize_umem; + int create_flags; /* List of qps that it serves.*/ struct list_head send_qp_list; struct list_head recv_qp_list; }; +#define MLX4_MR_PAGES_ALIGN 0x40 + struct mlx4_ib_mr { struct ib_mr ibmr; + __be64 *pages; + dma_addr_t page_map; + u32 npages; + u32 max_pages; struct mlx4_mr mmr; struct ib_umem *umem; + void *pages_alloc; }; struct mlx4_ib_mw { @@ -126,12 +147,6 @@ struct mlx4_ib_mw { struct mlx4_mw mmw; }; -struct mlx4_ib_fast_reg_page_list { - struct ib_fast_reg_page_list ibfrpl; - __be64 *mapped_page_list; - dma_addr_t map; -}; - struct mlx4_ib_fmr { struct ib_fmr ibfmr; struct mlx4_fmr mfmr; @@ -306,6 +321,7 @@ struct mlx4_ib_qp { struct list_head qps_list; struct list_head cq_recv_list; struct list_head cq_send_list; + struct counter_index *counter_index; }; struct mlx4_ib_srq { @@ -414,7 +430,6 @@ struct mlx4_ib_demux_pv_ctx { struct ib_device *ib_dev; struct ib_cq *cq; struct ib_pd *pd; - struct ib_mr *mr; struct work_struct work; struct workqueue_struct *wq; struct mlx4_ib_demux_pv_qp qp[2]; @@ -456,15 +471,26 @@ struct mlx4_ib_sriov { struct idr pv_id_table; }; +struct gid_cache_context { + int real_index; + int refcount; +}; + +struct gid_entry { + union ib_gid gid; + struct gid_cache_context *ctx; +}; + +struct mlx4_port_gid_table { + struct gid_entry gids[MLX4_MAX_PORT_GIDS]; +}; + struct mlx4_ib_iboe { spinlock_t lock; struct net_device *netdevs[MLX4_MAX_PORTS]; - struct net_device *masters[MLX4_MAX_PORTS]; atomic64_t mac[MLX4_MAX_PORTS]; struct notifier_block nb; - struct notifier_block nb_inet; - struct notifier_block nb_inet6; - union ib_gid gid_table[MLX4_MAX_PORTS][128]; + struct mlx4_port_gid_table gids[MLX4_MAX_PORTS]; }; struct pkey_mgt { @@ -503,6 +529,18 @@ struct mlx4_ib_iov_port { 
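The mlx4_ib.h hunk above introduces a refcounted per-port GID cache (gid_cache_context, gid_entry, mlx4_port_gid_table) in place of the old flat gid_table, together with mlx4_ib_gid_index_to_real_index() for translating a cached sgid_index into the index actually programmed in hardware. A simplified, self-contained sketch of that structure and lookup, using stand-in types and sizes rather than the driver's:

/*
 * Simplified model of a refcounted per-port GID cache: each cached GID
 * remembers the "real" index in the hardware table plus a reference
 * count, and lookups translate a cache slot into that real index.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_PORT_GIDS 16

struct gid {
	unsigned char raw[16];
};

struct gid_cache_ctx {
	int real_index;
	int refcount;
};

struct gid_entry {
	struct gid gid;
	struct gid_cache_ctx *ctx;     /* NULL when the slot is empty */
};

struct port_gid_table {
	struct gid_entry gids[MAX_PORT_GIDS];
};

/* Add (or re-reference) a GID; returns the cache slot or -1 when full. */
static int gid_table_add(struct port_gid_table *t, const struct gid *gid,
			 int hw_index)
{
	int free_slot = -1;
	int i;

	for (i = 0; i < MAX_PORT_GIDS; i++) {
		if (t->gids[i].ctx &&
		    !memcmp(&t->gids[i].gid, gid, sizeof(*gid))) {
			t->gids[i].ctx->refcount++;    /* already cached */
			return i;
		}
		if (!t->gids[i].ctx && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;

	t->gids[free_slot].ctx = calloc(1, sizeof(*t->gids[free_slot].ctx));
	if (!t->gids[free_slot].ctx)
		return -1;
	t->gids[free_slot].gid = *gid;
	t->gids[free_slot].ctx->real_index = hw_index;
	t->gids[free_slot].ctx->refcount = 1;
	return free_slot;
}

/* Translate a cache slot into the index used by the hardware. */
static int gid_index_to_real_index(const struct port_gid_table *t, int index)
{
	if (index < 0 || index >= MAX_PORT_GIDS || !t->gids[index].ctx)
		return -1;
	return t->gids[index].ctx->real_index;
}

int main(void)
{
	struct port_gid_table table;
	struct gid g = { .raw = { 0xfe, 0x80 } };      /* toy link-local GID */
	int slot;

	memset(&table, 0, sizeof(table));
	slot = gid_table_add(&table, &g, 3 /* hypothetical hw index */);
	printf("slot %d -> real index %d\n", slot,
	       gid_index_to_real_index(&table, slot));
	return 0;
}

Refcounting the context lets several consumers share one hardware GID slot and lets the cache defer releasing the slot until the last reference drops.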
struct mlx4_ib_iov_sysfs_attr mcg_dentry; }; +struct counter_index { + struct list_head list; + u32 index; + u8 allocated; +}; + +struct mlx4_ib_counters { + struct list_head counters_list; + struct mutex mutex; /* mutex for accessing counters list */ + u32 default_counter; +}; + struct mlx4_ib_dev { struct ib_device ib_dev; struct mlx4_dev *dev; @@ -521,9 +559,8 @@ struct mlx4_ib_dev { struct mutex cap_mask_mutex; bool ib_active; struct mlx4_ib_iboe iboe; - int counters[MLX4_MAX_PORTS]; + struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS]; int *eq_table; - int eq_added; struct kobject *iov_parent; struct kobject *ports_parent; struct kobject *dev_ports_parent[MLX4_MFUNC_MAX]; @@ -555,6 +592,21 @@ struct mlx4_ib_qp_tunnel_init_attr { u8 port; }; +struct mlx4_uverbs_ex_query_device { + __u32 comp_mask; + __u32 reserved; +}; + +enum query_device_resp_mask { + QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0, +}; + +struct mlx4_uverbs_ex_query_device_resp { + __u32 comp_mask; + __u32 response_length; + __u64 hca_core_clock_offset; +}; + static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) { return container_of(ibdev, struct mlx4_ib_dev, ib_dev); @@ -595,11 +647,6 @@ static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw) return container_of(ibmw, struct mlx4_ib_mw, ibmw); } -static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl) -{ - return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl); -} - static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr) { return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr); @@ -660,15 +707,16 @@ struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind); int mlx4_ib_dealloc_mw(struct ib_mw *mw); -struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, - int max_page_list_len); -struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, - int page_list_len); -void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); - +struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, + enum ib_mr_type mr_type, + u32 max_num_sg); +int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, + struct scatterlist *sg, + int sg_nents); int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); -struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, +struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata); int mlx4_ib_destroy_cq(struct ib_cq *cq); @@ -706,11 +754,13 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr); int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad); + int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const void *in_mad, void *response_mad); int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); int mlx4_ib_mad_init(struct mlx4_ib_dev *dev); void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev); @@ 
-766,7 +816,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr, u8 *s_mac, - struct ib_mad *mad); + u16 vlan_id, struct ib_mad *mad); __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx); @@ -815,5 +865,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata); +int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, + u8 port_num, int index); #endif /* MLX4_IB_H */ diff --git a/kernel/drivers/infiniband/hw/mlx4/mr.c b/kernel/drivers/infiniband/hw/mlx4/mr.c index e0d271782..4d1e1c632 100644 --- a/kernel/drivers/infiniband/hw/mlx4/mr.c +++ b/kernel/drivers/infiniband/hw/mlx4/mr.c @@ -59,7 +59,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) struct mlx4_ib_mr *mr; int err; - mr = kmalloc(sizeof *mr, GFP_KERNEL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -140,7 +140,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int err; int n; - mr = kmalloc(sizeof *mr, GFP_KERNEL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -271,11 +271,59 @@ release_mpt_entry: return err; } +static int +mlx4_alloc_priv_pages(struct ib_device *device, + struct mlx4_ib_mr *mr, + int max_pages) +{ + int size = max_pages * sizeof(u64); + int add_size; + int ret; + + add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0); + + mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL); + if (!mr->pages_alloc) + return -ENOMEM; + + mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); + + mr->page_map = dma_map_single(device->dma_device, mr->pages, + size, DMA_TO_DEVICE); + + if (dma_mapping_error(device->dma_device, mr->page_map)) { + ret = -ENOMEM; + goto err; + } + + return 0; +err: + kfree(mr->pages_alloc); + + return ret; +} + +static void +mlx4_free_priv_pages(struct mlx4_ib_mr *mr) +{ + if (mr->pages) { + struct ib_device *device = mr->ibmr.device; + int size = mr->max_pages * sizeof(u64); + + dma_unmap_single(device->dma_device, mr->page_map, + size, DMA_TO_DEVICE); + kfree(mr->pages_alloc); + mr->pages = NULL; + } +} + int mlx4_ib_dereg_mr(struct ib_mr *ibmr) { struct mlx4_ib_mr *mr = to_mmr(ibmr); int ret; + mlx4_free_priv_pages(mr); + ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); if (ret) return ret; @@ -321,21 +369,21 @@ err_free: int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) { - struct ib_send_wr wr; + struct ib_bind_mw_wr wr; struct ib_send_wr *bad_wr; int ret; memset(&wr, 0, sizeof(wr)); - wr.opcode = IB_WR_BIND_MW; - wr.wr_id = mw_bind->wr_id; - wr.send_flags = mw_bind->send_flags; - wr.wr.bind_mw.mw = mw; - wr.wr.bind_mw.bind_info = mw_bind->bind_info; - wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey); - - ret = mlx4_ib_post_send(qp, &wr, &bad_wr); + wr.wr.opcode = IB_WR_BIND_MW; + wr.wr.wr_id = mw_bind->wr_id; + wr.wr.send_flags = mw_bind->send_flags; + wr.mw = mw; + wr.bind_info = mw_bind->bind_info; + wr.rkey = ib_inc_rkey(mw->rkey); + + ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr); if (!ret) - mw->rkey = wr.wr.bind_mw.rkey; + mw->rkey = wr.rkey; return ret; } @@ -350,87 +398,51 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) return 0; } -struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, - int max_page_list_len) +struct 
ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, + enum ib_mr_type mr_type, + u32 max_num_sg) { struct mlx4_ib_dev *dev = to_mdev(pd->device); struct mlx4_ib_mr *mr; int err; - mr = kmalloc(sizeof *mr, GFP_KERNEL); + if (mr_type != IB_MR_TYPE_MEM_REG || + max_num_sg > MLX4_MAX_FAST_REG_PAGES) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0, - max_page_list_len, 0, &mr->mmr); + max_num_sg, 0, &mr->mmr); if (err) goto err_free; + err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg); + if (err) + goto err_free_mr; + + mr->max_pages = max_num_sg; + err = mlx4_mr_enable(dev->dev, &mr->mmr); if (err) - goto err_mr; + goto err_free_pl; mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->umem = NULL; return &mr->ibmr; -err_mr: +err_free_pl: + mlx4_free_priv_pages(mr); +err_free_mr: (void) mlx4_mr_free(dev->dev, &mr->mmr); - err_free: kfree(mr); return ERR_PTR(err); } -struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, - int page_list_len) -{ - struct mlx4_ib_dev *dev = to_mdev(ibdev); - struct mlx4_ib_fast_reg_page_list *mfrpl; - int size = page_list_len * sizeof (u64); - - if (page_list_len > MLX4_MAX_FAST_REG_PAGES) - return ERR_PTR(-EINVAL); - - mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL); - if (!mfrpl) - return ERR_PTR(-ENOMEM); - - mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL); - if (!mfrpl->ibfrpl.page_list) - goto err_free; - - mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist-> - pdev->dev, - size, &mfrpl->map, - GFP_KERNEL); - if (!mfrpl->mapped_page_list) - goto err_free; - - WARN_ON(mfrpl->map & 0x3f); - - return &mfrpl->ibfrpl; - -err_free: - kfree(mfrpl->ibfrpl.page_list); - kfree(mfrpl); - return ERR_PTR(-ENOMEM); -} - -void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) -{ - struct mlx4_ib_dev *dev = to_mdev(page_list->device); - struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); - int size = page_list->max_page_list_len * sizeof (u64); - - dma_free_coherent(&dev->dev->persist->pdev->dev, size, - mfrpl->mapped_page_list, - mfrpl->map); - kfree(mfrpl->ibfrpl.page_list); - kfree(mfrpl); -} - struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc, struct ib_fmr_attr *fmr_attr) { @@ -523,3 +535,37 @@ int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr) return err; } + +static int mlx4_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct mlx4_ib_mr *mr = to_mmr(ibmr); + + if (unlikely(mr->npages == mr->max_pages)) + return -ENOMEM; + + mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); + + return 0; +} + +int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, + struct scatterlist *sg, + int sg_nents) +{ + struct mlx4_ib_mr *mr = to_mmr(ibmr); + int rc; + + mr->npages = 0; + + ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, + sizeof(u64) * mr->max_pages, + DMA_TO_DEVICE); + + rc = ib_sg_to_pages(ibmr, sg, sg_nents, mlx4_set_page); + + ib_dma_sync_single_for_device(ibmr->device, mr->page_map, + sizeof(u64) * mr->max_pages, + DMA_TO_DEVICE); + + return rc; +} diff --git a/kernel/drivers/infiniband/hw/mlx4/qp.c b/kernel/drivers/infiniband/hw/mlx4/qp.c index 02fc91c68..13eaaf452 100644 --- a/kernel/drivers/infiniband/hw/mlx4/qp.c +++ b/kernel/drivers/infiniband/hw/mlx4/qp.c @@ -34,6 +34,7 @@ #include <linux/log2.h> #include <linux/slab.h> #include <linux/netdevice.h> +#include <linux/vmalloc.h> #include <rdma/ib_cache.h> #include <rdma/ib_pack.h> @@ -111,7 +112,7 @@ static const __be32 
mlx4_ib_opcode[] = { [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), - [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), + [IB_WR_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW), @@ -617,6 +618,18 @@ static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn) return 0; } +static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, + struct mlx4_ib_qp *qp) +{ + mutex_lock(&dev->counters_table[qp->port - 1].mutex); + mlx4_counter_free(dev->dev, qp->counter_index->index); + list_del(&qp->counter_index->list); + mutex_unlock(&dev->counters_table[qp->port - 1].mutex); + + kfree(qp->counter_index); + qp->counter_index = NULL; +} + static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp, @@ -746,9 +759,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, } else { qp->sq_no_prefetch = 0; - if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) - qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; - if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) qp->flags |= MLX4_IB_QP_LSO; @@ -786,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (err) goto err_mtt; - qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); - qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); + qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp); + if (!qp->sq.wrid) + qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64), + gfp, PAGE_KERNEL); + qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp); + if (!qp->rq.wrid) + qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64), + gfp, PAGE_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; @@ -822,6 +838,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_proxy; } + if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) + qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; + err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); if (err) goto err_qpn; @@ -874,8 +893,8 @@ err_wrid: if (qp_has_rq(init_attr)) mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); } else { - kfree(qp->sq.wrid); - kfree(qp->rq.wrid); + kvfree(qp->sq.wrid); + kvfree(qp->rq.wrid); } err_mtt: @@ -1050,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, &qp->db); ib_umem_release(qp->umem); } else { - kfree(qp->sq.wrid); - kfree(qp->rq.wrid); + kvfree(qp->sq.wrid); + kvfree(qp->rq.wrid); if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) free_proxy_bufs(&dev->ib_dev, qp); @@ -1086,6 +1105,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, { struct mlx4_ib_qp *qp = NULL; int err; + int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; u16 xrcdn = 0; gfp_t gfp; @@ -1109,8 +1129,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, } if (init_attr->create_flags && - (udata || - ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) && + ((udata && init_attr->create_flags & ~(sup_u_create_flags)) || + ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | + 
MLX4_IB_QP_CREATE_USE_GFP_NOIO | + MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)) && init_attr->qp_type != IB_QPT_UD) || ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) && init_attr->qp_type > IB_QPT_GSI))) @@ -1189,6 +1211,9 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); } + if (mqp->counter_index) + mlx4_ib_free_qp_counter(dev, mqp); + pd = get_pd(mqp); destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); @@ -1292,14 +1317,18 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, path->static_rate = 0; if (ah->ah_flags & IB_AH_GRH) { - if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { + int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev, + port, + ah->grh.sgid_index); + + if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) { pr_err("sgid_index (%u) too large. max is %d\n", - ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); + real_sgid_index, dev->dev->caps.gid_table_len[port] - 1); return -1; } path->grh_mylmc |= 1 << 7; - path->mgid_index = ah->grh.sgid_index; + path->mgid_index = real_sgid_index; path->hop_limit = ah->grh.hop_limit; path->tclass_flowlabel = cpu_to_be32((ah->grh.traffic_class << 20) | @@ -1387,11 +1416,12 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, - struct mlx4_qp_path *path, u8 port) + struct mlx4_qp_path *path, u8 port, + u16 vlan_id, u8 *smac) { return _mlx4_set_path(dev, &qp->ah_attr, - mlx4_mac_to_u64((u8 *)qp->smac), - (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff, + mlx4_mac_to_u64(smac), + vlan_id, path, &mqp->pri, port); } @@ -1402,9 +1432,8 @@ static int mlx4_set_alt_path(struct mlx4_ib_dev *dev, struct mlx4_qp_path *path, u8 port) { return _mlx4_set_path(dev, &qp->alt_ah_attr, - mlx4_mac_to_u64((u8 *)qp->alt_smac), - (qp_attr_mask & IB_QP_ALT_VID) ? 
- qp->alt_vlan_id : 0xffff, + 0, + 0xffff, path, &mqp->alt, port); } @@ -1420,7 +1449,8 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) } } -static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, +static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, + struct mlx4_ib_qp *qp, struct mlx4_qp_context *context) { u64 u64_mac; @@ -1443,6 +1473,40 @@ static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp * return 0; } +static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) +{ + struct counter_index *new_counter_index; + int err; + u32 tmp_idx; + + if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != + IB_LINK_LAYER_ETHERNET || + !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || + !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK)) + return 0; + + err = mlx4_counter_alloc(dev->dev, &tmp_idx); + if (err) + return err; + + new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); + if (!new_counter_index) { + mlx4_counter_free(dev->dev, tmp_idx); + return -ENOMEM; + } + + new_counter_index->index = tmp_idx; + new_counter_index->allocated = 1; + qp->counter_index = new_counter_index; + + mutex_lock(&dev->counters_table[qp->port - 1].mutex); + list_add_tail(&new_counter_index->list, + &dev->counters_table[qp->port - 1].counters_list); + mutex_unlock(&dev->counters_table[qp->port - 1].mutex); + + return 0; +} + static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) @@ -1456,6 +1520,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, int sqd_event; int steer_qp = 0; int err = -EINVAL; + int counter_index; /* APM is not supported under RoCE */ if (attr_mask & IB_QP_ALT_PATH && @@ -1515,6 +1580,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; context->sq_size_stride |= qp->sq.wqe_shift - 4; + if (new_state == IB_QPS_RESET && qp->counter_index) + mlx4_ib_free_qp_counter(dev, qp); + if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { context->sq_size_stride |= !!qp->sq_no_prefetch << 7; context->xrcd = cpu_to_be32((u32) qp->xrcdn); @@ -1539,12 +1607,27 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, } if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { - if (dev->counters[qp->port - 1] != -1) { - context->pri_path.counter_index = - dev->counters[qp->port - 1]; + err = create_qp_lb_counter(dev, qp); + if (err) + goto out; + + counter_index = + dev->counters_table[qp->port - 1].default_counter; + if (qp->counter_index) + counter_index = qp->counter_index->index; + + if (counter_index != -1) { + context->pri_path.counter_index = counter_index; optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; + if (qp->counter_index) { + context->pri_path.fl |= + MLX4_FL_ETH_SRC_CHECK_MC_LB; + context->pri_path.vlan_control |= + MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER; + } } else - context->pri_path.counter_index = 0xff; + context->pri_path.counter_index = + MLX4_SINK_COUNTER_INDEX(dev->dev); if (qp->flags & MLX4_IB_QP_NETIF) { mlx4_ib_steer_qp_reg(dev, qp, 1); @@ -1560,9 +1643,33 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, } if (attr_mask & IB_QP_AV) { + u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 : + attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; + union ib_gid gid; + struct ib_gid_attr gid_attr; + u16 vlan = 0xffff; + u8 smac[ETH_ALEN]; + int status = 0; + + if (rdma_cap_eth_ah(&dev->ib_dev, port_num) && + attr->ah_attr.ah_flags & IB_AH_GRH) { + int index = attr->ah_attr.grh.sgid_index; + + status = ib_get_cached_gid(ibqp->device, port_num, + index, &gid, &gid_attr); + if (!status && !memcmp(&gid, &zgid, sizeof(gid))) + status = -ENOENT; + if (!status && gid_attr.ndev) { + vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev); + memcpy(smac, gid_attr.ndev->dev_addr, ETH_ALEN); + dev_put(gid_attr.ndev); + } + } + if (status) + goto out; + if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, - attr_mask & IB_QP_PORT ? - attr->port_num : qp->port)) + port_num, vlan, smac)) goto out; optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | @@ -1699,7 +1806,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { - err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); + err = handle_eth_ud_smac_index(dev, qp, context); if (err) { err = -EINVAL; goto out; @@ -1843,6 +1950,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, } } out: + if (err && qp->counter_index) + mlx4_ib_free_qp_counter(dev, qp); if (err && steer_qp) mlx4_ib_steer_qp_reg(dev, qp, 0); kfree(context); @@ -2031,14 +2140,14 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) } static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, - struct ib_send_wr *wr, + struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); struct ib_device *ib_dev = &mdev->ib_dev; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; - struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + struct mlx4_ib_ah *ah = to_mah(wr->ah); u16 pkey; u32 qkey; int send_size; @@ -2046,13 +2155,13 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, int spc; int i; - if (wr->opcode != IB_WR_SEND) + if (wr->wr.opcode != IB_WR_SEND) return -EINVAL; send_size = 0; - for (i = 0; i < wr->num_sge; ++i) - send_size += wr->sg_list[i].length; + for (i = 0; i < wr->wr.num_sge; ++i) + send_size += wr->wr.sg_list[i].length; /* for proxy-qp0 sends, need to add in size of tunnel header */ /* for tunnel-qp0 sends, tunnel header is already in s/g list */ @@ -2077,11 +2186,11 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, mlx->rlid = sqp->ud_header.lrh.destination_lid; sqp->ud_header.lrh.virtual_lane = 0; - sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); + sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) - sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); + sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); else sqp->ud_header.bth.destination_qpn = cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); @@ -2153,14 +2262,14 @@ static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac) } } -static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, +static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct ib_device *ib_dev = sqp->qp.ibqp.device; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_ctrl_seg 
*ctrl = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; - struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + struct mlx4_ib_ah *ah = to_mah(wr->ah); union ib_gid sgid; u16 pkey; int send_size; @@ -2174,8 +2283,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, bool is_grh; send_size = 0; - for (i = 0; i < wr->num_sge; ++i) - send_size += wr->sg_list[i].length; + for (i = 0; i < wr->wr.num_sge; ++i) + send_size += wr->wr.sg_list[i].length; is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; is_grh = mlx4_ib_ah_grh_present(ah); @@ -2192,7 +2301,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, } else { err = ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, - ah->av.ib.gid_index, &sgid); + ah->av.ib.gid_index, &sgid, + NULL); + if (!err && !memcmp(&sgid, &zgid, sizeof(sgid))) + err = -ENOENT; if (err) return err; } @@ -2234,7 +2346,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, ah->av.ib.gid_index, - &sqp->ud_header.grh.source_gid); + &sqp->ud_header.grh.source_gid, NULL); } memcpy(sqp->ud_header.grh.destination_gid.raw, ah->av.ib.dgid, 16); @@ -2252,7 +2364,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, mlx->rlid = sqp->ud_header.lrh.destination_lid; } - switch (wr->opcode) { + switch (wr->wr.opcode) { case IB_WR_SEND: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; @@ -2260,7 +2372,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, case IB_WR_SEND_WITH_IMM: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; sqp->ud_header.immediate_present = 1; - sqp->ud_header.immediate_data = wr->ex.imm_data; + sqp->ud_header.immediate_data = wr->wr.ex.imm_data; break; default: return -EINVAL; @@ -2303,16 +2415,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; } - sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); + sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); else - ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); + ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); - sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); + sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); - sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? - sqp->qkey : wr->wr.ud.remote_qkey); + sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? 
+ sqp->qkey : wr->remote_qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); @@ -2400,43 +2512,39 @@ static __be32 convert_access(int acc) cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); } -static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) +static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg, + struct ib_reg_wr *wr) { - struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); - int i; - - for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i) - mfrpl->mapped_page_list[i] = - cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | - MLX4_MTT_FLAG_PRESENT); + struct mlx4_ib_mr *mr = to_mmr(wr->mr); - fseg->flags = convert_access(wr->wr.fast_reg.access_flags); - fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey); - fseg->buf_list = cpu_to_be64(mfrpl->map); - fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); - fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length); + fseg->flags = convert_access(wr->access); + fseg->mem_key = cpu_to_be32(wr->key); + fseg->buf_list = cpu_to_be64(mr->page_map); + fseg->start_addr = cpu_to_be64(mr->ibmr.iova); + fseg->reg_len = cpu_to_be64(mr->ibmr.length); fseg->offset = 0; /* XXX -- is this just for ZBVA? */ - fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift); + fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size)); fseg->reserved[0] = 0; fseg->reserved[1] = 0; } -static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr) +static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, + struct ib_bind_mw_wr *wr) { bseg->flags1 = - convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) & + convert_access(wr->bind_info.mw_access_flags) & cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ | MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE | MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC); bseg->flags2 = 0; - if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2) + if (wr->mw->type == IB_MW_TYPE_2) bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2); - if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED) + if (wr->bind_info.mw_access_flags & IB_ZERO_BASED) bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED); - bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey); - bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey); - bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr); - bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length); + bseg->new_rkey = cpu_to_be32(wr->rkey); + bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey); + bseg->addr = cpu_to_be64(wr->bind_info.addr); + bseg->length = cpu_to_be64(wr->bind_info.length); } static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) @@ -2453,46 +2561,47 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, rseg->reserved = 0; } -static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr) +static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, + struct ib_atomic_wr *wr) { - if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { - aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); - aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); - } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { - aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); - aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); + if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + aseg->swap_add = cpu_to_be64(wr->swap); + aseg->compare = cpu_to_be64(wr->compare_add); + } else if (wr->wr.opcode == 
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { + aseg->swap_add = cpu_to_be64(wr->compare_add); + aseg->compare = cpu_to_be64(wr->compare_add_mask); } else { - aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->swap_add = cpu_to_be64(wr->compare_add); aseg->compare = 0; } } static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, - struct ib_send_wr *wr) + struct ib_atomic_wr *wr) { - aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); - aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); - aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); - aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); + aseg->swap_add = cpu_to_be64(wr->swap); + aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); + aseg->compare = cpu_to_be64(wr->compare_add); + aseg->compare_mask = cpu_to_be64(wr->compare_add_mask); } static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, - struct ib_send_wr *wr) + struct ib_ud_wr *wr) { - memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); - dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); - dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); - dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; - memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); + memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); + dseg->dqpn = cpu_to_be32(wr->remote_qpn); + dseg->qkey = cpu_to_be32(wr->remote_qkey); + dseg->vlan = to_mah(wr->ah)->av.eth.vlan; + memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6); } static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, struct mlx4_wqe_datagram_seg *dseg, - struct ib_send_wr *wr, + struct ib_ud_wr *wr, enum mlx4_ib_qp_type qpt) { - union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; + union mlx4_ext_av *av = &to_mah(wr->ah)->av; struct mlx4_av sqp_av = {0}; int port = *((u8 *) &av->ib.port_pd) & 0x3; @@ -2511,18 +2620,18 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); } -static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) +static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_wqe_inline_seg *inl = wqe; struct mlx4_ib_tunnel_header hdr; - struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + struct mlx4_ib_ah *ah = to_mah(wr->ah); int spc; int i; memcpy(&hdr.av, &ah->av, sizeof hdr.av); - hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); - hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index); - hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); + hdr.remote_qpn = cpu_to_be32(wr->remote_qpn); + hdr.pkey_index = cpu_to_be16(wr->pkey_index); + hdr.qkey = cpu_to_be32(wr->remote_qkey); memcpy(hdr.mac, ah->av.eth.mac, 6); hdr.vlan = ah->av.eth.vlan; @@ -2594,22 +2703,22 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) dseg->addr = cpu_to_be64(sg->addr); } -static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, +static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) { - unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); + unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) *blh = cpu_to_be32(1 << 6); if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && - wr->num_sge > qp->sq.max_gs - (halign >> 4))) + wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) return -EINVAL; - memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); + memcpy(wqe->header, wr->header, 
wr->hlen); - *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); + *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen); *lso_seg_len = halign; return 0; } @@ -2708,11 +2817,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: - set_raddr_seg(wqe, wr->wr.atomic.remote_addr, - wr->wr.atomic.rkey); + set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, + atomic_wr(wr)->rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); - set_atomic_seg(wqe, wr); + set_atomic_seg(wqe, atomic_wr(wr)); wqe += sizeof (struct mlx4_wqe_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + @@ -2721,11 +2830,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: - set_raddr_seg(wqe, wr->wr.atomic.remote_addr, - wr->wr.atomic.rkey); + set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, + atomic_wr(wr)->rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); - set_masked_atomic_seg(wqe, wr); + set_masked_atomic_seg(wqe, atomic_wr(wr)); wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + @@ -2736,8 +2845,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: - set_raddr_seg(wqe, wr->wr.rdma.remote_addr, - wr->wr.rdma.rkey); + set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); size += sizeof (struct mlx4_wqe_raddr_seg) / 16; break; @@ -2750,18 +2859,18 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; break; - case IB_WR_FAST_REG_MR: + case IB_WR_REG_MR: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); - set_fmr_seg(wqe, wr); - wqe += sizeof (struct mlx4_wqe_fmr_seg); - size += sizeof (struct mlx4_wqe_fmr_seg) / 16; + set_reg_seg(wqe, reg_wr(wr)); + wqe += sizeof(struct mlx4_wqe_fmr_seg); + size += sizeof(struct mlx4_wqe_fmr_seg) / 16; break; case IB_WR_BIND_MW: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); - set_bind_seg(wqe, wr); + set_bind_seg(wqe, bind_mw_wr(wr)); wqe += sizeof(struct mlx4_wqe_bind_seg); size += sizeof(struct mlx4_wqe_bind_seg) / 16; break; @@ -2772,7 +2881,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case MLX4_IB_QPT_TUN_SMI_OWNER: - err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); + err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), + ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; @@ -2783,19 +2893,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, case MLX4_IB_QPT_TUN_SMI: case MLX4_IB_QPT_TUN_GSI: /* this is a UD qp used in MAD responses to slaves. 
*/ - set_datagram_seg(wqe, wr); + set_datagram_seg(wqe, ud_wr(wr)); /* set the forced-loopback bit in the data seg av */ *(__be32 *) wqe |= cpu_to_be32(0x80000000); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; break; case MLX4_IB_QPT_UD: - set_datagram_seg(wqe, wr); + set_datagram_seg(wqe, ud_wr(wr)); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; if (wr->opcode == IB_WR_LSO) { - err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); + err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, + &lso_hdr_sz, &blh); if (unlikely(err)) { *bad_wr = wr; goto out; @@ -2807,7 +2918,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case MLX4_IB_QPT_PROXY_SMI_OWNER: - err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); + err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), + ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; @@ -2818,7 +2930,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, add_zero_len_inline(wqe); wqe += 16; size++; - build_tunnel_header(wr, wqe, &seglen); + build_tunnel_header(ud_wr(wr), wqe, &seglen); wqe += seglen; size += seglen / 16; break; @@ -2828,18 +2940,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, * In this case we first add a UD segment targeting * the tunnel qp, and then add a header with address * information */ - set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, + set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, + ud_wr(wr), qp->mlx4_ib_qp_type); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; - build_tunnel_header(wr, wqe, &seglen); + build_tunnel_header(ud_wr(wr), wqe, &seglen); wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: - err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); + err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl, + &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; diff --git a/kernel/drivers/infiniband/hw/mlx4/srq.c b/kernel/drivers/infiniband/hw/mlx4/srq.c index dce5dfe3a..c394376eb 100644 --- a/kernel/drivers/infiniband/hw/mlx4/srq.c +++ b/kernel/drivers/infiniband/hw/mlx4/srq.c @@ -34,6 +34,7 @@ #include <linux/mlx4/qp.h> #include <linux/mlx4/srq.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include "mlx4_ib.h" #include "user.h" @@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL); if (!srq->wrid) { - err = -ENOMEM; - goto err_mtt; + srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64), + GFP_KERNEL, PAGE_KERNEL); + if (!srq->wrid) { + err = -ENOMEM; + goto err_mtt; + } } } @@ -204,7 +209,7 @@ err_wrid: if (pd->uobject) mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); else - kfree(srq->wrid); + kvfree(srq->wrid); err_mtt: mlx4_mtt_cleanup(dev->dev, &srq->mtt); @@ -281,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq) mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); ib_umem_release(msrq->umem); } else { - kfree(msrq->wrid); + kvfree(msrq->wrid); mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, &msrq->buf); mlx4_db_free(dev->dev, &msrq->db); |
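The qp.c and srq.c hunks above stop failing outright when kmalloc() cannot provide the wrid arrays: they retry with __vmalloc() (hence the new <linux/vmalloc.h> includes) and release the memory with kvfree(), which handles both allocation types. A short kernel-style sketch of that fallback, factored into hypothetical helpers rather than open-coded at each call site as the driver does:

/*
 * Sketch of the allocation pattern used for the wrid arrays: try a
 * physically contiguous kmalloc() first, fall back to vmalloc space
 * for large WQE counts, and free with kvfree(), which copes with both.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static u64 *alloc_wrid_array(unsigned int wqe_cnt, gfp_t gfp)
{
	u64 *wrid;

	wrid = kmalloc(wqe_cnt * sizeof(u64), gfp);
	if (!wrid)
		wrid = __vmalloc(wqe_cnt * sizeof(u64), gfp, PAGE_KERNEL);
	return wrid;		/* NULL only if both attempts failed */
}

static void free_wrid_array(u64 *wrid)
{
	kvfree(wrid);		/* handles kmalloc'ed and vmalloc'ed memory */
}

Later kernels provide kvmalloc_array()/kvfree() for exactly this pattern; at this point the fallback is written out explicitly, as in the hunks above.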