author    José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit    e09b41010ba33a20a87472ee821fa407a5b8da36
tree      d10dc367189862e7ca5c592f033dc3726e1df4e3  /kernel/drivers/infiniband/hw/mlx4/mad.c
parent    f93b97fd65072de626c074dbe099a1fff05ce060
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download
page. During the rebase, the following patch collided: Force tick
interrupt and get rid of softirq magic (I70131fb85). The collision was
resolved by dropping the conflicting hunks, since their logic was
already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/infiniband/hw/mlx4/mad.c')
-rw-r--r--  kernel/drivers/infiniband/hw/mlx4/mad.c | 217
1 file changed, 115 insertions(+), 102 deletions(-)
diff --git a/kernel/drivers/infiniband/hw/mlx4/mad.c b/kernel/drivers/infiniband/hw/mlx4/mad.c
index 9cd2b002d..870e56b6b 100644
--- a/kernel/drivers/infiniband/hw/mlx4/mad.c
+++ b/kernel/drivers/infiniband/hw/mlx4/mad.c
@@ -64,14 +64,6 @@ enum {
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
-/* Counters should be saturate once they reach their maximum value */
-#define ASSIGN_32BIT_COUNTER(counter, value) do {\
- if ((value) > U32_MAX) \
- counter = cpu_to_be32(U32_MAX); \
- else \
- counter = cpu_to_be32(value); \
-} while (0)
-
struct mlx4_mad_rcv_buf {
struct ib_grh grh;
u8 payload[256];
@@ -111,8 +103,9 @@ __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
}
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad)
+ int port, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const void *in_mad, void *response_mad)
{
struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
void *inbox;
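
This hunk, like the smp_snoop(), forward_trap(), and ib_process_mad() hunks below, only constifies the incoming WC, GRH, and MAD pointers to match the 4.4 MAD interface: request buffers are read-only, and replies are composed in a separate caller-owned buffer. A minimal sketch of that contract (hypothetical helper, not from this patch):

#include <rdma/ib_mad.h>

/* Hypothetical: the request MAD may only be read after the constify
 * change; the reply is built in the caller-owned out buffer. */
static void build_reply(const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        *out_mad = *in_mad;     /* copy header and payload */
        out_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
}
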
@@ -220,7 +213,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
* Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
* synthesize LID change, Client-Rereg, GID change, and P_Key change events.
*/
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
u16 prev_lid)
{
struct ib_port_info *pinfo;
@@ -356,7 +349,7 @@ static void node_desc_override(struct ib_device *dev,
}
}
-static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
+static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
struct ib_mad_send_buf *send_buf;
@@ -366,7 +359,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ IB_MGMT_MAD_DATA, GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
if (IS_ERR(send_buf))
return;
/*
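
The functional change in this hunk is the extra final argument to ib_create_send_mad(): since the OPA MAD work merged around the 4.2/4.3 timeframe, the core takes the MAD base version explicitly, and plain InfiniBand callers pass IB_MGMT_BASE_VERSION. A sketch of the updated call (hypothetical wrapper name):

#include <rdma/ib_mad.h>

/* Hypothetical wrapper showing the eight-argument ib_create_send_mad();
 * the trailing base_version argument is the new one. */
static struct ib_mad_send_buf *alloc_trap_mad(struct ib_mad_agent *agent,
                                              u32 qpn)
{
        return ib_create_send_mad(agent, qpn,
                                  0,            /* pkey_index */
                                  0,            /* rmpp_active */
                                  IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                  GFP_ATOMIC, IB_MGMT_BASE_VERSION);
}
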
@@ -463,7 +457,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
struct ib_grh *grh, struct ib_mad *mad)
{
struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
+ struct ib_ud_wr wr;
+ struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *tun_ctx;
struct mlx4_ib_demux_pv_qp *tun_qp;
struct mlx4_rcv_tunnel_mad *tun_mad;
@@ -586,20 +581,20 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
list.length = sizeof (struct mlx4_rcv_tunnel_mad);
- list.lkey = tun_ctx->mr->lkey;
-
- wr.wr.ud.ah = ah;
- wr.wr.ud.port_num = port;
- wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
- wr.wr.ud.remote_qpn = dqpn;
- wr.next = NULL;
- wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(src_qp, &wr, &bad_wr);
+ list.lkey = tun_ctx->pd->local_dma_lkey;
+
+ wr.ah = ah;
+ wr.port_num = port;
+ wr.remote_qkey = IB_QP_SET_QKEY;
+ wr.remote_qpn = dqpn;
+ wr.wr.next = NULL;
+ wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
+ wr.wr.sg_list = &list;
+ wr.wr.num_sge = 1;
+ wr.wr.opcode = IB_WR_SEND;
+ wr.wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
out:
if (ret)
ib_destroy_ah(ah);
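
This is the mechanical part of the 4.4 "split struct ib_send_wr" rework: the UD-only fields move out of the old wr.ud union into struct ib_ud_wr, which embeds the generic struct ib_send_wr as its .wr member, and the lkey now comes from the protection domain's reserved local_dma_lkey instead of a driver-allocated DMA MR. A minimal sketch of the resulting posting pattern (hypothetical function, not the driver's code):

#include <rdma/ib_verbs.h>

/* Hypothetical: post one signaled UD send with the 4.4 typed WR. */
static int post_ud_send(struct ib_qp *qp, struct ib_pd *pd, struct ib_ah *ah,
                        u64 dma_addr, u32 len, u32 dqpn, u32 qkey, u64 wr_id)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = pd->local_dma_lkey,   /* was mr->lkey */
        };
        struct ib_ud_wr wr = {
                .wr = {                         /* generic part */
                        .wr_id      = wr_id,
                        .sg_list    = &sge,
                        .num_sge    = 1,
                        .opcode     = IB_WR_SEND,
                        .send_flags = IB_SEND_SIGNALED,
                },
                .ah          = ah,              /* UD-specific part */
                .remote_qpn  = dqpn,
                .remote_qkey = qkey,
        };
        struct ib_send_wr *bad_wr;

        /* The post verb still takes the embedded generic WR. */
        return ib_post_send(qp, &wr.wr, &bad_wr);
}
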
@@ -722,8 +717,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
}
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
u16 slid, prev_lid = 0;
int err;
@@ -825,34 +820,39 @@ static void edit_counter(struct mlx4_counter *cnt,
}
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- int err;
- u32 inmod = dev->counters[port_num - 1] & 0xffff;
- u8 mode;
+ struct counter_index *tmp_counter;
+ int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return -EINVAL;
- mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
- if (IS_ERR(mailbox))
- return IB_MAD_RESULT_FAILURE;
-
- err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
- MLX4_CMD_WRAPPED);
- if (err)
- err = IB_MAD_RESULT_FAILURE;
- else {
+ memset(&counter_stats, 0, sizeof(counter_stats));
+ mutex_lock(&dev->counters_table[port_num - 1].mutex);
+ list_for_each_entry(tmp_counter,
+ &dev->counters_table[port_num - 1].counters_list,
+ list) {
+ err = mlx4_get_counter_stats(dev->dev,
+ tmp_counter->index,
+ &counter_stats, 0);
+ if (err) {
+ err = IB_MAD_RESULT_FAILURE;
+ stats_avail = 0;
+ break;
+ }
+ stats_avail = 1;
+ }
+ mutex_unlock(&dev->counters_table[port_num - 1].mutex);
+ if (stats_avail) {
memset(out_mad->data, 0, sizeof out_mad->data);
- mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
- switch (mode & 0xf) {
+ switch (counter_stats.counter_mode & 0xf) {
case 0:
- edit_counter(mailbox->buf,
- (void *)(out_mad->data + 40));
+ edit_counter(&counter_stats,
+ (void *)(out_mad->data + 40));
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
break;
default:
@@ -860,25 +860,43 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
}
- mlx4_free_cmd_mailbox(dev->dev, mailbox);
-
return err;
}
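
With the mailbox gone, iboe_process_mad() no longer round-trips MLX4_CMD_QUERY_IF_STAT through firmware; it walks the per-port list of assigned flow counters under a mutex and queries each one via mlx4_get_counter_stats(). The shape of that walk, with hypothetical stand-in types for the driver's counter table:

#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical stand-ins for the driver's per-port counter table. */
struct counter_index {
        struct list_head list;
        int index;
};

struct counter_table {
        struct list_head counters_list;
        struct mutex mutex;
};

/* Query every counter bound to the port, stopping on the first
 * failure as the hunk above does; query() stands in for
 * mlx4_get_counter_stats(). */
static int query_port_counters(struct counter_table *table,
                               int (*query)(int index))
{
        struct counter_index *c;
        int err = 0;

        mutex_lock(&table->mutex);
        list_for_each_entry(c, &table->counters_list, list) {
                err = query(c->index);
                if (err)
                        break;
        }
        mutex_unlock(&table->mutex);
        return err;
}
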
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
{
- switch (rdma_port_get_link_layer(ibdev, port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ const struct ib_mad *in_mad = (const struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+ enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
+
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
+
+ /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
+ * queries, should be called only by VFs and for that specific purpose
+ */
+ if (link == IB_LINK_LAYER_INFINIBAND) {
+ if (mlx4_is_slave(dev->dev) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
- case IB_LINK_LAYER_ETHERNET:
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- default:
- return -EINVAL;
}
+
+ if (link == IB_LINK_LAYER_ETHERNET)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return -EINVAL;
}
static void send_handler(struct ib_mad_agent *agent,
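
The driver is adapted here to the 4.4 process_mad prototype, which passes opaque struct ib_mad_hdr pointers with explicit sizes (groundwork for variable-size OPA MADs); a driver that only handles fixed 256-byte IB MADs validates the sizes before casting, exactly as the hunk does. A sketch of such a hook (hypothetical driver prefix):

#include <rdma/ib_mad.h>
#include <rdma/ib_verbs.h>

/* Hypothetical hook in the 4.4 shape: reject anything that is not a
 * fixed-size IB MAD, then dispatch on the management class. */
static int foo_process_mad(struct ib_device *ibdev, int mad_flags,
                           u8 port_num, const struct ib_wc *in_wc,
                           const struct ib_grh *in_grh,
                           const struct ib_mad_hdr *in, size_t in_mad_size,
                           struct ib_mad_hdr *out, size_t *out_mad_size,
                           u16 *out_mad_pkey_index)
{
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        /* ... dispatch on in_mad->mad_hdr.mgmt_class, fill out_mad ... */
        (void)out_mad;
        return IB_MAD_RESULT_SUCCESS;
}
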
@@ -1127,7 +1145,7 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
sg_list.addr = tun_qp->ring[index].map;
sg_list.length = size;
- sg_list.lkey = ctx->mr->lkey;
+ sg_list.lkey = ctx->pd->local_dma_lkey;
recv_wr.next = NULL;
recv_wr.sg_list = &sg_list;
@@ -1166,10 +1184,11 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, u16 pkey_index,
u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
- u8 *s_mac, struct ib_mad *mad)
+ u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
{
struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
+ struct ib_ud_wr wr;
+ struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *sqp_ctx;
struct mlx4_ib_demux_pv_qp *sqp;
struct mlx4_mad_snd_buf *sqp_mad;
@@ -1238,24 +1257,27 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
list.length = sizeof (struct mlx4_mad_snd_buf);
- list.lkey = sqp_ctx->mr->lkey;
-
- wr.wr.ud.ah = ah;
- wr.wr.ud.port_num = port;
- wr.wr.ud.pkey_index = wire_pkey_ix;
- wr.wr.ud.remote_qkey = qkey;
- wr.wr.ud.remote_qpn = remote_qpn;
- wr.next = NULL;
- wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
+ list.lkey = sqp_ctx->pd->local_dma_lkey;
+
+ wr.ah = ah;
+ wr.port_num = port;
+ wr.pkey_index = wire_pkey_ix;
+ wr.remote_qkey = qkey;
+ wr.remote_qpn = remote_qpn;
+ wr.wr.next = NULL;
+ wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
+ wr.wr.sg_list = &list;
+ wr.wr.num_sge = 1;
+ wr.wr.opcode = IB_WR_SEND;
+ wr.wr.send_flags = IB_SEND_SIGNALED;
if (s_mac)
memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
+ if (vlan_id < 0x1000)
+ vlan_id |= (attr->sl & 7) << 13;
+ to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
- ret = ib_post_send(send_qp, &wr, &bad_wr);
+ ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
out:
if (ret)
ib_destroy_ah(ah);
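
The new vlan_id handling builds the 16-bit 802.1Q TCI by hand: the 12-bit VLAN ID occupies bits 11:0, and the 3-bit IB service level is folded into the PCP priority field, bits 15:13; 0xffff (anything >= 0x1000) means "untagged". As a worked sketch (hypothetical helper):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical: fold the SL into the PCP bits of the TCI only when
 * the VLAN ID is a valid 12-bit value. */
static __be16 build_vlan_tci(u16 vlan_id, u8 sl)
{
        if (vlan_id < 0x1000)           /* 0xffff encodes "no VLAN" */
                vlan_id |= (sl & 7) << 13;
        return cpu_to_be16(vlan_id);
}
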
@@ -1289,6 +1311,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
u8 *slave_id;
int slave;
int port;
+ u16 vlan_id;
/* Get slave that sent this packet */
if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1365,19 +1388,22 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
* stadard address handle by decoding the tunnelled mlx4_ah fields */
memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
ah.ibah.device = ctx->ib_dev;
+
+ port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
+ port = mlx4_slave_convert_port(dev->dev, slave, port);
+ if (port < 0)
+ return;
+ ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
if (ah_attr.ah_flags & IB_AH_GRH)
fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
- port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
- if (port < 0)
- return;
- ah_attr.port_num = port;
memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
- ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
+ vlan_id = be16_to_cpu(tunnel->hdr.vlan);
/* if slave have default vlan use it */
mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
- &ah_attr.vlan_id, &ah_attr.sl);
+ &vlan_id, &ah_attr.sl);
mlx4_ib_send_to_wire(dev, slave, ctx->port,
is_proxy_qp0(dev, wc->src_qp, slave) ?
@@ -1385,7 +1411,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
be16_to_cpu(tunnel->hdr.pkey_index),
be32_to_cpu(tunnel->hdr.remote_qpn),
be32_to_cpu(tunnel->hdr.qkey),
- &ah_attr, wc->smac, &tunnel->mad);
+ &ah_attr, wc->smac, vlan_id, &tunnel->mad);
}
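
The port override now happens before the AH is decoded, by rewriting the tunnelled mlx4_av directly: the top byte of the big-endian port_pd word carries the port number and the low 24 bits the PD. The packing, as a hypothetical helper:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical: replace the port kept in the high byte of the 32-bit
 * big-endian port_pd field, preserving the 24-bit PD below it. */
static __be32 set_av_port(__be32 port_pd, u8 port)
{
        return cpu_to_be32((u32)port << 24 |
                           (be32_to_cpu(port_pd) & 0xffffff));
}
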
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1773,6 +1799,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
int ret, cq_size;
+ struct ib_cq_init_attr cq_attr = {};
if (ctx->state != DEMUX_PV_STATE_DOWN)
return -EEXIST;
@@ -1801,8 +1828,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
if (ctx->has_smi)
cq_size *= 2;
+ cq_attr.cqe = cq_size;
ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
- NULL, ctx, cq_size, 0);
+ NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
pr_err("Couldn't create tunnel CQ (%d)\n", ret);
@@ -1816,19 +1844,12 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
goto err_cq;
}
- ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(ctx->mr)) {
- ret = PTR_ERR(ctx->mr);
- pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
- goto err_pd;
- }
-
if (ctx->has_smi) {
ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
if (ret) {
pr_err("Couldn't create %s QP0 (%d)\n",
create_tun ? "tunnel for" : "", ret);
- goto err_mr;
+ goto err_pd;
}
}
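
With every lkey in this file now taken from the PD, the per-context DMA MR is dead weight; this hunk and the teardown hunks below delete its allocation, error path, and deregistration. Roughly, assuming a kernel client on 4.4:

#include <rdma/ib_verbs.h>

/* Before (sketch): mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 * then use mr->lkey, and ib_dereg_mr(mr) on teardown.
 * After: the PD's reserved lkey needs no setup or teardown. */
static u32 dma_lkey(const struct ib_pd *pd)
{
        return pd->local_dma_lkey;
}
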
@@ -1865,10 +1886,6 @@ err_qp0:
ib_destroy_qp(ctx->qp[0].qp);
ctx->qp[0].qp = NULL;
-err_mr:
- ib_dereg_mr(ctx->mr);
- ctx->mr = NULL;
-
err_pd:
ib_dealloc_pd(ctx->pd);
ctx->pd = NULL;
@@ -1905,8 +1922,6 @@ static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
ib_destroy_qp(ctx->qp[1].qp);
ctx->qp[1].qp = NULL;
mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
- ib_dereg_mr(ctx->mr);
- ctx->mr = NULL;
ib_dealloc_pd(ctx->pd);
ctx->pd = NULL;
ib_destroy_cq(ctx->cq);
@@ -2039,8 +2054,6 @@ static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
ib_destroy_qp(sqp_ctx->qp[1].qp);
sqp_ctx->qp[1].qp = NULL;
mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
- ib_dereg_mr(sqp_ctx->mr);
- sqp_ctx->mr = NULL;
ib_dealloc_pd(sqp_ctx->pd);
sqp_ctx->pd = NULL;
ib_destroy_cq(sqp_ctx->cq);