author    José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit    e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/net/ethernet/mellanox/mlx4
parent    f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The collision was dropped because its logic was already present in the source. Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769 Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/mellanox/mlx4')
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/cmd.c              | 131
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/cq.c               |  13
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_clock.c         |  32
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_cq.c            |  57
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c       |  71
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_main.c          |  65
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c        | 262
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_port.c          |  33
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_resources.c     |  27
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c            |  50
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c            |  15
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/eq.c               | 406
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/fw.c               | 104
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/fw.h               |   1
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/intf.c             |   3
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/main.c             | 337
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/mcg.c              |   7
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/mlx4.h             |  22
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h          |   8
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h       |  10
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/mr.c               |   2
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/profile.c          |   8
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/qp.c               |  34
-rw-r--r--  kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 286
24 files changed, 1518 insertions, 466 deletions
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/cmd.c b/kernel/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 529ef0594..d48d57934 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -49,6 +49,7 @@
#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
+#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL
@@ -685,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
+ long ret_wait;
int err = 0;
down(&cmd->event_sem);
@@ -710,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
if (err)
goto out_reset;
- if (!wait_for_completion_timeout(&context->done,
- msecs_to_jiffies(timeout))) {
+ if (op == MLX4_CMD_SENSE_PORT) {
+ ret_wait =
+ wait_for_completion_interruptible_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ if (ret_wait < 0) {
+ context->fw_status = 0;
+ context->out_param = 0;
+ context->result = 0;
+ }
+ } else {
+ ret_wait = (long)wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ }
+ if (!ret_wait) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
if (op == MLX4_CMD_NOP) {
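
The branch added above relies on the kernel convention that wait_for_completion_interruptible_timeout() returns a negative errno when a signal arrives, 0 on timeout, and the remaining jiffies on completion. A minimal userspace sketch of that three-way dispatch, with the wait itself stubbed out and all values illustrative:

#include <stdio.h>

/* Stub standing in for wait_for_completion_interruptible_timeout():
 * <0 = interrupted by signal, 0 = timed out, >0 = jiffies remaining. */
static long stub_wait(int outcome)
{
	switch (outcome) {
	case -1: return -512;	/* e.g. -ERESTARTSYS */
	case  0: return 0;	/* timeout */
	default: return 42;	/* completed with time to spare */
	}
}

int main(void)
{
	for (int outcome = -1; outcome <= 1; outcome++) {
		long ret_wait = stub_wait(outcome);

		if (ret_wait < 0)
			printf("signal: zero the context, report no data\n");
		else if (!ret_wait)
			printf("timeout: warn and reset the command\n");
		else
			printf("done: %ld units left\n", ret_wait);
	}
	return 0;
}
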
@@ -882,7 +896,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
{
struct ib_smp *smp = inbox->buf;
u32 index;
- u8 port;
+ u8 port, slave_port;
u8 opcode_modifier;
u16 *table;
int err;
@@ -894,7 +908,8 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
__be32 slave_cap_mask;
__be64 slave_node_guid;
- port = vhcr->in_modifier;
+ slave_port = vhcr->in_modifier;
+ port = mlx4_slave_convert_port(dev, slave, slave_port);
/* network-view bit is for driver use only, and should not be passed to FW */
opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
@@ -930,8 +945,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
/*get the slave specific caps:*/
/*do the command */
+ smp->attr_mod = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
/* modify the response for slaves */
if (!err && slave != mlx4_master_func_num(dev)) {
@@ -975,7 +991,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
@@ -994,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->method == IB_MGMT_METHOD_GET) || network_view) {
mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
- slave, smp->method, smp->mgmt_class,
+ slave, smp->mgmt_class, smp->method,
network_view ? "Network" : "Host",
be16_to_cpu(smp->attr_id));
return -EPERM;
@@ -2382,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
}
}
- memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
@@ -2915,7 +2931,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
- mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+ mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
}
@@ -3164,6 +3180,92 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+ struct mlx4_counter *counter_stats, int reset)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ struct mlx4_counter *tmp_counter;
+ int err;
+ u32 if_stat_in_mod;
+
+ if (!counter_stats)
+ return -EINVAL;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
+ if_stat_in_mod = counter_index;
+ if (reset)
+ if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma,
+ if_stat_in_mod, 0,
+ MLX4_CMD_QUERY_IF_STAT,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
+ __func__, counter_index);
+ goto if_stat_out;
+ }
+ tmp_counter = (struct mlx4_counter *)mailbox->buf;
+ counter_stats->counter_mode = tmp_counter->counter_mode;
+ if (counter_stats->counter_mode == 0) {
+ counter_stats->rx_frames =
+ cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
+ be64_to_cpu(tmp_counter->rx_frames));
+ counter_stats->tx_frames =
+ cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
+ be64_to_cpu(tmp_counter->tx_frames));
+ counter_stats->rx_bytes =
+ cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
+ be64_to_cpu(tmp_counter->rx_bytes));
+ counter_stats->tx_bytes =
+ cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
+ be64_to_cpu(tmp_counter->tx_bytes));
+ }
+
+if_stat_out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
+
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct mlx4_counter tmp_vf_stats;
+ int slave;
+ int err = 0;
+
+ if (!vf_stats)
+ return -EINVAL;
+
+ if (!mlx4_is_master(dev))
+ return -EPROTONOSUPPORT;
+
+ slave = mlx4_get_slave_indx(dev, vf_idx);
+ if (slave < 0)
+ return -EINVAL;
+
+ port = mlx4_slaves_closest_port(dev, slave, port);
+ err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
+ if (!err && tmp_vf_stats.counter_mode == 0) {
+ vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
+ vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
+ vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
+ vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
+
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
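
mlx4_get_counter_stats() above keeps the mailbox counters big-endian while accumulating, decoding and re-encoding around each addition. The same round-trip as a runnable userspace sketch; glibc's htobe64()/be64toh() stand in for the kernel's cpu_to_be64()/be64_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Add a big-endian sample into a big-endian accumulator in place. */
static void be64_accumulate(uint64_t *acc_be, uint64_t sample_be)
{
	*acc_be = htobe64(be64toh(*acc_be) + be64toh(sample_be));
}

int main(void)
{
	uint64_t rx_frames = htobe64(100);

	be64_accumulate(&rx_frames, htobe64(23));
	printf("rx_frames = %llu\n",
	       (unsigned long long)be64toh(rx_frames));	/* 123 */
	return 0;
}
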
@@ -3197,6 +3299,12 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enabled)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
if (slave == mlx4_master_func_num(dev))
return 0;
@@ -3206,6 +3314,11 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
enabled < 0 || enabled > 1)
return -EINVAL;
+ if (min_port == max_port && dev->caps.num_ports > 1) {
+ mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
+ return -EPROTONOSUPPORT;
+ }
+
priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
return 0;
}
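
The min_port/max_port computation added to mlx4_vf_set_enable_smi_admin() derives a VF's port span from its active-port bitmap: the first set bit gives the lowest port, the population count gives the width, and min_port == max_port flags a single-ported VF. A sketch of the same arithmetic, with the kernel bitmap replaced by a plain non-zero 64-bit word:

#include <stdio.h>

/* Ports are 1-based; assumes a non-empty, contiguous run of active
 * ports, matching the find_first_bit + bitmap_weight combination. */
static void port_range(unsigned long long ports, int *min_port, int *max_port)
{
	*min_port = __builtin_ctzll(ports) + 1;
	*max_port = *min_port - 1 + __builtin_popcountll(ports);
}

int main(void)
{
	int lo, hi;

	port_range(0x2ULL, &lo, &hi);	/* only port 2 active */
	printf("min=%d max=%d single-ported=%s\n",
	       lo, hi, lo == hi ? "yes" : "no");
	return 0;
}
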
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/cq.c b/kernel/drivers/net/ethernet/mellanox/mlx4/cq.c
index e71f31387..3348e646d 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
u64 mtt_addr;
int err;
- if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+ if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
- cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
+ cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
- &priv->eq_table.eq[cq->vector].tasklet_ctx;
+ &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+ synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+ if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+ synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
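
The cq.c hunks stop indexing eq_table with the raw completion vector and translate through MLX4_CQ_TO_EQ_VECTOR(), because one table slot (MLX4_EQ_ASYNC) is now reserved for the async EQ. The macro itself lives in mlx4.h and is not part of this diff; the sketch below assumes the async EQ sits in slot 0 and completion vectors simply shift up by one:

#include <stdio.h>

#define MLX4_EQ_ASYNC 0
/* Assumed mapping: completion vector N skips past the async slot. */
#define CQ_TO_EQ_VECTOR(vec) ((vec) + 1)

int main(void)
{
	for (int vec = 0; vec < 3; vec++)
		printf("completion vector %d -> eq_table.eq[%d]\n",
		       vec, CQ_TO_EQ_VECTOR(vec));
	return 0;
}
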
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 8a083d73e..1494997c4 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,23 +236,43 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.enable = mlx4_en_phc_enable,
};
+#define MLX4_EN_WRAP_AROUND_SEC 10ULL
+
+/* This function calculates the max shift that enables the user range
+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+ */
+static u32 freq_to_shift(u16 freq)
+{
+ u32 freq_khz = freq * 1000;
+ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
+ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ /* calculate max possible multiplier in order to fit in 64bit */
+ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+
+ /* This comes from the reverse of clocksource_khz2mult */
+ return ilog2(div_u64(max_mul * freq_khz, 1000000));
+}
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
u64 ns, zero = 0;
+ /* mlx4_en_init_timestamp is called for each netdev.
+ * mdev->ptp_clock is common for all ports, skip initialization if
+ * was done for other port.
+ */
+ if (mdev->ptp_clock)
+ return;
+
rwlock_init(&mdev->clock_lock);
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
- /* Using shift to make calculation more accurate. Since current HW
- * clock frequency is 427 MHz, and cycles are given using a 48 bits
- * register, the biggest shift when calculating using u64, is 14
- * (max_cycles * multiplier < 2^64)
- */
- mdev->cycles.shift = 14;
+ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
mdev->nominal_c_mult = mdev->cycles.mult;
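
freq_to_shift() replaces the hard-coded shift of 14 with a value derived from the reported core-clock frequency and a 10-second wrap-around budget. A standalone re-derivation of the math (the bit-smear plays the role of roundup_pow_of_two() - 1, and 63 - clz the role of ilog2); for the 427 MHz frequency quoted in the removed comment it computes a shift of 30:

#include <stdint.h>
#include <stdio.h>

#define WRAP_AROUND_SEC 10ULL

/* freq is in MHz, as dev->caps.hca_core_clock is. */
static uint32_t freq_to_shift(uint16_t freq)
{
	uint64_t freq_khz = freq * 1000ULL;
	uint64_t max_val_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
	uint64_t rounded = max_val_cycles;

	/* smallest (2^n - 1) >= max_val_cycles, covering both arms of
	 * the kernel's is_power_of_2 / roundup_pow_of_two - 1 choice */
	rounded |= rounded >> 1;  rounded |= rounded >> 2;
	rounded |= rounded >> 4;  rounded |= rounded >> 8;
	rounded |= rounded >> 16; rounded |= rounded >> 32;

	/* largest multiplier keeping max cycles * mult below 2^64 */
	uint64_t max_mul = UINT64_MAX / rounded;

	return 63 - __builtin_clzll(max_mul * freq_khz / 1000000);
}

int main(void)
{
	printf("shift @ 427 MHz: %u\n", freq_to_shift(427));	/* 30 */
	return 0;
}
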
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 22da4d0d0..eb8a4988d 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
+ cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
@@ -99,14 +100,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
{
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
- char name[25];
int timestamp_en = 0;
- struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap;
-#else
- NULL;
-#endif
+ bool assigned_eq = false;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -116,23 +111,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
memset(cq->buf, 0, cq->buf_size);
if (cq->is_tx == RX) {
- if (mdev->dev->caps.comp_pool) {
- if (!cq->vector) {
- sprintf(name, "%s-%d", priv->dev->name,
- cq->ring);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, rmap,
- &cq->vector)) {
- cq->vector = (cq->ring + 1 + priv->port)
- % mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
- name);
- }
-
+ if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+ cq->vector)) {
+ cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+ err = mlx4_assign_eq(mdev->dev, priv->port,
+ &cq->vector);
+ if (err) {
+ mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+ cq->vector);
+ goto free_eq;
}
- } else {
- cq->vector = (cq->ring + 1 + priv->port) %
- mdev->dev->caps.num_comp_vectors;
+
+ assigned_eq = true;
}
cq->irq_desc =
@@ -159,7 +150,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);
if (err)
- return err;
+ goto free_eq;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +159,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
- err = irq_set_affinity_hint(cq->mcq.irq,
- ring->affinity_mask);
- if (err)
- mlx4_warn(mdev, "Failed setting affinity hint\n");
-
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -182,6 +166,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
napi_enable(&cq->napi);
return 0;
+
+free_eq:
+ if (assigned_eq)
+ mlx4_release_eq(mdev->dev, cq->vector);
+ cq->vector = mdev->dev->caps.num_comp_vectors;
+ return err;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +181,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+ cq->is_tx == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
- }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
@@ -207,7 +197,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
- irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
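
The free_eq unwind added to mlx4_en_activate_cq() is the conditional-cleanup idiom: record whether the EQ vector was actually assigned, release it on failure only in that case, and restore the invalid-vector sentinel either way. A stripped-down sketch of the shape, with stubbed resources and a deliberately failing second step:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_VECTOR -1

static int assign_eq(int *vec) { *vec = 3; return 0; }		/* stub */
static void release_eq(int vec) { printf("released eq %d\n", vec); }
static int create_cq(void) { return -1; }			/* stub: fails */

static int activate_cq(void)
{
	int vector = INVALID_VECTOR;
	bool assigned_eq = false;
	int err;

	err = assign_eq(&vector);
	if (err)
		goto free_eq;
	assigned_eq = true;

	err = create_cq();
	if (err)
		goto free_eq;
	return 0;

free_eq:
	if (assigned_eq)
		release_eq(vector);
	return err;
}

int main(void)
{
	printf("activate_cq() = %d\n", activate_cq());
	return 0;
}
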
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a2ddf3d75..ddb554188 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -95,13 +95,11 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
(u16) (mdev->dev->caps.fw_ver & 0xffff));
strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
"blueflame",
+ "phv-bit"
};
static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -119,6 +117,12 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
+ /* pf statistics */
+ "pf_rx_packets",
+ "pf_rx_bytes",
+ "pf_tx_packets",
+ "pf_tx_bytes",
+
/* priority flow control statistics rx */
"rx_pause_prio_0", "rx_pause_duration_prio_0",
"rx_pause_transition_prio_0",
@@ -368,6 +372,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->port_stats)[i];
+ for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] =
+ ((unsigned long *)&priv->pf_stats)[i];
+
for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -448,6 +457,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
+ for (i = 0; i < NUM_PF_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -1780,35 +1795,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+ bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+ bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
int i;
+ int ret = 0;
- if (bf_enabled_new == bf_enabled_old)
- return 0; /* Nothing to do */
+ if (bf_enabled_new != bf_enabled_old) {
+ if (bf_enabled_new) {
+ bool bf_supported = true;
- if (bf_enabled_new) {
- bool bf_supported = true;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ bf_supported &= priv->tx_ring[i]->bf_alloced;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ if (!bf_supported) {
+ en_err(priv, "BlueFlame is not supported\n");
+ return -EINVAL;
+ }
- if (!bf_supported) {
- en_err(priv, "BlueFlame is not supported\n");
- return -EINVAL;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+ } else {
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- } else {
- priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- }
-
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ priv->tx_ring[i]->bf_enabled = bf_enabled_new;
- en_info(priv, "BlueFlame %s\n",
- bf_enabled_new ? "Enabled" : "Disabled");
+ en_info(priv, "BlueFlame %s\n",
+ bf_enabled_new ? "Enabled" : "Disabled");
+ }
+ if (phv_enabled_new != phv_enabled_old) {
+ ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+ if (ret)
+ return ret;
+ else if (phv_enabled_new)
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ else
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+ en_info(priv, "PHV bit %s\n",
+ phv_enabled_new ? "Enabled" : "Disabled");
+ }
return 0;
}
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_main.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 913b716ed..e0ec280a7 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -123,6 +123,28 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
*/
if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
+
+ mutex_lock(&priv->mdev->state_lock);
+ if (priv->mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB &&
+ priv->rss_map.indir_qp.qpn) {
+ int i;
+ int err = 0;
+ int loopback = !!(features & NETIF_F_LOOPBACK);
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ int ret;
+
+ ret = mlx4_en_change_mcast_lb(priv,
+ &priv->rss_map.qps[i],
+ loopback);
+ if (!err)
+ err = ret;
+ }
+ if (err)
+ mlx4_warn(priv->mdev, "failed to change mcast loopback\n");
+ }
+ mutex_unlock(&priv->mdev->state_lock);
}
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
@@ -210,9 +232,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_remove_timestamp(mdev);
-
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
@@ -224,6 +243,26 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
kfree(mdev);
}
+static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
+{
+ int i;
+ struct mlx4_en_dev *mdev = ctx;
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+ mdev->pndev[i] = NULL;
+ }
+
+ /* register notifier */
+ mdev->nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->nb)) {
+ mdev->nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create notifier\n");
+ }
+}
+
static void *mlx4_en_add(struct mlx4_dev *dev)
{
struct mlx4_en_dev *mdev;
@@ -278,10 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
- /* Initialize time stamp mechanism */
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_init_timestamp(mdev);
-
/* Set default number of RX rings*/
mlx4_en_set_num_rx_rings(mdev);
@@ -297,21 +332,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mutex_init(&mdev->state_lock);
mdev->device_up = true;
- /* Setup ports */
-
- /* Create a netdev for each port */
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- mlx4_info(mdev, "Activating port:%d\n", i);
- if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
- mdev->pndev[i] = NULL;
- }
- /* register notifier */
- mdev->nb.notifier_call = mlx4_en_netdev_event;
- if (register_netdevice_notifier(&mdev->nb)) {
- mdev->nb.notifier_call = NULL;
- mlx4_err(mdev, "Failed to create notifier\n");
- }
-
return mdev;
err_mr:
@@ -335,6 +355,7 @@ static struct mlx4_interface mlx4_en_interface = {
.event = mlx4_en_event,
.get_dev = mlx4_en_get_netdev,
.protocol = MLX4_PROT_ETH,
+ .activate = mlx4_en_activate,
};
static void mlx4_en_verify_params(void)
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a5a0b8420..67e9633ea 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -573,10 +573,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
- struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
- u64 reg_id = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
@@ -600,44 +598,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
- goto qp_err;
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
}
- err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
- &priv->tunnel_reg_id);
- if (err)
- goto tunnel_err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
- }
- memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
- memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
- entry->reg_id = reg_id;
-
- hlist_add_head_rcu(&entry->hlist,
- &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
-
return 0;
-
-alloc_err:
- if (priv->tunnel_reg_id)
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
- mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, priv->port, mac);
- return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
@@ -645,39 +610,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
- u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
} else {
- struct mlx4_mac_entry *entry;
- struct hlist_node *tmp;
- struct hlist_head *bucket;
- unsigned int i;
-
- for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
- bucket = &priv->mac_hash[i];
- hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
- en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
- entry->mac);
- mlx4_en_uc_steer_release(priv, entry->mac,
- qpn, entry->reg_id);
-
- mlx4_unregister_mac(dev, priv->port, mac);
- hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
- }
- }
-
- if (priv->tunnel_reg_id) {
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
- priv->tunnel_reg_id = 0;
- }
-
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -1283,6 +1222,75 @@ static void mlx4_en_netpoll(struct net_device *dev)
}
#endif
+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 reg_id;
+ int err = 0;
+ int *qpn = &priv->base_qpn;
+ struct mlx4_mac_entry *entry;
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ return err;
+
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+ entry->reg_id = reg_id;
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+ return err;
+}
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 mac;
+ unsigned int i;
+ int qpn = priv->base_qpn;
+ struct hlist_head *bucket;
+ struct hlist_node *tmp;
+ struct mlx4_mac_entry *entry;
+
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+ mac = mlx4_mac_to_u64(entry->mac);
+ en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+ entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+
+ mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ }
+ }
+
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+}
+
static void mlx4_en_tx_timeout(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1597,6 +1605,9 @@ int mlx4_en_start_port(struct net_device *dev)
}
mdev->mac_removed[priv->port] = 0;
+ priv->counter_index =
+ mlx4_get_default_counter_index(mdev->dev, priv->port);
+
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
@@ -1681,6 +1692,11 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ /* Set Unicast and VXLAN steering rules */
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+ mlx4_en_set_rss_steer_rules(priv))
+ mlx4_warn(mdev, "Failed setting steering rules\n");
+
/* Attach rx QP to bradcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1755,6 +1771,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Set port as not active */
priv->port_up = false;
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promsicuous mode */
if (mdev->dev->caps.steering_mode ==
@@ -1827,6 +1844,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
for (i = 0; i < priv->tx_ring_num; i++)
mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ mlx4_en_delete_rss_steer_rules(priv);
+
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
@@ -1891,6 +1911,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
sizeof(priv->rx_priority_flowstats));
memset(&priv->tx_priority_flowstats, 0,
sizeof(priv->tx_priority_flowstats));
+ memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i]->bytes = 0;
@@ -1954,7 +1975,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
int i;
#ifdef CONFIG_RFS_ACCEL
- free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
priv->dev->rx_cpu_rmap = NULL;
#endif
@@ -2008,11 +2028,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- if (priv->mdev->dev->caps.comp_pool) {
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
- if (!priv->dev->rx_cpu_rmap)
- goto err;
- }
+ priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
@@ -2056,6 +2072,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_remove_timestamp(mdev);
+
/* Detach the netdev so tasks would not attempt to access it */
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;
@@ -2184,6 +2203,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+ * enable/disable make sure S-TAG flag is always in same state as
+ * C-TAG.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+ !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
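
The new mlx4_en_fix_features() callback lets the core reconcile requested feature bits before they are applied: S-TAG RX stripping must track C-TAG RX, since the hardware cannot toggle them separately. A self-contained sketch of the same rule, with hypothetical bits standing in for NETIF_F_HW_VLAN_CTAG_RX and NETIF_F_HW_VLAN_STAG_RX:

#include <stdio.h>

#define F_CTAG_RX (1u << 0)
#define F_STAG_RX (1u << 1)

static unsigned int fix_features(unsigned int features, int skip_outer_vlan)
{
	if ((features & F_CTAG_RX) && !skip_outer_vlan)
		features |= F_STAG_RX;
	else
		features &= ~F_STAG_RX;
	return features;
}

int main(void)
{
	printf("ctag on  -> stag %s\n",
	       fix_features(F_CTAG_RX, 0) & F_STAG_RX ? "on" : "off");
	printf("ctag off -> stag %s\n",
	       fix_features(0, 0) & F_STAG_RX ? "on" : "off");
	return 0;
}
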
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2218,6 +2256,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+ en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+ (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2288,6 +2330,15 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+}
+
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@@ -2330,8 +2381,6 @@ out:
/* set offloads */
priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2342,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
/* unset offloads */
priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2451,6 +2498,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2485,11 +2533,13 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
+ .ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2682,7 +2732,7 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
- int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
+ int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
if (!mlx4_is_slave(dev) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
@@ -2744,6 +2794,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
last_i += NUM_PORT_STATS;
+ if (mlx4_is_master(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_PF_STATS);
+ last_i += NUM_PF_STATS;
+
mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
rx_ppp, rx_pause,
tx_ppp, tx_pause);
@@ -2760,7 +2815,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
- u64 mac_u64;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2779,6 +2833,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
@@ -2851,17 +2906,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- if (mlx4_is_slave(priv->mdev->dev)) {
- eth_hw_addr_random(dev);
- en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
- mdev->dev->caps.def_mac[priv->port] = mac_u64;
- } else {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
- priv->port, dev->dev_addr);
- err = -EINVAL;
- goto out;
- }
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
+ err = -EINVAL;
+ goto out;
+ } else if (mlx4_is_slave(priv->mdev->dev) &&
+ (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
+ /* Random MAC was assigned in mlx4_slave_cap
+ * in mlx4_core module
+ */
+ dev->addr_assign_type |= NET_ADDR_RANDOM;
+ en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
}
memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
@@ -2915,6 +2970,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ }
+
+ if (mlx4_is_slave(mdev->dev)) {
+ int phv;
+
+ err = get_phv_bit(mdev->dev, port, &phv);
+ if (!err && phv) {
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ }
+ } else {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
@@ -2940,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
@@ -2981,9 +3062,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ /* Initialize time stamp mechanism */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- queue_delayed_work(mdev->workqueue, &priv->service_task,
- SERVICE_TASK_DELAY);
+ mlx4_en_init_timestamp(mdev);
+
+ queue_delayed_work(mdev->workqueue, &priv->service_task,
+ SERVICE_TASK_DELAY);
mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev->profile.prof[priv->port].rx_ppp,
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_port.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a56f010c..3904b5fc0 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -149,6 +149,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
+ struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
@@ -156,7 +157,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
- int i;
+ int i, counter_index;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -202,6 +203,20 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tso_packets += ring->tso_packets;
priv->port_stats.xmit_more += ring->xmit_more;
}
+ if (mlx4_is_master(mdev->dev)) {
+ stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+ &mlx4_en_stats->RTOT_prio_1,
+ NUM_PRIORITIES);
+ stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+ &mlx4_en_stats->TTOT_prio_1,
+ NUM_PRIORITIES);
+ stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+ &mlx4_en_stats->ROCT_prio_1,
+ NUM_PRIORITIES);
+ stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+ &mlx4_en_stats->TOCT_prio_1,
+ NUM_PRIORITIES);
+ }
/* net device stats */
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
@@ -223,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_missed_errors = 0;
stats->tx_aborted_errors = 0;
stats->tx_carrier_errors = 0;
stats->tx_fifo_errors = 0;
@@ -296,6 +311,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_unlock_bh(&priv->stats_lock);
+ memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+ counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+ err = mlx4_get_counter_stats(mdev->dev, counter_index,
+ &tmp_counter_stats, reset);
+
/* 0xffs indicates invalid value */
memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
@@ -314,6 +334,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
+ if (tmp_counter_stats.counter_mode == 0) {
+ priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
+ priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
+ priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
+ priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
+ }
+
for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->rx_priority_flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
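
The new PF totals feed through en_stats_adder(), whose body predates this patch and is not shown here. Read from its call sites, it sums num big-endian counters spaced a fixed stride apart, inferring the stride from the first two addresses; a userspace reconstruction under that assumption:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long stats_adder(const uint64_t *start, const uint64_t *next,
				 int num)
{
	const char *curr = (const char *)start;
	long stride = (const char *)next - (const char *)start;
	unsigned long ret = 0;

	for (int i = 0; i < num; i++) {
		ret += be64toh(*(const uint64_t *)curr);
		curr += stride;
	}
	return ret;
}

int main(void)
{
	uint64_t prio[8];

	for (int i = 0; i < 8; i++)
		prio[i] = htobe64(i + 1);
	/* 1 + 2 + ... + 8 = 36 */
	printf("%lu\n", stats_adder(&prio[0], &prio[1], 8));
	return 0;
}
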
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 34f2fdf4f..12aab5a65 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -66,9 +66,18 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index = priv->counter_index;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
+ if (!rss &&
+ (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
+ context->pri_path.counter_index !=
+ MLX4_SINK_COUNTER_INDEX(mdev->dev)) {
+ /* disable multicast loopback to qp with same counter */
+ if (!(dev->features & NETIF_F_LOOPBACK))
+ context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
+ context->pri_path.control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
+ }
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
context->param3 |= cpu_to_be32(1 << 30);
@@ -80,6 +89,22 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
}
}
+int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
+ int loopback)
+{
+ int ret;
+ struct mlx4_update_qp_params qp_params;
+
+ memset(&qp_params, 0, sizeof(qp_params));
+ if (!loopback)
+ qp_params.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB;
+
+ ret = mlx4_update_qp(priv->mdev->dev, qp->qpn,
+ MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB,
+ &qp_params);
+
+ return ret;
+}
int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 80aac2010..e7a5000aa 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
- BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
return ring->prod == ring->cons;
}
@@ -337,15 +336,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
struct mlx4_dev *dev = mdev->dev;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool)
- num_of_eqs = max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS));
- else
- num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
- dev->caps.comp_pool/
- dev->caps.num_ports) - 1;
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ mlx4_get_eqs_per_port(mdev->dev, i),
+ DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
min_t(int, num_of_eqs,
@@ -731,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
!(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
@@ -912,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->csum_level = 1;
if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ __vlan_hwaccel_put_tag(gro_skb,
+ htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
}
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(gro_skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
skb_mark_napi_id(gro_skb, &cq->napi);
@@ -968,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) &&
+ MLX4_CQE_CVLAN_PRESENT_MASK) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+ else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1038,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
- int cpu_curr;
const struct cpumask *aff;
+ struct irq_data *idata;
+ int cpu_curr;
INC_PERF_COUNTER(priv->pstats.napi_quota);
cpu_curr = smp_processor_id();
- aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+ idata = irq_desc_get_irq_data(cq->irq_desc);
+ aff = irq_data_get_affinity_mask(idata);
if (likely(cpumask_test_cpu(cpu_curr, aff)))
return budget;
@@ -1071,7 +1082,10 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
+ * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+ */
+ int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
int buf_size = 0;
int i = 0;
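
The eff_mtu change budgets for two stacked VLAN headers so QinQ frames (an ETH_P_8021AD outer tag plus an ETH_P_8021Q inner tag) still fit the receive buffers. The arithmetic, worked for a standard 1500-byte MTU:

#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN  4

int main(void)
{
	int mtu = 1500;

	printf("old eff_mtu: %d\n", mtu + ETH_HLEN + VLAN_HLEN);	/* 1518 */
	printf("new eff_mtu: %d\n", mtu + ETH_HLEN + 2 * VLAN_HLEN);	/* 1522 */
	return 0;
}
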
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f6a..4421bf546 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
+ u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = skb_vlan_tag_get(skb);
-
+ vlan_proto = be16_to_cpu(skb->vlan_proto);
+ }
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -958,8 +960,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->bf.offset ^= ring->bf.buf_size;
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!skb_vlan_tag_present(skb);
+ if (vlan_proto == ETH_P_8021AD)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+ else
+ tx_desc->ctrl.ins_vlan = 0;
+
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
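
On transmit, the descriptor's insert-VLAN field is now selected from skb->vlan_proto rather than a mere present/absent flag, so the hardware inserts the matching TPID. A sketch of the selection; the enum values are stand-ins, not the real MLX4_WQE_CTRL_INS_SVLAN/MLX4_WQE_CTRL_INS_CVLAN encodings:

#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

enum ins_vlan { INS_NONE = 0, INS_CVLAN, INS_SVLAN };

static enum ins_vlan pick_ins_vlan(unsigned int vlan_proto)
{
	if (vlan_proto == ETH_P_8021AD)
		return INS_SVLAN;
	if (vlan_proto == ETH_P_8021Q)
		return INS_CVLAN;
	return INS_NONE;	/* no tag present */
}

int main(void)
{
	printf("8021AD -> %d, 8021Q -> %d, none -> %d\n",
	       pick_ins_vlan(ETH_P_8021AD), pick_ins_vlan(ETH_P_8021Q),
	       pick_ins_vlan(0));
	return 0;
}
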
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c b/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c
index 983b1d512..603d1c3d3 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -145,7 +145,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
struct mlx4_eqe *eqe;
u8 slave;
- int i;
+ int i, phys_port, slave_port;
for (eqe = next_slave_event_eqe(slave_eq); eqe;
eqe = next_slave_event_eqe(slave_eq)) {
@@ -154,9 +154,20 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i <= dev->persist->num_vfs; i++) {
+ phys_port = 0;
+ if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
+ eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
+ phys_port = eqe->event.port_mgmt_change.port;
+ slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
+ if (slave_port < 0) /* VF doesn't have this port */
+ continue;
+ eqe->event.port_mgmt_change.port = slave_port;
+ }
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
+ if (phys_port)
+ eqe->event.port_mgmt_change.port = phys_port;
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -185,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
return;
}
- memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownersip bit */
dma_wmb();
@@ -210,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+#if defined(CONFIG_SMP)
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+ int hint_err;
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+ if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ return;
+
+ hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ if (hint_err)
+ mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
@@ -224,7 +251,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -241,7 +268,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -251,6 +278,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
u8 port_subtype_change)
{
struct mlx4_eqe eqe;
+ u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
/*don't send if we don't have the that slave */
if (dev->persist->num_vfs < slave)
@@ -259,7 +287,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
eqe.subtype = port_subtype_change;
- eqe.event.port_change.port = cpu_to_be32(port << 28);
+ eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
port_subtype_change, slave, port);
@@ -589,6 +617,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
if (i == mlx4_master_func_num(dev))
continue;
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
}
}
@@ -879,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
- return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
- dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+ dev->caps.reserved_eqs / 4 + 1;
}
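
With the separate completion-vector pool gone, mlx4_num_eq_uar() sizes the doorbell mapping for just num_comp_vectors + 1 EQs. Each UAR page carries doorbells for four EQs and the reserved EQs occupy the lowest indices; a worked example with made-up counts:

#include <stdio.h>

static int num_eq_uar(int num_comp_vectors, int reserved_eqs)
{
	return (num_comp_vectors + 1 + reserved_eqs) / 4 -
	       reserved_eqs / 4 + 1;
}

int main(void)
{
	/* 16 reserved EQs fill pages 0-3; the 9 EQs actually used
	 * (indices 16-24) span UAR pages 4, 5 and 6 -> 3 pages. */
	printf("uar pages: %d\n", num_eq_uar(8, 16));
	return 0;
}
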
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1069,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int i, vec;
+ int i;
if (eq_table->have_irq)
free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
+ free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+ irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
- for (i = 0; i < dev->caps.comp_pool; i++) {
- /*
- * Freeing the assigned irq's
- * all bits should be 0, but we need to validate
- */
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- /* NO need protecting*/
- vec = dev->caps.num_comp_vectors + 1 + i;
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- }
- }
-
-
kfree(eq_table->irq_names);
}
@@ -1175,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
}
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
- dev->caps.comp_pool),
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
- goto err_out_bitmap;
+ goto err_out_clr_int;
}
- for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
- }
- }
-
- err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
- &priv->eq_table.eq[dev->caps.num_comp_vectors]);
- if (err)
- goto err_out_comp;
-
- /*if additional completion vectors poolsize is 0 this loop will not run*/
- for (i = dev->caps.num_comp_vectors + 1;
- i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i == MLX4_EQ_ASYNC) {
+ err = mlx4_create_eq(dev,
+ MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ } else {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+ int port = find_first_bit(eq->actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ if (port <= dev->caps.num_ports) {
+ struct mlx4_port_info *info =
+ &mlx4_priv(dev)->port[port];
+
+ if (!info->rmap) {
+ info->rmap = alloc_irq_cpu_rmap(
+ mlx4_get_eqs_per_port(dev, port));
+ if (!info->rmap) {
+ mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ }
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
+ err = irq_cpu_rmap_add(
+ info->rmap, eq->irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ?
+ i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+ eq);
}
+ if (err)
+ goto err_out_unmap;
}
-
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
- if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-comp-%d@pci:%s", i,
- pci_name(dev->persist->pdev));
- } else {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-async@pci:%s",
- pci_name(dev->persist->pdev));
- }
+ snprintf(priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->persist->pdev));
+ eq_name = priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
- eq_name = priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE;
- err = request_irq(priv->eq_table.eq[i].irq,
- mlx4_msi_x_interrupt, 0, eq_name,
- priv->eq_table.eq + i);
- if (err)
- goto err_out_async;
+ err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + MLX4_EQ_ASYNC);
+ if (err)
+ goto err_out_unmap;
- priv->eq_table.eq[i].have_irq = 1;
- }
+ priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
@@ -1253,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
- goto err_out_async;
+ goto err_out_unmap;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
- eq_set_ci(&priv->eq_table.eq[i], 1);
+ /* arm ASYNC eq */
+ eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
-err_out_async:
- mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
- i = dev->caps.num_comp_vectors - 1;
-
err_out_unmap:
- while (i >= 0) {
- mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- --i;
+ while (i >= 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
}
+#endif
+ mlx4_free_irqs(dev);
+
+err_out_clr_int:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
- mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
@@ -1300,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
int i;
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
+ }
+#endif
mlx4_free_irqs(dev);
- for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (!mlx4_is_slave(dev))
@@ -1336,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
* and performing a NOP command
*/
for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+ /* Make sure request_irq was called */
+ if (!priv->eq_table.eq[i].have_irq)
+ continue;
+
/* Temporary use polling for command completions */
mlx4_cmd_use_polling(dev);
@@ -1355,87 +1387,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Return to default */
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+ (vector == MLX4_EQ_ASYNC))
+ return false;
+
+ return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
struct mlx4_priv *priv = mlx4_priv(dev);
- int vec = 0, err = 0, i;
+ unsigned int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+ sum += !!test_bit(port - 1,
+ priv->eq_table.eq[i].actv_ports.ports);
+
+ return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+ return -EINVAL;
+
+ return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+ dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
+{
+ return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = 0, i = 0;
+ u32 min_ref_count_val = (u32)-1;
+ int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+ int *prequested_vector = NULL;
+
mutex_lock(&priv->msix_ctl.pool_lock);
- for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
- if (~priv->msix_ctl.pool_bm & 1ULL << i) {
- priv->msix_ctl.pool_bm |= 1ULL << i;
- vec = dev->caps.num_comp_vectors + 1 + i;
- snprintf(priv->eq_table.irq_names +
- vec * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
- if (rmap) {
- err = irq_cpu_rmap_add(rmap,
- priv->eq_table.eq[vec].irq);
- if (err)
- mlx4_warn(dev, "Failed adding irq rmap\n");
+ if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+ (requested_vector >= 0) &&
+ (requested_vector != MLX4_EQ_ASYNC)) {
+ if (test_bit(port - 1,
+ priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+ prequested_vector = &requested_vector;
+ } else {
+ struct mlx4_eq *eq;
+
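+ /* Requested EQ doesn't serve this port; reinterpret the request as a per-port index by skipping the EQs of lower-numbered ports */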
+ for (i = 1; i < port;
+ requested_vector += mlx4_get_eqs_per_port(dev, i++))
+ ;
+
+ eq = &priv->eq_table.eq[requested_vector];
+ if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ prequested_vector = &requested_vector;
}
-#endif
- err = request_irq(priv->eq_table.eq[vec].irq,
- mlx4_msi_x_interrupt, 0,
- &priv->eq_table.irq_names[vec<<5],
- priv->eq_table.eq + vec);
- if (err) {
- /*zero out bit by fliping it*/
- priv->msix_ctl.pool_bm ^= 1 << i;
- vec = 0;
- continue;
- /*we dont want to break here*/
+ }
+ }
+
+ if (!prequested_vector) {
+ requested_vector = -1;
+ for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+ i++) {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+ if (min_ref_count_val > eq->ref_count &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ min_ref_count_val = eq->ref_count;
+ requested_vector = i;
}
+ }
+
+ if (requested_vector < 0) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ prequested_vector = &requested_vector;
+ }
+
+ if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+ dev->flags & MLX4_FLAG_MSI_X) {
+ set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ snprintf(priv->eq_table.irq_names +
+ *prequested_vector * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+ *prequested_vector, dev_name(&dev->persist->pdev->dev));
- eq_set_ci(&priv->eq_table.eq[vec], 1);
+ err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[*prequested_vector << 5],
+ priv->eq_table.eq + *prequested_vector);
+
+ if (err) {
+ clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ *prequested_vector = -1;
+ } else {
+#if defined(CONFIG_SMP)
+ mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+ eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+ priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
}
+
+ if (!err && *prequested_vector >= 0)
+ priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
mutex_unlock(&priv->msix_ctl.pool_lock);
- if (vec) {
- *vector = vec;
- } else {
+ if (!err && *prequested_vector >= 0)
+ *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+ else
*vector = 0;
- err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
- }
+
return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return priv->eq_table.eq[vec].irq;
+ return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- /*bm index*/
- int i = vec - dev->caps.num_comp_vectors - 1;
-
- if (likely(i >= 0)) {
- /*sanity check , making sure were not trying to free irq's
- Belonging to a legacy EQ*/
- mutex_lock(&priv->msix_ctl.pool_lock);
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- priv->msix_ctl.pool_bm &= ~(1ULL << i);
- }
- mutex_unlock(&priv->msix_ctl.pool_lock);
- }
+ int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
+ mutex_lock(&priv->msix_ctl.pool_lock);
+ priv->eq_table.eq[eq_vec].ref_count--;
+
+ /* once an EQ is allocated, we don't release it because it might be bound
+ * to a cpu_rmap.
+ */
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/fw.c b/kernel/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57ad..90db94e83 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[26] = "Port ETS Scheduler support",
[27] = "Port beacon support",
[28] = "RX-ALL support",
+ [29] = "802.1ad offload support",
+ [31] = "Modifying loopback source checks using UPDATE_QP support",
+ [32] = "Loopback source checks support",
};
int i;
@@ -307,6 +310,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT 0x40
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -351,6 +355,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ if (dev->caps.phv_bit[port]) {
+ field = QUERY_FUNC_CAP_PHV_BIT;
+ MLX4_PUT(outbox->buf, field,
+ QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ }
+
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
@@ -600,6 +610,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -700,6 +713,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
@@ -898,6 +912,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+ if (field & 0x40)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -946,6 +966,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
+ if (field32 & (1 << 18))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
+ if (field32 & (1 << 19))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
if (field32 & (1 << 26))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
if (field32 & (1 << 20))
@@ -1992,6 +2016,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+ if (byte_field & 0x2)
+ param->phv_check_en = 1;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2758,3 +2786,79 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
}
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN 0x80
+
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+
+ context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ if (phv_bit)
+ context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+ int err;
+ struct mlx4_func_cap func_cap;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+ int ret;
+
+ if (mlx4_is_slave(dev))
+ return -EPERM;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+ if (!ret)
+ dev->caps.phv_bit[port] = new_val;
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
+
+void mlx4_replace_zero_macs(struct mlx4_dev *dev)
+{
+ int i;
+ u8 mac_addr[ETH_ALEN];
+
+ dev->port_random_macs = 0;
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (!dev->caps.def_mac[i] &&
+ dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+ eth_random_addr(mac_addr);
+ dev->port_random_macs |= 1 << i;
+ dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/fw.h b/kernel/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c246..08de5555c 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
u8 rss_ip_frags;
+ u8 phv_check_en; /* for QUERY_HCA */
};
struct mlx4_init_ib_param {
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/intf.c b/kernel/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0d80aed59..0472941af 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -63,8 +63,11 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irq(&priv->ctx_lock);
+ if (intf->activate)
+ intf->activate(&priv->dev, dev_ctx->context);
} else
kfree(dev_ctx);
+
}
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/main.c b/kernel/drivers/net/ethernet/mellanox/mlx4/main.c
index ced5ecab5..31c491e02 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+ struct mlx4_init_hca_param hca_param;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ /* Turn off PHV_EN flag in case phv_check_en is set.
+ * phv_check_en is a HW check that parses the packet and verifies
+ * the phv bit was reported correctly in the WQE. To allow QinQ,
+ * PHV_EN should be set and phv_check_en must be cleared;
+ * otherwise QinQ packets will be dropped by the HW.
+ */
+ if (err || hca_param.phv_check_en)
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+ }
+
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -479,7 +494,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+ if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
+ (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
+ (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
+ mlx4_warn(dev,
+ "Granular QoS per VF not supported with IB/Eth configuration\n");
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
+ }
+
+ dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
@@ -840,6 +863,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
+ mlx4_replace_zero_macs(dev);
+
dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
@@ -867,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
dev->caps.phys_port_id[i] = func_cap.phys_port_id;
- if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
- &dev->caps.gid_table_len[i],
- &dev->caps.pkey_table_len[i]))
+ err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+ &dev->caps.gid_table_len[i],
+ &dev->caps.pkey_table_len[i]);
+ if (err)
goto err_mem;
}
@@ -881,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long)
pci_resource_len(dev->persist->pdev, 2));
+ err = -ENOMEM;
goto err_mem;
}
@@ -1674,6 +1701,25 @@ static int map_internal_clock(struct mlx4_dev *dev)
return 0;
}
+int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ struct mlx4_clock_params *params)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (mlx4_is_slave(dev))
+ return -ENOTSUPP;
+
+ if (!params)
+ return -EINVAL;
+
+ params->bar = priv->fw.clock_bar;
+ params->offset = priv->fw.clock_offset;
+ params->size = MLX4_CLOCK_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
+
static void unmap_internal_clock(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2193,20 +2239,78 @@ err_free_icm:
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int nent;
+ int nent_pow2;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
- nent = dev->caps.max_counters;
- return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+ if (!dev->caps.max_counters)
+ return -ENOSPC;
+
+ nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
+ /* reserve last counter index for sink counter */
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
+ nent_pow2 - 1, 0,
+ nent_pow2 - dev->caps.max_counters + 1);
}
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return;
+
+ if (!dev->caps.max_counters)
+ return;
+
mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
+static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port;
+
+ for (port = 0; port < dev->caps.num_ports; port++)
+ if (priv->def_counter[port] != -1)
+ mlx4_counter_free(dev, priv->def_counter[port]);
+}
+
+static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port, err = 0;
+ u32 idx;
+
+ for (port = 0; port < dev->caps.num_ports; port++)
+ priv->def_counter[port] = -1;
+
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ err = mlx4_counter_alloc(dev, &idx);
+
+ if (!err || err == -ENOSPC) {
+ priv->def_counter[port] = idx;
+ } else if (err == -ENOENT) {
+ err = 0;
+ continue;
+ } else if (mlx4_is_slave(dev) && err == -EINVAL) {
+ priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
+ mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
+ MLX4_SINK_COUNTER_INDEX(dev));
+ err = 0;
+ } else {
+ mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
+ __func__, port + 1, err);
+ mlx4_cleanup_default_counters(dev);
+ return err;
+ }
+
+ mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
+ __func__, priv->def_counter[port], port + 1);
+ }
+
+ return err;
+}
+
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2215,8 +2319,10 @@ int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
return -ENOENT;
*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
- if (*idx == -1)
- return -ENOMEM;
+ if (*idx == -1) {
+ *idx = MLX4_SINK_COUNTER_INDEX(dev);
+ return -ENOSPC;
+ }
return 0;
}
@@ -2239,8 +2345,35 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
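+/* Clear a counter's interface statistics by issuing QUERY_IF_STAT with the reset modifier */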
+static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
+ u8 counter_index)
+{
+ struct mlx4_cmd_mailbox *if_stat_mailbox;
+ int err;
+ u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
+
+ if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(if_stat_mailbox))
+ return PTR_ERR(if_stat_mailbox);
+
+ err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
+ return err;
+}
+
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return;
+
+ if (idx == MLX4_SINK_COUNTER_INDEX(dev))
+ return;
+
+ __mlx4_clear_if_stat(dev, idx);
+
mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2260,6 +2393,14 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->def_counter[port - 1];
+}
+EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
+
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2364,11 +2505,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -2395,10 +2536,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_counters_table(dev);
- if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_counters_table(dev);
+ if (err && err != -ENOENT) {
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
+ goto err_qp_table_free;
+ }
+ }
+
+ err = mlx4_allocate_default_counters(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to allocate default counters, aborting\n");
+ goto err_counters_table_free;
}
if (!mlx4_is_slave(dev)) {
@@ -2432,15 +2581,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
- goto err_counters_table_free;
+ goto err_default_countes_free;
}
}
}
return 0;
+err_default_countes_free:
+ mlx4_cleanup_default_counters(dev);
+
err_counters_table_free:
- mlx4_cleanup_counters_table(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -2481,17 +2634,50 @@ err_uar_table_free:
return err;
}
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+ int requested_cpu = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_eq *eq;
+ int off = 0;
+ int i;
+
+ if (eqn > dev->caps.num_comp_vectors)
+ return -EINVAL;
+
+ for (i = 1; i < port; i++)
+ off += mlx4_get_eqs_per_port(dev, i);
+
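+ /* Pin to the CPU matching this EQ's index among the port's completion EQs (the async EQ slot is skipped) */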
+ requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+ /* Meaning EQs are shared, and this call comes from the second port */
+ if (requested_cpu < 0)
+ return 0;
+
+ eq = &priv->eq_table.eq[eqn];
+
+ if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+ return 0;
+}
+
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int i;
+ int port = 0;
if (msi_x) {
- int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+ int nreq = dev->caps.num_ports * num_online_cpus() + 1;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
+ if (nreq > MAX_MSIX)
+ nreq = MAX_MSIX;
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -2503,20 +2689,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq);
- if (nreq < 0) {
+ if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
kfree(entries);
goto no_msi;
- } else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
- /*Working in legacy mode , all EQ's shared*/
- dev->caps.comp_pool = 0;
- dev->caps.num_comp_vectors = nreq - 1;
- } else {
- dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
- dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
}
- for (i = 0; i < nreq; ++i)
- priv->eq_table.eq[i].irq = entries[i].vector;
+ /* 1 is reserved for events (asynchronous EQ) */
+ dev->caps.num_comp_vectors = nreq - 1;
+
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+ bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+ dev->caps.num_ports);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+ if (i == MLX4_EQ_ASYNC)
+ continue;
+
+ priv->eq_table.eq[i].irq =
+ entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ /* We don't set affinity hint when there
+ * aren't enough EQs
+ */
+ } else {
+ set_bit(port,
+ priv->eq_table.eq[i].actv_ports.ports);
+ if (mlx4_init_affinity_hint(dev, port + 1, i))
+ mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+ i);
+ }
+ /* We divide the EQs evenly between the two ports.
+ * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+ * refers to the number of EQs per port
+ * (i.e. eqs_per_port). Theoretically, we would like to
+ * write something like (i + 1) % eqs_per_port == 0.
+ * However, since there's an asynchronous EQ, we have
+ * to skip over it by comparing this condition to
+ * !!((i + 1) > MLX4_EQ_ASYNC).
+ */
+ if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+ ((i + 1) %
+ (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+ !!((i + 1) > MLX4_EQ_ASYNC))
+ /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+ * everything is shared anyway.
+ */
+ port++;
+ }
dev->flags |= MLX4_FLAG_MSI_X;
@@ -2526,10 +2747,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_msi:
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
- for (i = 0; i < 2; ++i)
+ BUG_ON(MLX4_EQ_ASYNC >= 2);
+ for (i = 0; i < 2; ++i) {
priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+ if (i != MLX4_EQ_ASYNC) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ }
+ }
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2820,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(info->rmap);
+ info->rmap = NULL;
+#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2703,6 +2933,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
{
u64 dev_flags = dev->flags;
int err = 0;
+ int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+ MLX4_MAX_NUM_VF);
if (reset_flow) {
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2728,6 +2960,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ if (total_vfs > fw_enabled_sriov_vfs) {
+ mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
+ total_vfs, fw_enabled_sriov_vfs);
+ err = -ENOMEM;
+ goto disable_sriov;
+ }
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
@@ -2749,6 +2987,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
@@ -2900,6 +3139,7 @@ slave_start:
existing_vfs,
reset_flow);
+ mlx4_close_fw(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -2988,18 +3228,6 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- int ib_ports = 0;
-
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- ib_ports++;
-
- if (ib_ports &&
- (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
- mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
- err = -EINVAL;
- goto err_close;
- }
if (dev->caps.num_ports < 2 &&
num_vfs_argc > 1) {
err = -EINVAL;
@@ -3036,7 +3264,7 @@ slave_start:
if (err)
goto err_master_mfunc;
- priv->msix_ctl.pool_bm = 0;
+ bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
@@ -3058,7 +3286,6 @@ slave_start:
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
@@ -3109,7 +3336,9 @@ err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
- mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_default_counters(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
@@ -3218,20 +3447,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
goto err_disable_pdev;
}
}
- if (total_vfs >= MLX4_MAX_NUM_VF) {
+ if (total_vfs > MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) than allowed (%d)\n",
- total_vfs, MLX4_MAX_NUM_VF - 1);
+ "Requested more VF's (%d) than allowed by hw (%d)\n",
+ total_vfs, MLX4_MAX_NUM_VF);
err = -EINVAL;
goto err_disable_pdev;
}
for (i = 0; i < MLX4_MAX_PORTS; i++) {
- if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
nvfs[i] + nvfs[2], i + 1,
- MLX4_MAX_NUM_VF_P_PORT - 1);
+ MLX4_MAX_NUM_VF_P_PORT);
err = -EINVAL;
goto err_disable_pdev;
}
@@ -3407,7 +3636,9 @@ static void mlx4_unload_one(struct pci_dev *pdev)
mlx4_free_resource_tracker(dev,
RES_TR_FREE_SLAVES_ONLY);
- mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_default_counters(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/mcg.c b/kernel/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01..1d4e2e054 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, port, steer, index, qp->qpn);
+ err = new_steering_entry(dev, port, steer,
+ index, qp->qpn);
else
- existing_steering_entry(dev, port, steer,
- index, qp->qpn);
+ err = existing_steering_entry(dev, port, steer,
+ index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 502d3dd2c..e1cf9036a 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -65,6 +65,8 @@
#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
+#define MLX4_QUERY_IF_STAT_RESET BIT(31)
+
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
@@ -287,6 +289,12 @@ struct mlx4_icm_table {
#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
+#define MLX4_EQ_ASYNC 0
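+/* CQ (completion) vector numbers exclude the async EQ at index 0; conversion shifts by one */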
+#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -391,6 +399,9 @@ struct mlx4_eq {
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
struct mlx4_eq_tasklet tasklet_ctx;
+ struct mlx4_active_ports actv_ports;
+ u32 ref_count;
+ cpumask_var_t affinity_mask;
};
struct mlx4_slave_eqe {
@@ -776,6 +787,9 @@ struct mlx4_set_port_general_context {
u8 pprx;
u8 pfcrx;
u16 reserved4;
+ u32 reserved5;
+ u8 phv_en;
+ u8 reserved6[3];
};
struct mlx4_set_port_rqp_calc_context {
@@ -808,6 +822,7 @@ struct mlx4_port_info {
struct mlx4_vlan_table vlan_table;
struct mlx4_roce_gid_table gid_table;
int base_qpn;
+ struct cpu_rmap *rmap;
};
struct mlx4_sense {
@@ -818,7 +833,7 @@ struct mlx4_sense {
};
struct mlx4_msix_ctl {
- u64 pool_bm;
+ DECLARE_BITMAP(pool_bm, MAX_MSIX);
struct mutex pool_lock;
};
@@ -864,6 +879,7 @@ struct mlx4_priv {
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
struct mlx4_bitmap counters_bitmap;
+ int def_counter[MLX4_MAX_PORTS];
struct mlx4_catas_err catas_err;
@@ -997,6 +1013,8 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
@@ -1360,6 +1378,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
+/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
+void mlx4_replace_zero_macs(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 909fcf803..c41f15102 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
*/
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV 2
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
@@ -339,7 +340,7 @@ struct mlx4_en_cq {
struct napi_struct napi;
int size;
int buf_size;
- unsigned vector;
+ int vector;
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
@@ -567,6 +568,7 @@ struct mlx4_en_priv {
#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_counter_stats pf_stats;
struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_rx rx_flowstats;
@@ -582,6 +584,7 @@ struct mlx4_en_priv {
struct device *ddev;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
struct hwtstamp_config hwtstamp_config;
+ u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -795,7 +798,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
-
+int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
+ int loopback);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 00555832a..7fd466c0b 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -23,6 +23,14 @@ struct mlx4_en_pkt_stats {
#define NUM_PKT_STATS 43
};
+struct mlx4_en_counter_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+#define NUM_PF_STATS 4
+};
+
struct mlx4_en_port_stats {
unsigned long tso_packets;
unsigned long xmit_more;
@@ -99,7 +107,7 @@ enum {
};
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
- NUM_FLOW_STATS + NUM_PERF_STATS)
+ NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/mr.c b/kernel/drivers/net/ethernet/mellanox/mlx4/mr.c
index 78f51e103..93195191f 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -318,7 +318,7 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
key, NULL);
} else {
mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR_OR_NULL(mailbox))
+ if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/profile.c b/kernel/drivers/net/ethernet/mellanox/mlx4/profile.c
index 2bf437aaf..bae8b22ed 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -82,7 +82,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 total_size = 0;
struct mlx4_resource *profile;
- struct mlx4_resource tmp;
struct sysinfo si;
int i, j;
@@ -149,11 +148,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
- if (profile[j].size > profile[j - 1].size) {
- tmp = profile[j];
- profile[j] = profile[j - 1];
- profile[j - 1] = tmp;
- }
+ if (profile[j].size > profile[j - 1].size)
+ swap(profile[j], profile[j - 1]);
}
for (i = 0; i < MLX4_RES_NUM; ++i) {
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/qp.c b/kernel/drivers/net/ethernet/mellanox/mlx4/qp.c
index b75214a80..168823dde 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -422,20 +422,37 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
u64 qp_mask = 0;
int err = 0;
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
- if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
- return -EINVAL;
-
if (attr & MLX4_UPDATE_QP_SMAC) {
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
}
+ if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
+ if (!(dev->caps.flags2
+ & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
+ mlx4_warn(dev,
+ "Trying to set src check LB, but it isn't supported\n");
+ err = -ENOTSUPP;
+ goto out;
+ }
+ pri_addr_path_mask |=
+ 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
+ if (params->flags &
+ MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
+ cmd->qp_context.pri_path.fl |=
+ MLX4_FL_ETH_SRC_CHECK_MC_LB;
+ }
+ }
+
if (attr & MLX4_UPDATE_QP_VSD) {
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
@@ -458,7 +475,7 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
-
+out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
@@ -749,7 +766,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
{
int sort[MLX4_NUM_QP_REGION];
- int i, j, tmp;
+ int i, j;
int last_base = dev->caps.num_qps;
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
@@ -758,11 +775,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
- dev->caps.reserved_qps_cnt[sort[j - 1]]) {
- tmp = sort[j];
- sort[j] = sort[j - 1];
- sort[j - 1] = tmp;
- }
+ dev->caps.reserved_qps_cnt[sort[j - 1]])
+ swap(sort[j], sort[j - 1]);
}
}
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index bafe2180c..cad6c44df 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -46,8 +46,11 @@
#include "mlx4.h"
#include "fw.h"
+#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_PF_COUNTERS_PER_PORT 2
+#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
@@ -459,11 +462,21 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
+
+static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+{
+ /* reduce the sink counter */
+ return (dev->caps.max_counters - 1 -
+ (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
+ / MLX4_MAX_PORTS;
+}
+
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
+ int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
@@ -499,6 +512,9 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
res_alloc->allocated = kzalloc((dev->persist->
num_vfs + 1) *
sizeof(int), GFP_KERNEL);
+ /* Reduce the sink counter */
+ if (i == RES_COUNTER)
+ res_alloc->res_free = dev->caps.max_counters - 1;
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
@@ -577,9 +593,17 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
- res_alloc->guaranteed[t] = 0;
if (t == mlx4_master_func_num(dev))
- res_alloc->res_free = res_alloc->quota[t];
+ res_alloc->guaranteed[t] =
+ MLX4_PF_COUNTERS_PER_PORT *
+ MLX4_MAX_PORTS;
+ else if (t <= max_vfs_guarantee_counter)
+ res_alloc->guaranteed[t] =
+ MLX4_VF_COUNTERS_PER_PORT *
+ MLX4_MAX_PORTS;
+ else
+ res_alloc->guaranteed[t] = 0;
+ res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
@@ -700,6 +724,9 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
}
}
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port);
+
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
@@ -715,6 +742,10 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ err = handle_counter(dev, qpc, slave, port);
+ if (err)
+ goto out;
+
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
@@ -739,9 +770,12 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
}
}
+ /* preserve IF_COUNTER flag */
+ qpc->pri_path.vlan_control &=
+ MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
- qpc->pri_path.vlan_control =
+ qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
@@ -749,12 +783,12 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
- qpc->pri_path.vlan_control =
+ qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
} else { /* priority tagged */
- qpc->pri_path.vlan_control =
+ qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
}
@@ -859,6 +893,83 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_unlock_irq(mlx4_tlock(dev));
}
+static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int port);
+
+static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
+ int counter_index)
+{
+ struct res_common *r;
+ struct res_counter *counter;
+ int ret = 0;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return ret;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, counter_index, RES_COUNTER);
+ if (!r || r->owner != slave)
+ ret = -EINVAL;
+ counter = container_of(r, struct res_counter, com);
+ if (!counter->port)
+ counter->port = port;
+
+ spin_unlock_irq(mlx4_tlock(dev));
+ return ret;
+}
+
+static int handle_unexisting_counter(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qpc, u8 slave,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (port == counter->port) {
+ qpc->pri_path.counter_index = counter->com.res_id;
+ spin_unlock_irq(mlx4_tlock(dev));
+ return 0;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ /* No existing counter, need to allocate a new counter */
+ err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
+ port);
+ if (err == -ENOENT) {
+ err = 0;
+ } else if (err && err != -ENOSPC) {
+ mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
+ __func__, slave, err);
+ } else {
+ qpc->pri_path.counter_index = counter_idx;
+ mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
+ __func__, slave, qpc->pri_path.counter_index);
+ err = 0;
+ }
+
+ return err;
+}
+
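+/* Associate the QP with a counter owned by this slave: validate an existing
+ * counter index, or reuse/allocate a per-port one for the slave.
+ */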
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port)
+{
+ if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
+ return handle_existing_counter(dev, slave, port,
+ qpc->pri_path.counter_index);
+
+ return handle_unexisting_counter(dev, qpc, slave, port);
+}
+
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
@@ -952,7 +1063,7 @@ static struct res_common *alloc_srq_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_counter_tr(int id)
+static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
@@ -962,6 +1073,7 @@ static struct res_common *alloc_counter_tr(int id)
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
+ ret->port = port;
return &ret->com;
}
@@ -1022,7 +1134,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
- ret = alloc_counter_tr(id);
+ ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
@@ -1039,6 +1151,53 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
return ret;
}
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ int *counters_arr;
+ int i = 0, err = 0;
+
+ memset(data, 0, sizeof(*data));
+
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return -ENOMEM;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (counter->port == port) {
+ counters_arr[i] = (int)tmp->res_id;
+ i++;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ counters_arr[i] = -1;
+
+ i = 0;
+
+ while (counters_arr[i] != -1) {
+ err = mlx4_get_counter_stats(dev, counters_arr[i], data,
+ 0);
+ if (err) {
+ memset(data, 0, sizeof(*data));
+ goto table_changed;
+ }
+ i++;
+ }
+
+table_changed:
+ kfree(counters_arr);
+ return 0;
+}
+
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
@@ -1082,8 +1241,10 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
return 0;
undo:
- for (--i; i >= base; --i)
+ for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
+ list_del_init(&res_arr[i]->list);
+ }
spin_unlock_irq(mlx4_tlock(dev));
@@ -2001,7 +2162,7 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
@@ -2019,7 +2180,7 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
- err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
+ err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
if (err) {
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
@@ -2101,7 +2262,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
@@ -2335,6 +2496,9 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return -EINVAL;
index = get_param_l(&in_param);
+ if (index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;
@@ -2703,6 +2867,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
context->qkey = cpu_to_be32(qkey);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox);
+
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2725,6 +2893,10 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
+
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
@@ -3526,8 +3698,8 @@ static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
((port & 1) << 6);
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
- mlx4_is_eth(dev, port + 1)) {
+ if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+ qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
qpc->pri_path.sched_queue = pri_sched_queue;
}
@@ -3595,9 +3767,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
orig_sched_queue = qpc->pri_path.sched_queue;
- err = update_vport_qp_param(dev, inbox, slave, qpn);
- if (err)
- return err;
err = get_res(dev, slave, qpn, RES_QP, &qp);
if (err)
@@ -3607,6 +3776,10 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
goto out;
}
+ err = update_vport_qp_param(dev, inbox, slave, qpn);
+ if (err)
+ goto out;
+
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
/* if no error, save sched queue value passed in by VF. This is
@@ -3965,6 +4138,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+
/*
* In case of missing eth header, append eth header with a MAC address
* assigned to the VF.
@@ -4025,7 +4214,9 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
-#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
+ 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
+ 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -4048,6 +4239,16 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
(pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
return -EPERM;
+ if ((pri_addr_path_mask &
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
+ !(dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
+ mlx4_warn(dev,
+ "Src check LB for slave %d isn't supported\n",
+ slave);
+ return -ENOTSUPP;
+ }
+
/* Just change the smac for the QP */
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
@@ -4105,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
- ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
- if (ctrl->port <= 0)
+ err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (err <= 0)
return -EINVAL;
+ ctrl->port = err;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
@@ -4117,6 +4319,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+ if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+ handle_eth_header_mcast_prio(ctrl, rule_header);
+
+ if (slave == dev->caps.function)
+ goto execute;
+
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4143,6 +4351,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put;
}
+execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
@@ -4744,26 +4953,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
struct res_counter *counter;
struct res_counter *tmp;
int err;
- int index;
+ int *counters_arr = NULL;
+ int i, j;
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
slave);
- spin_lock_irq(mlx4_tlock(dev));
- list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
- if (counter->com.owner == slave) {
- index = counter->com.res_id;
- rb_erase(&counter->com.node,
- &tracker->res_tree[RES_COUNTER]);
- list_del(&counter->com.list);
- kfree(counter);
- __mlx4_counter_free(dev, index);
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return;
+
+ do {
+ i = 0;
+ j = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+ if (counter->com.owner == slave) {
+ counters_arr[i++] = counter->com.res_id;
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
+ list_del(&counter->com.list);
+ kfree(counter);
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ while (j < i) {
+ __mlx4_counter_free(dev, counters_arr[j++]);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
- }
- spin_unlock_irq(mlx4_tlock(dev));
+ } while (i);
+
+ kfree(counters_arr);
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)