author     Yunhong Jiang <yunhong.jiang@linux.intel.com>   2017-03-08 23:13:28 -0800
committer  Yunhong Jiang <yunhong.jiang@linux.intel.com>   2017-03-08 23:36:15 -0800
commit     52f993b8e89487ec9ee15a7fb4979e0f09a45b27 (patch)
tree       d65304486afe0bea4a311c783c0d72791c8c0aa2 /kernel/drivers/infiniband/ulp
parent     c189ccac5702322ed843fe17057035b7222a59b6 (diff)
Upgrade to 4.4.50-rt62
The current kernel is based on rt kernel v4.4.6-rt14; upgrade it to
4.4.50-rt62. The steps used to produce this change:

a) Clone a git repo from
   git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

b) Get the diff between these two changesets:
   git diff 640eca2901f3435e616157b11379d3223a44b391 705619beeea1b0b48219a683fd1a901a86fdaf5e
   where the two commits are:
   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 640eca2901f3435e616157b11379d3223a44b391
   640eca2901f3 v4.4.6-rt14
   localversion-rt
   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 705619beeea1b0b48219a683fd1a901a86fdaf5e
   705619beeea1 Linux 4.4.50-rt62
   localversion-rt

c) One patch has already been backported, so revert it before applying:
   filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1 --dry-run

Upstream status: backport

Change-Id: I244d57a32f6066e5a5b9915f9fbf99e7bbca6e01
Signed-off-by: Yunhong Jiang <yunhong.jiang@linux.intel.com>
Diffstat (limited to 'kernel/drivers/infiniband/ulp')
-rw-r--r--  kernel/drivers/infiniband/ulp/ipoib/ipoib.h            |  21
-rw-r--r--  kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c         |  33
-rw-r--r--  kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c         |  23
-rw-r--r--  kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c       |  60
-rw-r--r--  kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  37
-rw-r--r--  kernel/drivers/infiniband/ulp/isert/ib_isert.c         | 122
-rw-r--r--  kernel/drivers/infiniband/ulp/isert/ib_isert.h         |   2
-rw-r--r--  kernel/drivers/infiniband/ulp/srp/ib_srp.c             |   2
-rw-r--r--  kernel/drivers/infiniband/ulp/srpt/ib_srpt.c           |  59
9 files changed, 180 insertions(+), 179 deletions(-)
diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib.h b/kernel/drivers/infiniband/ulp/ipoib/ipoib.h
index 3ede10309..07cfcc326 100644
--- a/kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -131,15 +133,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now, make room for a dummy
+ * pseudo header and set skb field accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
@@ -472,6 +480,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
void ipoib_reap_ah(struct work_struct *work);
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
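
Taken together, the ipoib.h hunks above replace the old skb->cb stash with a 20-byte pseudo header that rides in front of the 4-byte encap header, so the device hard header becomes IPOIB_HARD_LEN = 24 bytes. A minimal userspace model of that layout (plain arrays and a made-up destination address standing in for the sk_buff helpers; a sketch, not kernel code):

    #include <stdio.h>
    #include <string.h>

    #define INFINIBAND_ALEN    20
    #define IPOIB_ENCAP_LEN     4
    #define IPOIB_PSEUDO_LEN   20
    #define IPOIB_HARD_LEN     (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)

    struct ipoib_pseudo_header { unsigned char hwaddr[INFINIBAND_ALEN]; };
    struct ipoib_header        { unsigned short proto, reserved; };

    int main(void)
    {
            unsigned char frame[IPOIB_HARD_LEN + 64] = { 0 };
            unsigned char daddr[INFINIBAND_ALEN] = { 0x00, 0x01 }; /* made up */

            /* what ipoib_hard_header() builds: pseudo header outermost,
             * then the 4-byte encap header, then the payload */
            struct ipoib_pseudo_header *phdr = (struct ipoib_pseudo_header *)frame;
            struct ipoib_header *header =
                    (struct ipoib_header *)(frame + IPOIB_PSEUDO_LEN);

            memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
            header->proto = 0x0800;         /* htons(ETH_P_IP) in the kernel */

            /* what ipoib_start_xmit() does: read the address back, then drop
             * the pseudo header so only the encap header goes on the wire */
            unsigned char *wire = frame + sizeof(*phdr);

            printf("hard_header_len=%d, on-wire header=%d, payload offset=%td\n",
                   IPOIB_HARD_LEN, IPOIB_ENCAP_LEN,
                   (wire + IPOIB_ENCAP_LEN) - frame);
            return 0;
    }

The push in ipoib_hard_header() and the matching pull in ipoib_start_xmit(), both later in this diff, are exactly this pair of offsets.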
diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3ae9726ef..3ba7de5f9 100644
--- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -147,15 +149,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
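
For reference, plugging in the constants from ipoib.h above: IPOIB_HARD_LEN = 4 + 20 = 24, so IPOIB_CM_RX_RESERVE = ALIGN(24, 16) - IPOIB_ENCAP_LEN = 32 - 4 = 28. With 28 bytes reserved, the 4-byte IPoIB header sits at offset 28 and the IP header starts at offset 32, a multiple of 16; the reserve is also at least IPOIB_PSEUDO_LEN, which leaves headroom for the skb_add_pseudo_hdr() push in the receive path below.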
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
@@ -1035,8 +1036,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
- ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
- priv->ca->name);
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
@@ -1299,6 +1298,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
}
}
+#define QPN_AND_OPTIONS_OFFSET 4
+
static void ipoib_cm_tx_start(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1307,6 +1308,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
+ struct ipoib_path *path;
int ret;
struct ib_sa_path_rec pathrec;
@@ -1319,7 +1321,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
+
qpn = IPOIB_QPN(neigh->daddr);
+ /*
+ * As long as the search is with these 2 locks,
+ * path existence indicates its validity.
+ */
+ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ if (!path) {
+ pr_info("%s ignore not valid path %pI6\n",
+ __func__,
+ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1331,6 +1345,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
spin_lock_irqsave(&priv->lock, flags);
if (ret) {
+free_neigh:
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
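
QPN_AND_OPTIONS_OFFSET reflects the 20-byte IPoIB hardware address format: 4 bytes of flags plus a 24-bit QPN, followed by the 16-byte GID, which is why the path lookup keys on neigh->daddr + 4. A small self-checking sketch of that layout (the struct name and main() are illustration only, not kernel definitions):

    #include <assert.h>
    #include <stdio.h>

    #define INFINIBAND_ALEN         20
    #define QPN_AND_OPTIONS_OFFSET   4

    struct ipoib_hwaddr_layout {
            unsigned char qpn_and_options[QPN_AND_OPTIONS_OFFSET]; /* flags + 24-bit QPN */
            unsigned char gid[16];          /* what __path_find() keys on */
    };

    int main(void)
    {
            static_assert(sizeof(struct ipoib_hwaddr_layout) == INFINIBAND_ALEN,
                          "20-byte IPoIB hardware address");
            printf("GID starts at byte %d of the hardware address\n",
                   QPN_AND_OPTIONS_OFFSET);
            return 0;
    }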
diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c1407..8f8c3af9f 100644
--- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -130,16 +130,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is
+ * 64 bytes aligned
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -242,10 +241,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
-
- skb->truesize = SKB_TRUESIZE(skb->len);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
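
With the same constants, the two hunks above reserve sizeof(struct ipoib_pseudo_header) = 20 bytes in the datagram receive buffer; the adapter then delivers the 40-byte GRH followed by the 4-byte encap header, so the IP header lands at offset 20 + 40 + 4 = 64 (equivalently IPOIB_HARD_LEN + IB_GRH_BYTES = 24 + 40), keeping it 16-byte aligned just as the replaced comment required.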
@@ -1030,8 +1026,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
ipoib_mark_paths_invalid(dev);
+ /* Set IPoIB operation as down to prevent races between:
+ * the flush flow which leaves MCG and on the fly joins
+ * which can happen during that time. mcast restart task
+ * should deal with join requests we missed.
+ */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_flush_ah(dev);
}
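
The OPER_UP handling above is a clear-then-conditionally-restore pattern: the flag is dropped for the duration of the multicast flush so that joins racing with it bail out (the new check at the top of ipoib_mcast_join() further down returns -EINVAL while the bit is clear), then restored only if it had been set. A standalone model of the pattern (C11 atomics standing in for the kernel bit operations; a sketch, not driver code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define IPOIB_FLAG_OPER_UP 0

    static _Atomic unsigned long flags = 1UL << IPOIB_FLAG_OPER_UP;

    /* stand-ins for the kernel's test_and_clear_bit()/set_bit() */
    static int test_and_clear_bit(int nr, _Atomic unsigned long *addr)
    {
            unsigned long mask = 1UL << nr;

            return (atomic_fetch_and(addr, ~mask) & mask) != 0;
    }

    static void set_bit(int nr, _Atomic unsigned long *addr)
    {
            atomic_fetch_or(addr, 1UL << nr);
    }

    static void flush_light(void)
    {
            int oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &flags);

            /* ipoib_mcast_dev_flush() would run here; with OPER_UP clear,
             * concurrent joins bail out instead of racing the flush */
            if (oper_up)
                    set_bit(IPOIB_FLAG_OPER_UP, &flags);
    }

    int main(void)
    {
            flush_light();
            printf("OPER_UP after flush: %lu\n", atomic_load(&flags) & 1UL);
            return 0;
    }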
diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7d3281866..8a4d10452 100644
--- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -481,7 +481,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
return -EINVAL;
}
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct rb_node *n = priv->path_tree.rb_node;
@@ -850,9 +850,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -889,7 +892,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -897,16 +900,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -934,10 +939,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -951,13 +958,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -970,13 +979,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -985,16 +994,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP should always perform path find */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1011,11 +1020,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1047,8 +1058,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1057,12 +1068,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1131,7 +1143,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
@@ -1636,7 +1650,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 6dcef673d..a123d0439 100644
--- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@ out_locked:
return status;
}
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
ib_sa_comp_mask comp_mask;
int ret = 0;
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.join_state = 4;
#endif
}
+ spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
+ spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
- spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
complete(&mcast->done);
+ spin_lock_irq(&priv->lock);
}
+ return 0;
}
void ipoib_mcast_join_task(struct work_struct *work)
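
ipoib_mcast_join() is now documented as running with priv->lock held, and the hunk above drops that lock only around ib_sa_join_multicast(), which allocates with GFP_KERNEL and may sleep, something that is not allowed under a spinlock. A minimal userspace model of that unlock-around-a-sleeping-call shape (a pthread mutex standing in for priv->lock; sa_join() and the messages are made up for illustration):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* models priv->lock */

    static int sa_join(void)            /* stands in for ib_sa_join_multicast() */
    {
            usleep(1000);               /* the real call may sleep */
            return 0;
    }

    static int mcast_join(void)         /* caller must hold 'lock', as in the patch */
    {
            int ret;

            pthread_mutex_unlock(&lock); /* drop the lock across the sleeping call */
            ret = sa_join();
            pthread_mutex_lock(&lock);   /* retake it before touching shared state */
            if (ret)
                    fprintf(stderr, "join failed, would requeue with backoff\n");
            return ret;
    }

    int main(void)
    {
            pthread_mutex_lock(&lock);
            printf("join returned %d\n", mcast_join());
            pthread_mutex_unlock(&lock);
            return 0;
    }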
@@ -553,8 +563,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
- if (ib_query_port(priv->ca, priv->port, &port_attr) ||
- port_attr.state != IB_PORT_ACTIVE) {
+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+ ipoib_dbg(priv, "ib_query_port() failed\n");
+ return;
+ }
+ if (port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;
@@ -620,9 +633,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
/* Found the next unjoined group */
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
- ipoib_mcast_join(dev, mcast);
- spin_lock_irq(&priv->lock);
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until;
@@ -641,10 +655,9 @@ out:
if (mcast) {
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ ipoib_mcast_join(dev, mcast);
}
spin_unlock_irq(&priv->lock);
- if (mcast)
- ipoib_mcast_join(dev, mcast);
}
int ipoib_mcast_start_thread(struct net_device *dev)
@@ -743,9 +756,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/kernel/drivers/infiniband/ulp/isert/ib_isert.c b/kernel/drivers/infiniband/ulp/isert/ib_isert.c
index 8a51c3b5d..b0edb66a2 100644
--- a/kernel/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/kernel/drivers/infiniband/ulp/isert/ib_isert.c
@@ -66,6 +66,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -815,12 +816,31 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iscsi doesn't know this connection
+ * so schedule a cleanup ourselves
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ complete(&isert_conn->wait);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
* to TEMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@@ -832,23 +852,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);
}
static int
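
The simplified isert_conn_terminate() above relies on the numeric ordering of iser_conn_state: with the new ISER_CONN_BOUND inserted between UP and FULL_FEATURE (see the ib_isert.h hunk near the end of this diff), TERMINATING and DOWN remain the two largest values, so the early return fires only once teardown has already started. A tiny self-contained check of that property (main() is illustration only):

    #include <stdio.h>

    enum iser_conn_state {
            ISER_CONN_INIT,
            ISER_CONN_UP,
            ISER_CONN_BOUND,        /* new state added by this patch */
            ISER_CONN_FULL_FEATURE,
            ISER_CONN_TERMINATING,
            ISER_CONN_DOWN,
    };

    int main(void)
    {
            for (int s = ISER_CONN_INIT; s <= ISER_CONN_DOWN; s++)
                    printf("state %d: terminate %s\n", s,
                           s >= ISER_CONN_TERMINATING ? "is a no-op" : "runs");
            return 0;
    }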
@@ -882,35 +898,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ isert_wait4flush(isert_conn);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -929,12 +937,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -980,13 +992,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
@@ -1002,12 +1011,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1120,12 +1126,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1620,7 +1623,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
- isert_conn->post_recv_buf_count--;
}
static int
@@ -2035,7 +2037,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
void *start = isert_conn->rx_descs;
int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
- if (wr_id >= start && wr_id < start + len)
+ if ((wr_id >= start && wr_id < start + len) ||
+ (wr_id == isert_conn->login_req_buf))
return false;
return true;
@@ -2059,10 +2062,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
isert_unmap_tx_desc(desc, ib_dev);
else
isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
}
@@ -3193,6 +3192,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
diff --git a/kernel/drivers/infiniband/ulp/isert/ib_isert.h b/kernel/drivers/infiniband/ulp/isert/ib_isert.h
index 3d7fbc47c..1874d21da 100644
--- a/kernel/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/kernel/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,6 +50,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
@@ -144,7 +145,6 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
diff --git a/kernel/drivers/infiniband/ulp/srp/ib_srp.c b/kernel/drivers/infiniband/ulp/srp/ib_srp.c
index 3db9a6597..5f0f4fc58 100644
--- a/kernel/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/kernel/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1519,7 +1519,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (dev->use_fast_reg) {
state.sg = idb_sg;
- sg_set_buf(idb_sg, req->indirect_desc, idb_len);
+ sg_init_one(idb_sg, req->indirect_desc, idb_len);
idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
diff --git a/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c b/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2e2fe818c..eaabf3125 100644
--- a/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1737,47 +1737,6 @@ send_sense:
return -1;
}
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
-
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->cmd.tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
-}
-
static int srp_tmr_to_tcm(int fn)
{
switch (fn) {
@@ -1812,7 +1771,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
- uint32_t tag = 0;
int tcm_tmr;
int rc;
@@ -1828,25 +1786,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto fail;
- }
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
- if (rc < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_DOES_NOT_EXIST;
- goto fail;
- }
- tag = srp_tsk->task_tag;
- }
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;