Diffstat (limited to 'kernel/drivers/infiniband/ulp')
17 files changed, 1859 insertions, 1867 deletions
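Most of the IPoIB churn in the diff below is mechanical: the cached send work request changes from a plain struct ib_send_wr into the UD-specific struct ib_ud_wr, which embeds the generic WR as its first member. Generic fields are then reached through tx_wr.wr, UD-only fields (address handle, remote QPN/QKEY, LSO header) move to the top level, and ib_post_send() is handed &tx_wr.wr. The companion change pulls the scatter/gather setup that ipoib_ib.c and ipoib_cm.c used to duplicate into the shared ipoib_build_sge() helper added to ipoib.h. A minimal sketch of the embedding, using simplified stand-in definitions rather than the real <rdma/ib_verbs.h> ones:

/* Stand-in types only; the real structures in <rdma/ib_verbs.h> carry
 * many more members.  This just shows the embedding the diff relies on. */
struct ib_sge;
struct ib_ah;

struct ib_send_wr {
	unsigned long long	wr_id;
	int			opcode;		/* IB_WR_SEND, IB_WR_LSO, ... */
	int			send_flags;
	struct ib_sge		*sg_list;
	int			num_sge;
};

struct ib_ud_wr {
	struct ib_send_wr	wr;		/* generic part, embedded first */
	struct ib_ah		*ah;		/* UD-only: address handle */
	unsigned int		remote_qpn;
	unsigned int		remote_qkey;
	void			*header;	/* LSO header, when used */
	int			hlen;
	unsigned int		mss;
};

/* Accesses that used to be priv->tx_wr.wr_id and priv->tx_wr.wr.ud.remote_qpn
 * become the following with the embedded layout: */
static void fill_ud_wr(struct ib_ud_wr *tx_wr, unsigned long long wr_id,
		       struct ib_ah *ah, unsigned int qpn)
{
	tx_wr->wr.wr_id = wr_id;	/* generic fields via .wr     */
	tx_wr->remote_qpn = qpn;	/* UD-only fields stay on top */
	tx_wr->ah = ah;
	/* a post would then pass the generic part: &tx_wr->wr */
}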
diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib.h b/kernel/drivers/infiniband/ulp/ipoib/ipoib.h index bd94b0a6e..3ede10309 100644 --- a/kernel/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib.h @@ -80,7 +80,7 @@ enum { IPOIB_NUM_WC = 4, IPOIB_MAX_PATH_REC_QUEUE = 3, - IPOIB_MAX_MCAST_QUEUE = 3, + IPOIB_MAX_MCAST_QUEUE = 64, IPOIB_FLAG_OPER_UP = 0, IPOIB_FLAG_INITIALIZED = 1, @@ -239,7 +239,7 @@ struct ipoib_cm_tx { struct net_device *dev; struct ipoib_neigh *neigh; struct ipoib_path *path; - struct ipoib_cm_tx_buf *tx_ring; + struct ipoib_tx_buf *tx_ring; unsigned tx_head; unsigned tx_tail; unsigned long flags; @@ -342,7 +342,6 @@ struct ipoib_dev_priv { u16 pkey; u16 pkey_index; struct ib_pd *pd; - struct ib_mr *mr; struct ib_cq *recv_cq; struct ib_cq *send_cq; struct ib_qp *qp; @@ -361,7 +360,7 @@ struct ipoib_dev_priv { unsigned tx_head; unsigned tx_tail; struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; - struct ib_send_wr tx_wr; + struct ib_ud_wr tx_wr; unsigned tx_outstanding; struct ib_wc send_wc[MAX_SEND_CQE]; @@ -496,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev); void ipoib_mcast_join_task(struct work_struct *work); void ipoib_mcast_carrier_on_task(struct work_struct *work); void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); +void ipoib_mcast_free(struct ipoib_mcast *mc); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); @@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev); void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req); +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req); + +static inline void ipoib_build_sge(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) +{ + int i, off; + struct sk_buff *skb = tx_req->skb; + skb_frag_t *frags = skb_shinfo(skb)->frags; + int nr_frags = skb_shinfo(skb)->nr_frags; + u64 *mapping = tx_req->mapping; + + if (skb_headlen(skb)) { + priv->tx_sge[0].addr = mapping[0]; + priv->tx_sge[0].length = skb_headlen(skb); + off = 1; + } else + off = 0; + + for (i = 0; i < nr_frags; ++i) { + priv->tx_sge[i + off].addr = mapping[i + off]; + priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); + } + priv->tx_wr.wr.num_sge = nr_frags + off; +} + #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); @@ -522,6 +549,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey); +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); int ipoib_init_qp(struct net_device *dev); int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c index cf32a778e..3ae9726ef 100644 --- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -332,7 +332,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev, int i; for (i = 0; i < priv->cm.num_frags; ++i) - sge[i].lkey = priv->mr->lkey; + sge[i].lkey = priv->pd->local_dma_lkey; sge[0].length = IPOIB_CM_HEAD_SIZE; for (i = 1; i < 
priv->cm.num_frags; ++i) @@ -694,24 +694,21 @@ repost: static inline int post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, - u64 addr, int len) + struct ipoib_tx_buf *tx_req) { struct ib_send_wr *bad_wr; - priv->tx_sge[0].addr = addr; - priv->tx_sge[0].length = len; + ipoib_build_sge(priv, tx_req); - priv->tx_wr.num_sge = 1; - priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; + priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM; - return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); + return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr); } void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(dev); - struct ipoib_cm_tx_buf *tx_req; - u64 addr; + struct ipoib_tx_buf *tx_req; int rc; if (unlikely(skb->len > tx->mtu)) { @@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ */ tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; - addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); - if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { + + if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } - tx_req->mapping = addr; - skb_orphan(skb); skb_dst_drop(skb); - rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), - addr, skb->len); + rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; - ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); } else { dev->trans_start = jiffies; @@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_tx *tx = wc->qp->qp_context; unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; - struct ipoib_cm_tx_buf *tx_req; + struct ipoib_tx_buf *tx_req; unsigned long flags; ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", @@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &tx->tx_ring[wr_id]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); /* FIXME: is this right? Shouldn't we only increment on success? 
*/ ++dev->stats.tx_packets; @@ -854,7 +848,7 @@ int ipoib_cm_dev_open(struct net_device *dev) } ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), - 0, NULL); + 0); if (ret) { printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, IPOIB_CM_IETF_ID | priv->qp->qp_num); @@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ struct ib_qp *tx_qp; + if (dev->features & NETIF_F_SG) + attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; + tx_qp = ib_create_qp(priv->pd, &attr); if (PTR_ERR(tx_qp) == -EINVAL) { ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", @@ -1170,7 +1167,7 @@ err_tx: static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) { struct ipoib_dev_priv *priv = netdev_priv(p->dev); - struct ipoib_cm_tx_buf *tx_req; + struct ipoib_tx_buf *tx_req; unsigned long begin; ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", @@ -1197,8 +1194,7 @@ timeout: while ((int) p->tx_tail - (int) p->tx_head < 0) { tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, - DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++p->tx_tail; netif_tx_lock_bh(p->dev); @@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work) spin_unlock_irq(&priv->lock); } - static ssize_t show_mode(struct device *d, struct device_attribute *attr, char *buf) { diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 63b92cbb2..5ea0c1407 100644 --- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -263,8 +263,7 @@ repost: "for buf %d\n", wr_id); } -static int ipoib_dma_map_tx(struct ib_device *ca, - struct ipoib_tx_buf *tx_req) +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; @@ -305,8 +304,8 @@ partial_error: return -EIO; } -static void ipoib_dma_unmap_tx(struct ib_device *ca, - struct ipoib_tx_buf *tx_req) +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; @@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, int off; if (skb_headlen(skb)) { - ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); + ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb), + DMA_TO_DEVICE); off = 1; } else off = 0; @@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), - DMA_TO_DEVICE); + ib_dma_unmap_page(priv->ca, mapping[i + off], + skb_frag_size(frag), DMA_TO_DEVICE); } } @@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &priv->tx_ring[wr_id]; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; @@ -514,37 +514,23 @@ static inline int post_send(struct ipoib_dev_priv *priv, void *head, int hlen) { struct ib_send_wr *bad_wr; - int i, off; struct sk_buff *skb = tx_req->skb; - skb_frag_t *frags = skb_shinfo(skb)->frags; - int nr_frags = skb_shinfo(skb)->nr_frags; - u64 *mapping = tx_req->mapping; - if (skb_headlen(skb)) { - 
priv->tx_sge[0].addr = mapping[0]; - priv->tx_sge[0].length = skb_headlen(skb); - off = 1; - } else - off = 0; + ipoib_build_sge(priv, tx_req); - for (i = 0; i < nr_frags; ++i) { - priv->tx_sge[i + off].addr = mapping[i + off]; - priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); - } - priv->tx_wr.num_sge = nr_frags + off; - priv->tx_wr.wr_id = wr_id; - priv->tx_wr.wr.ud.remote_qpn = qpn; - priv->tx_wr.wr.ud.ah = address; + priv->tx_wr.wr.wr_id = wr_id; + priv->tx_wr.remote_qpn = qpn; + priv->tx_wr.ah = address; if (head) { - priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size; - priv->tx_wr.wr.ud.header = head; - priv->tx_wr.wr.ud.hlen = hlen; - priv->tx_wr.opcode = IB_WR_LSO; + priv->tx_wr.mss = skb_shinfo(skb)->gso_size; + priv->tx_wr.header = head; + priv->tx_wr.hlen = hlen; + priv->tx_wr.wr.opcode = IB_WR_LSO; } else - priv->tx_wr.opcode = IB_WR_SEND; + priv->tx_wr.wr.opcode = IB_WR_SEND; - return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); + return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr); } void ipoib_send(struct net_device *dev, struct sk_buff *skb, @@ -597,9 +583,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, } if (skb->ip_summed == CHECKSUM_PARTIAL) - priv->tx_wr.send_flags |= IB_SEND_IP_CSUM; + priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM; else - priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; + priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); @@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; --priv->tx_outstanding; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); if (netif_queue_stopped(dev)) netif_wake_queue(dev); @@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev) while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; @@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv) } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, - enum ipoib_flush_level level) + enum ipoib_flush_level level, + int nesting) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; int result; - down_read(&priv->vlan_rwsem); + down_read_nested(&priv->vlan_rwsem, nesting); /* * Flush any child interfaces too -- they might be up even if * the parent is down. 
*/ list_for_each_entry(cpriv, &priv->child_intfs, list) - __ipoib_ib_dev_flush(cpriv, level); + __ipoib_ib_dev_flush(cpriv, level, nesting + 1); up_read(&priv->vlan_rwsem); @@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); } void ipoib_ib_dev_flush_normal(struct work_struct *work) @@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); } void ipoib_ib_dev_flush_heavy(struct work_struct *work) @@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); } void ipoib_ib_dev_cleanup(struct net_device *dev) diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c index 9e1b203d7..7d3281866 100644 --- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -48,6 +48,9 @@ #include <linux/jhash.h> #include <net/arp.h> +#include <net/addrconf.h> +#include <linux/inetdevice.h> +#include <rdma/ib_cache.h> #define DRV_VERSION "1.0.0" @@ -89,13 +92,18 @@ struct workqueue_struct *ipoib_workqueue; struct ib_sa_client ipoib_sa_client; static void ipoib_add_one(struct ib_device *device); -static void ipoib_remove_one(struct ib_device *device); +static void ipoib_remove_one(struct ib_device *device, void *client_data); static void ipoib_neigh_reclaim(struct rcu_head *rp); +static struct net_device *ipoib_get_net_dev_by_params( + struct ib_device *dev, u8 port, u16 pkey, + const union ib_gid *gid, const struct sockaddr *addr, + void *client_data); static struct ib_client ipoib_client = { .name = "ipoib", .add = ipoib_add_one, - .remove = ipoib_remove_one + .remove = ipoib_remove_one, + .get_net_dev_by_params = ipoib_get_net_dev_by_params, }; int ipoib_open(struct net_device *dev) @@ -190,7 +198,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu struct ipoib_dev_priv *priv = netdev_priv(dev); if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) - features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); return features; } @@ -222,6 +230,225 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu) return 0; } +/* Called with an RCU read lock taken */ +static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr, + struct net_device *dev) +{ + struct net *net = dev_net(dev); + struct in_device *in_dev; + struct sockaddr_in *addr_in = (struct sockaddr_in *)addr; + struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr; + __be32 ret_addr; + + switch (addr->sa_family) { + case AF_INET: + in_dev = in_dev_get(dev); + if (!in_dev) + return false; + + ret_addr = inet_confirm_addr(net, in_dev, 0, + addr_in->sin_addr.s_addr, + RT_SCOPE_HOST); + in_dev_put(in_dev); + if (ret_addr) + return true; + + break; + case AF_INET6: + if (IS_ENABLED(CONFIG_IPV6) && + ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1)) + return true; + + break; + } + return false; +} + +/** + * Find the master net_device 
on top of the given net_device. + * @dev: base IPoIB net_device + * + * Returns the master net_device with a reference held, or the same net_device + * if no master exists. + */ +static struct net_device *ipoib_get_master_net_dev(struct net_device *dev) +{ + struct net_device *master; + + rcu_read_lock(); + master = netdev_master_upper_dev_get_rcu(dev); + if (master) + dev_hold(master); + rcu_read_unlock(); + + if (master) + return master; + + dev_hold(dev); + return dev; +} + +/** + * Find a net_device matching the given address, which is an upper device of + * the given net_device. + * @addr: IP address to look for. + * @dev: base IPoIB net_device + * + * If found, returns the net_device with a reference held. Otherwise return + * NULL. + */ +static struct net_device *ipoib_get_net_dev_match_addr( + const struct sockaddr *addr, struct net_device *dev) +{ + struct net_device *upper, + *result = NULL; + struct list_head *iter; + + rcu_read_lock(); + if (ipoib_is_dev_match_addr_rcu(addr, dev)) { + dev_hold(dev); + result = dev; + goto out; + } + + netdev_for_each_all_upper_dev_rcu(dev, upper, iter) { + if (ipoib_is_dev_match_addr_rcu(addr, upper)) { + dev_hold(upper); + result = upper; + break; + } + } +out: + rcu_read_unlock(); + return result; +} + +/* returns the number of IPoIB netdevs on top a given ipoib device matching a + * pkey_index and address, if one exists. + * + * @found_net_dev: contains a matching net_device if the return value >= 1, + * with a reference held. */ +static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv, + const union ib_gid *gid, + u16 pkey_index, + const struct sockaddr *addr, + int nesting, + struct net_device **found_net_dev) +{ + struct ipoib_dev_priv *child_priv; + struct net_device *net_dev = NULL; + int matches = 0; + + if (priv->pkey_index == pkey_index && + (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) { + if (!addr) { + net_dev = ipoib_get_master_net_dev(priv->dev); + } else { + /* Verify the net_device matches the IP address, as + * IPoIB child devices currently share a GID. */ + net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev); + } + if (net_dev) { + if (!*found_net_dev) + *found_net_dev = net_dev; + else + dev_put(net_dev); + ++matches; + } + } + + /* Check child interfaces */ + down_read_nested(&priv->vlan_rwsem, nesting); + list_for_each_entry(child_priv, &priv->child_intfs, list) { + matches += ipoib_match_gid_pkey_addr(child_priv, gid, + pkey_index, addr, + nesting + 1, + found_net_dev); + if (matches > 1) + break; + } + up_read(&priv->vlan_rwsem); + + return matches; +} + +/* Returns the number of matching net_devs found (between 0 and 2). 
Also + * return the matching net_device in the @net_dev parameter, holding a + * reference to the net_device, if the number of matches >= 1 */ +static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, + u16 pkey_index, + const union ib_gid *gid, + const struct sockaddr *addr, + struct net_device **net_dev) +{ + struct ipoib_dev_priv *priv; + int matches = 0; + + *net_dev = NULL; + + list_for_each_entry(priv, dev_list, list) { + if (priv->port != port) + continue; + + matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index, + addr, 0, net_dev); + if (matches > 1) + break; + } + + return matches; +} + +static struct net_device *ipoib_get_net_dev_by_params( + struct ib_device *dev, u8 port, u16 pkey, + const union ib_gid *gid, const struct sockaddr *addr, + void *client_data) +{ + struct net_device *net_dev; + struct list_head *dev_list = client_data; + u16 pkey_index; + int matches; + int ret; + + if (!rdma_protocol_ib(dev, port)) + return NULL; + + ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); + if (ret) + return NULL; + + if (!dev_list) + return NULL; + + /* See if we can find a unique device matching the L2 parameters */ + matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, + gid, NULL, &net_dev); + + switch (matches) { + case 0: + return NULL; + case 1: + return net_dev; + } + + dev_put(net_dev); + + /* Couldn't find a unique device with L2 parameters only. Use L3 + * address to uniquely match the net device */ + matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, + gid, addr, &net_dev); + switch (matches) { + case 0: + return NULL; + default: + dev_warn_ratelimited(&dev->dev, + "duplicate IP address detected\n"); + /* Fall through */ + case 1: + return net_dev; + } +} + int ipoib_set_mode(struct net_device *dev, const char *buf) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -232,8 +459,9 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) ipoib_warn(priv, "enabling connected mode " "will cause multicast packet drops\n"); netdev_update_features(dev); + dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); rtnl_unlock(); - priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; + priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; ipoib_flush_paths(dev); rtnl_lock(); @@ -921,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) unsigned long dt; unsigned long flags; int i; + LIST_HEAD(remove_list); + struct ipoib_mcast *mcast, *tmcast; + struct net_device *dev = priv->dev; if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return; @@ -948,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { + u8 *mgid = neigh->daddr + 4; + + /* Is this multicast ? 
*/ + if (*mgid == 0xff) { + mcast = __ipoib_mcast_find(dev, mgid); + + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, &remove_list); + } + } + rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, lockdep_is_held(&priv->lock))); @@ -963,6 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) out_unlock: spin_unlock_irqrestore(&priv->lock, flags); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { + ipoib_mcast_leave(dev, mcast); + ipoib_mcast_free(mcast); + } } static void ipoib_reap_neigh(struct work_struct *work) @@ -1128,7 +1376,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) { struct ipoib_neigh_table *ntbl = &priv->ntbl; struct ipoib_neigh_hash *htbl; - struct ipoib_neigh **buckets; + struct ipoib_neigh __rcu **buckets; u32 size; clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); @@ -1146,7 +1394,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) htbl->size = size; htbl->mask = (size - 1); htbl->buckets = buckets; - ntbl->htbl = htbl; + RCU_INIT_POINTER(ntbl->htbl, htbl); htbl->ntbl = ntbl; atomic_set(&ntbl->entries, 0); @@ -1577,7 +1825,8 @@ static struct net_device *ipoib_add_port(const char *format, SET_NETDEV_DEV(priv->dev, hca->dma_device); priv->dev->dev_id = port - 1; - if (!ib_query_port(hca, port, &attr)) + result = ib_query_port(hca, port, &attr); + if (!result) priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); else { printk(KERN_WARNING "%s: ib_query_port %d failed\n", @@ -1598,7 +1847,8 @@ static struct net_device *ipoib_add_port(const char *format, goto device_init_failed; } - if (ipoib_set_dev_features(priv, hca)) + result = ipoib_set_dev_features(priv, hca); + if (result) goto device_init_failed; /* @@ -1610,7 +1860,7 @@ static struct net_device *ipoib_add_port(const char *format, priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; - result = ib_query_gid(hca, port, 0, &priv->local_gid); + result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL); if (result) { printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", hca->name, port, result); @@ -1684,10 +1934,8 @@ static void ipoib_add_one(struct ib_device *device) struct list_head *dev_list; struct net_device *dev; struct ipoib_dev_priv *priv; - int s, e, p; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int p; + int count = 0; dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); if (!dev_list) @@ -1695,36 +1943,30 @@ static void ipoib_add_one(struct ib_device *device) INIT_LIST_HEAD(dev_list); - if (device->node_type == RDMA_NODE_IB_SWITCH) { - s = 0; - e = 0; - } else { - s = 1; - e = device->phys_port_cnt; - } - - for (p = s; p <= e; ++p) { - if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND) + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { + if (!rdma_protocol_ib(device, p)) continue; dev = ipoib_add_port("ib%d", device, p); if (!IS_ERR(dev)) { priv = netdev_priv(dev); list_add_tail(&priv->list, dev_list); + count++; } } + if (!count) { + kfree(dev_list); + return; + } + ib_set_client_data(device, &ipoib_client, dev_list); } -static void ipoib_remove_one(struct ib_device *device) +static void ipoib_remove_one(struct ib_device *device, void *client_data) { struct ipoib_dev_priv *priv, *tmp; - struct list_head *dev_list; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - 
return; + struct list_head *dev_list = client_data; - dev_list = ib_get_client_data(device, &ipoib_client); if (!dev_list) return; diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 140c94ce7..6dcef673d 100644 --- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, queue_delayed_work(priv->wq, &priv->mcast_task, 0); } -static void ipoib_mcast_free(struct ipoib_mcast *mcast) +void ipoib_mcast_free(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; int tx_dropped = 0; @@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, return mcast; } -static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->multicast_tree.rb_node; @@ -245,7 +245,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); spin_unlock_irq(&priv->lock); - priv->tx_wr.wr.ud.remote_qkey = priv->qkey; + priv->tx_wr.remote_qkey = priv->qkey; set_qkey = 1; } @@ -393,8 +393,13 @@ static int ipoib_mcast_join_complete(int status, goto out_locked; } } else { - if (mcast->logcount++ < 20) { - if (status == -ETIMEDOUT || status == -EAGAIN) { + bool silent_fail = + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && + status == -EINVAL; + + if (mcast->logcount < 20) { + if (status == -ETIMEDOUT || status == -EAGAIN || + silent_fail) { ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n", test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", mcast->mcmember.mgid.raw, status); @@ -403,6 +408,9 @@ static int ipoib_mcast_join_complete(int status, test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", mcast->mcmember.mgid.raw, status); } + + if (!silent_fail) + mcast->logcount++; } if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && @@ -448,8 +456,7 @@ out_locked: return status; } -static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, - int create) +static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_sa_multicast *multicast; @@ -471,7 +478,14 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE; - if (create) { + if (mcast != priv->broadcast) { + /* + * RFC 4391: + * The MGID MUST use the same P_Key, Q_Key, SL, MTU, + * and HopLimit as those used in the broadcast-GID. The rest + * of attributes SHOULD follow the values used in the + * broadcast-GID as well. + */ comp_mask |= IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_MTU_SELECTOR | @@ -492,6 +506,24 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.sl = priv->broadcast->mcmember.sl; rec.flow_label = priv->broadcast->mcmember.flow_label; rec.hop_limit = priv->broadcast->mcmember.hop_limit; + + /* + * Send-only IB Multicast joins do not work at the core + * IB layer yet, so we can't use them here. However, + * we are emulating an Ethernet multicast send, which + * does not require a multicast subscription and will + * still send properly. 
The most appropriate thing to + * do is to create the group if it doesn't exist as that + * most closely emulates the behavior, from a user space + * application perspecitive, of Ethernet multicast + * operation. For now, we do a full join, maybe later + * when the core IB layers support send only joins we + * will use them. + */ +#if 0 + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + rec.join_state = 4; +#endif } multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, @@ -517,7 +549,6 @@ void ipoib_mcast_join_task(struct work_struct *work) struct ib_port_attr port_attr; unsigned long delay_until = 0; struct ipoib_mcast *mcast = NULL; - int create = 1; if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return; @@ -530,7 +561,7 @@ void ipoib_mcast_join_task(struct work_struct *work) } priv->local_lid = port_attr.lid; - if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) + if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL)) ipoib_warn(priv, "ib_query_gid() failed\n"); else memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); @@ -566,7 +597,6 @@ void ipoib_mcast_join_task(struct work_struct *work) if (IS_ERR_OR_NULL(priv->broadcast->mc) && !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) { mcast = priv->broadcast; - create = 0; if (mcast->backoff > 1 && time_before(jiffies, mcast->delay_until)) { delay_until = mcast->delay_until; @@ -590,12 +620,8 @@ void ipoib_mcast_join_task(struct work_struct *work) /* Found the next unjoined group */ init_completion(&mcast->done); set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - create = 0; - else - create = 1; spin_unlock_irq(&priv->lock); - ipoib_mcast_join(dev, mcast, create); + ipoib_mcast_join(dev, mcast); spin_lock_irq(&priv->lock); } else if (!delay_until || time_before(mcast->delay_until, delay_until)) @@ -618,7 +644,7 @@ out: } spin_unlock_irq(&priv->lock); if (mcast) - ipoib_mcast_join(dev, mcast, create); + ipoib_mcast_join(dev, mcast); } int ipoib_mcast_start_thread(struct net_device *dev) @@ -651,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) return 0; } -static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret = 0; diff --git a/kernel/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/kernel/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 2d13fd08c..d48c5bae7 100644 --- a/kernel/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/kernel/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -141,6 +141,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) .sq_sig_type = IB_SIGNAL_ALL_WR, .qp_type = IB_QPT_UD }; + struct ib_cq_init_attr cq_attr = {}; int ret, size; int i; @@ -151,12 +152,6 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) return -ENODEV; } - priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(priv->mr)) { - printk(KERN_WARNING "%s: ib_get_dma_mr failed\n", ca->name); - goto out_free_pd; - } - /* * the various IPoIB tasks assume they will never race against * themselves, so always use a single thread workqueue @@ -164,7 +159,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) priv->wq = create_singlethread_workqueue("ipoib_wq"); if (!priv->wq) { printk(KERN_WARNING "ipoib: failed to allocate device WQ\n"); - goto out_free_mr; + goto 
out_free_pd; } size = ipoib_recvq_size + 1; @@ -179,14 +174,17 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) if (ret != -ENOSYS) goto out_free_wq; - priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); + cq_attr.cqe = size; + priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, + dev, &cq_attr); if (IS_ERR(priv->recv_cq)) { printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name); goto out_cm_dev_cleanup; } + cq_attr.cqe = ipoib_sendq_size; priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, - dev, ipoib_sendq_size, 0); + dev, &cq_attr); if (IS_ERR(priv->send_cq)) { printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name); goto out_free_recv_cq; @@ -221,13 +219,13 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff; for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) - priv->tx_sge[i].lkey = priv->mr->lkey; + priv->tx_sge[i].lkey = priv->pd->local_dma_lkey; - priv->tx_wr.opcode = IB_WR_SEND; - priv->tx_wr.sg_list = priv->tx_sge; - priv->tx_wr.send_flags = IB_SEND_SIGNALED; + priv->tx_wr.wr.opcode = IB_WR_SEND; + priv->tx_wr.wr.sg_list = priv->tx_sge; + priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED; - priv->rx_sge[0].lkey = priv->mr->lkey; + priv->rx_sge[0].lkey = priv->pd->local_dma_lkey; priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); priv->rx_wr.num_sge = 1; @@ -250,9 +248,6 @@ out_free_wq: destroy_workqueue(priv->wq); priv->wq = NULL; -out_free_mr: - ib_dereg_mr(priv->mr); - out_free_pd: ib_dealloc_pd(priv->pd); @@ -285,12 +280,7 @@ void ipoib_transport_dev_cleanup(struct net_device *dev) priv->wq = NULL; } - if (ib_dereg_mr(priv->mr)) - ipoib_warn(priv, "ib_dereg_mr failed\n"); - - if (ib_dealloc_pd(priv->pd)) - ipoib_warn(priv, "ib_dealloc_pd failed\n"); - + ib_dealloc_pd(priv->pd); } void ipoib_event(struct ib_event_handler *handler, diff --git a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c index c933d882c..9080161e0 100644 --- a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -74,36 +74,44 @@ #include "iscsi_iser.h" +MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"); +MODULE_VERSION(DRV_VER); + static struct scsi_host_template iscsi_iser_sht; static struct iscsi_transport iscsi_iser_transport; static struct scsi_transport_template *iscsi_iser_scsi_transport; +static struct workqueue_struct *release_wq; +struct iser_global ig; + +int iser_debug_level = 0; +module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); static unsigned int iscsi_max_lun = 512; module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); +MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512"); -int iser_debug_level = 0; -bool iser_pi_enable = false; -int iser_pi_guard = 1; +unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; +module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); -MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"); -MODULE_VERSION(DRV_VER); - 
-module_param_named(debug_level, iser_debug_level, int, 0644); -MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); -module_param_named(pi_enable, iser_pi_enable, bool, 0644); +bool iser_pi_enable = false; +module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); -module_param_named(pi_guard, iser_pi_guard, int, 0644); +int iser_pi_guard; +module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); -static struct workqueue_struct *release_wq; -struct iser_global ig; - /* - * iscsi_iser_recv() - Process a successfull recv completion + * iscsi_iser_recv() - Process a successful recv completion * @conn: iscsi connection * @hdr: iscsi header * @rx_data: buffer containing receive data payload @@ -118,7 +126,6 @@ iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, { int rc = 0; int datalen; - int ahslen; /* verify PDU length */ datalen = ntoh24(hdr->dlength); @@ -133,9 +140,6 @@ iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, iser_dbg("aligned datalen (%d) hdr, %d (IB)\n", datalen, rx_data_len); - /* read AHS */ - ahslen = hdr->hlength * 4; - rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len); if (rc && rc != ISCSI_ERR_NO_SCSI_CMD) goto error; @@ -201,11 +205,12 @@ iser_initialize_task_headers(struct iscsi_task *task, goto out; } + tx_desc->wr_idx = 0; tx_desc->mapped = true; tx_desc->dma_addr = dma_addr; tx_desc->tx_sg[0].addr = tx_desc->dma_addr; tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; - tx_desc->tx_sg[0].lkey = device->mr->lkey; + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; iser_task->iser_conn = iser_conn; out: @@ -626,6 +631,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, if (ep) { iser_conn = ep->dd_data; max_cmds = iser_conn->max_cmds; + shost->sg_tablesize = iser_conn->scsi_sg_tablesize; + shost->max_sectors = iser_conn->scsi_max_sectors; mutex_lock(&iser_conn->state_mutex); if (iser_conn->state != ISER_CONN_UP) { @@ -644,6 +651,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, SHOST_DIX_GUARD_CRC); } + /* + * Limit the sg_tablesize and max_sectors based on the device + * max fastreg page list length. 
+ */ + shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, + ib_conn->device->dev_attr.max_fast_reg_page_list_len); + shost->max_sectors = min_t(unsigned int, + 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9); + if (iscsi_host_add(shost, ib_conn->device->ib_device->dma_device)) { mutex_unlock(&iser_conn->state_mutex); @@ -746,15 +762,7 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; - stats->custom_length = 4; - strcpy(stats->custom[0].desc, "qp_tx_queue_full"); - stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */ - strcpy(stats->custom[1].desc, "fmr_map_not_avail"); - stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */; - strcpy(stats->custom[2].desc, "eh_abort_cnt"); - stats->custom[2].value = conn->eh_abort_cnt; - strcpy(stats->custom[3].desc, "fmr_unalign_cnt"); - stats->custom[3].value = conn->fmr_unalign_cnt; + stats->custom_length = 0; } static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, @@ -843,10 +851,9 @@ failure: static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { - struct iser_conn *iser_conn; + struct iser_conn *iser_conn = ep->dd_data; int rc; - iser_conn = ep->dd_data; rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion, msecs_to_jiffies(timeout_ms)); /* if conn establishment failed, return error code to iscsi */ @@ -858,7 +865,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) mutex_unlock(&iser_conn->state_mutex); } - iser_info("ib conn %p rc = %d\n", iser_conn, rc); + iser_info("iser conn %p rc = %d\n", iser_conn, rc); if (rc > 0) return 1; /* success, this is the equivalent of POLLOUT */ @@ -880,11 +887,9 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) { - struct iser_conn *iser_conn; + struct iser_conn *iser_conn = ep->dd_data; - iser_conn = ep->dd_data; - iser_info("ep %p iser conn %p state %d\n", - ep, iser_conn, iser_conn->state); + iser_info("ep %p iser conn %p\n", ep, iser_conn); mutex_lock(&iser_conn->state_mutex); iser_conn_terminate(iser_conn); @@ -904,6 +909,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) mutex_unlock(&iser_conn->state_mutex); iser_conn_release(iser_conn); } + iscsi_destroy_endpoint(ep); } @@ -961,19 +967,27 @@ static umode_t iser_attr_is_visible(int param_type, int param) return 0; } +static int iscsi_iser_slave_alloc(struct scsi_device *sdev) +{ + blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); + + return 0; +} + static struct scsi_host_template iscsi_iser_sht = { .module = THIS_MODULE, .name = "iSCSI Initiator over iSER", .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, - .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, - .max_sectors = 1024, + .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, + .max_sectors = ISER_DEF_MAX_SECTORS, .cmd_per_lun = ISER_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .target_alloc = iscsi_target_alloc, - .use_clustering = DISABLE_CLUSTERING, + .use_clustering = ENABLE_CLUSTERING, + .slave_alloc = iscsi_iser_slave_alloc, .proc_name = "iscsi_iser", .this_id = -1, .track_queue_depth = 1, @@ -1078,7 +1092,7 @@ static void __exit iser_exit(void) if (!connlist_empty) { iser_err("Error cleanup stage completed but 
we still have iser " - "connections, destroying them anyway.\n"); + "connections, destroying them anyway\n"); list_for_each_entry_safe(iser_conn, n, &ig.connlist, conn_list) { iser_conn_release(iser_conn); diff --git a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h index d2b6caf76..8a5998e6a 100644 --- a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -98,8 +98,13 @@ #define SHIFT_4K 12 #define SIZE_4K (1ULL << SHIFT_4K) #define MASK_4K (~(SIZE_4K-1)) - /* support up to 512KB in one RDMA */ -#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) + +/* Default support is 512KB I/O size */ +#define ISER_DEF_MAX_SECTORS 1024 +#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K) +/* Maximum support is 8MB I/O size */ +#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K) + #define ISER_DEF_XMIT_CMDS_DEFAULT 512 #if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT #define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX @@ -222,23 +227,19 @@ enum iser_data_dir { * @size: num entries of this sg * @data_len: total beffer byte len * @dma_nents: returned by dma_map_sg - * @orig_sg: pointer to the original sg list (in case - * we used a copy) - * @orig_size: num entris of orig sg list */ struct iser_data_buf { struct scatterlist *sg; - unsigned int size; + int size; unsigned long data_len; unsigned int dma_nents; - struct scatterlist *orig_sg; - unsigned int orig_size; - }; +}; /* fwd declarations */ struct iser_device; struct iscsi_iser_task; struct iscsi_endpoint; +struct iser_reg_resources; /** * struct iser_mem_reg - iSER memory registration info @@ -259,6 +260,14 @@ enum iser_desc_type { ISCSI_TX_DATAOUT }; +/* Maximum number of work requests per task: + * Data memory region local invalidate + fast registration + * Protection memory region local invalidate + fast registration + * Signature memory region local invalidate + fast registration + * PDU send + */ +#define ISER_MAX_WRS 7 + /** * struct iser_tx_desc - iSER TX descriptor (for send wr_id) * @@ -271,6 +280,11 @@ enum iser_desc_type { * unsolicited data-out or control * @num_sge: number sges used on this TX task * @mapped: Is the task header mapped + * @wr_idx: Current WR index + * @wrs: Array of WRs per task + * @data_reg: Data buffer registration details + * @prot_reg: Protection buffer registration details + * @sig_attrs: Signature attributes */ struct iser_tx_desc { struct iser_hdr iser_header; @@ -280,6 +294,15 @@ struct iser_tx_desc { struct ib_sge tx_sg[2]; int num_sge; bool mapped; + u8 wr_idx; + union iser_wr { + struct ib_send_wr send; + struct ib_reg_wr fast_reg; + struct ib_sig_handover_wr sig; + } wrs[ISER_MAX_WRS]; + struct iser_mem_reg data_reg; + struct iser_mem_reg prot_reg; + struct ib_sig_attrs sig_attrs; }; #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ @@ -326,6 +349,33 @@ struct iser_comp { }; /** + * struct iser_device - Memory registration operations + * per-device registration schemes + * + * @alloc_reg_res: Allocate registration resources + * @free_reg_res: Free registration resources + * @fast_reg_mem: Register memory buffers + * @unreg_mem: Un-register memory buffers + * @reg_desc_get: Get a registration descriptor for pool + * @reg_desc_put: Get a registration descriptor to pool + */ +struct iser_reg_ops { + int (*alloc_reg_res)(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); + void (*free_reg_res)(struct ib_conn *ib_conn); + int 
(*reg_mem)(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg); + void (*unreg_mem)(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); + struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn); + void (*reg_desc_put)(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); +}; + +/** * struct iser_device - iSER device handle * * @ib_device: RDMA device @@ -338,11 +388,7 @@ struct iser_comp { * @comps_used: Number of completion contexts used, Min between online * cpus and device max completion vectors * @comps: Dinamically allocated array of completion handlers - * Memory registration pool Function pointers (FMR or Fastreg): - * @iser_alloc_rdma_reg_res: Allocation of memory regions pool - * @iser_free_rdma_reg_res: Free of memory regions pool - * @iser_reg_rdma_mem: Memory registration routine - * @iser_unreg_rdma_mem: Memory deregistration routine + * @reg_ops: Registration ops */ struct iser_device { struct ib_device *ib_device; @@ -354,54 +400,69 @@ struct iser_device { int refcount; int comps_used; struct iser_comp *comps; - int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn, - unsigned cmds_max); - void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn); - int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task, - enum iser_data_dir cmd_dir); - void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task, - enum iser_data_dir cmd_dir); + struct iser_reg_ops *reg_ops; }; #define ISER_CHECK_GUARD 0xc0 #define ISER_CHECK_REFTAG 0x0f #define ISER_CHECK_APPTAG 0x30 -enum iser_reg_indicator { - ISER_DATA_KEY_VALID = 1 << 0, - ISER_PROT_KEY_VALID = 1 << 1, - ISER_SIG_KEY_VALID = 1 << 2, - ISER_FASTREG_PROTECTED = 1 << 3, +/** + * struct iser_reg_resources - Fast registration recources + * + * @mr: memory region + * @fmr_pool: pool of fmrs + * @page_vec: fast reg page list used by fmr pool + * @mr_valid: is mr valid indicator + */ +struct iser_reg_resources { + union { + struct ib_mr *mr; + struct ib_fmr_pool *fmr_pool; + }; + struct iser_page_vec *page_vec; + u8 mr_valid:1; }; /** * struct iser_pi_context - Protection information context * - * @prot_mr: protection memory region - * @prot_frpl: protection fastreg page list - * @sig_mr: signature feature enabled memory region + * @rsc: protection buffer registration resources + * @sig_mr: signature enable memory region + * @sig_mr_valid: is sig_mr valid indicator + * @sig_protected: is region protected indicator */ struct iser_pi_context { - struct ib_mr *prot_mr; - struct ib_fast_reg_page_list *prot_frpl; + struct iser_reg_resources rsc; struct ib_mr *sig_mr; + u8 sig_mr_valid:1; + u8 sig_protected:1; }; /** - * struct fast_reg_descriptor - Fast registration descriptor + * struct iser_fr_desc - Fast registration descriptor * * @list: entry in connection fastreg pool - * @data_mr: data memory region - * @data_frpl: data fastreg page list + * @rsc: data buffer registration resources * @pi_ctx: protection information context - * @reg_indicators: fast registration indicators */ -struct fast_reg_descriptor { +struct iser_fr_desc { struct list_head list; - struct ib_mr *data_mr; - struct ib_fast_reg_page_list *data_frpl; + struct iser_reg_resources rsc; struct iser_pi_context *pi_ctx; - u8 reg_indicators; +}; + +/** + * struct iser_fr_pool: connection fast registration pool + * + * @list: list of fastreg descriptors + * @lock: protects fmr/fastreg pool + * @size: size of the pool + */ +struct iser_fr_pool { + struct list_head list; + spinlock_t 
lock; + int size; }; /** @@ -417,15 +478,7 @@ struct fast_reg_descriptor { * @pi_support: Indicate device T10-PI support * @beacon: beacon send wr to signal all flush errors were drained * @flush_comp: completes when all connection completions consumed - * @lock: protects fmr/fastreg pool - * @union.fmr: - * @pool: FMR pool for fast registrations - * @page_vec: page vector to hold mapped commands pages - * used for registration - * @union.fastreg: - * @pool: Fast registration descriptors pool for fast - * registrations - * @pool_size: Size of pool + * @fr_pool: connection fast registration poool */ struct ib_conn { struct rdma_cm_id *cma_id; @@ -438,17 +491,7 @@ struct ib_conn { bool pi_support; struct ib_send_wr beacon; struct completion flush_comp; - spinlock_t lock; - union { - struct { - struct ib_fmr_pool *pool; - struct iser_page_vec *page_vec; - } fmr; - struct { - struct list_head pool; - int pool_size; - } fastreg; - }; + struct iser_fr_pool fr_pool; }; /** @@ -479,6 +522,8 @@ struct ib_conn { * @rx_desc_head: head of rx_descs cyclic buffer * @rx_descs: rx buffers array (cyclic buffer) * @num_rx_descs: number of rx descriptors + * @scsi_sg_tablesize: scsi host sg_tablesize + * @scsi_max_sectors: scsi host max sectors */ struct iser_conn { struct ib_conn ib_conn; @@ -503,6 +548,8 @@ struct iser_conn { unsigned int rx_desc_head; struct iser_rx_desc *rx_descs; u32 num_rx_descs; + unsigned short scsi_sg_tablesize; + unsigned int scsi_max_sectors; }; /** @@ -558,6 +605,10 @@ extern struct iser_global ig; extern int iser_debug_level; extern bool iser_pi_enable; extern int iser_pi_guard; +extern unsigned int iser_max_sectors; +extern bool iser_always_reg; + +int iser_assign_reg_ops(struct iser_device *device); int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task); @@ -599,10 +650,10 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem, enum iser_data_dir cmd_dir); -int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task, - enum iser_data_dir cmd_dir); -int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, - enum iser_data_dir cmd_dir); +int iser_reg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir); +void iser_unreg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir); int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr, @@ -632,15 +683,40 @@ int iser_initialize_task_headers(struct iscsi_task *task, struct iser_tx_desc *tx_desc); int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, struct iscsi_session *session); -int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max); +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); void iser_free_fmr_pool(struct ib_conn *ib_conn); -int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max); +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); void iser_free_fastreg_pool(struct ib_conn *ib_conn); u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir, sector_t *sector); -struct fast_reg_descriptor * -iser_reg_desc_get(struct ib_conn *ib_conn); +struct iser_fr_desc * +iser_reg_desc_get_fr(struct ib_conn *ib_conn); void -iser_reg_desc_put(struct ib_conn *ib_conn, - struct fast_reg_descriptor *desc); +iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); +struct iser_fr_desc * +iser_reg_desc_get_fmr(struct ib_conn *ib_conn); +void +iser_reg_desc_put_fmr(struct 
ib_conn *ib_conn, + struct iser_fr_desc *desc); + +static inline struct ib_send_wr * +iser_tx_next_wr(struct iser_tx_desc *tx_desc) +{ + struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send; + struct ib_send_wr *last_wr; + + if (tx_desc->wr_idx) { + last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send; + last_wr->next = cur_wr; + } + tx_desc->wr_idx++; + + return cur_wr; +} + #endif diff --git a/kernel/drivers/infiniband/ulp/iser/iser_initiator.c b/kernel/drivers/infiniband/ulp/iser/iser_initiator.c index 0a47f42fe..ffd00c420 100644 --- a/kernel/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/kernel/drivers/infiniband/ulp/iser/iser_initiator.c @@ -49,7 +49,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; - struct iser_device *device = iser_task->iser_conn->ib_conn.device; struct iser_mem_reg *mem_reg; int err; struct iser_hdr *hdr = &iser_task->desc.iser_header; @@ -73,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) return err; } - err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN); + err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN); if (err) { iser_err("Failed to set up Data-IN RDMA\n"); return err; @@ -103,7 +102,6 @@ iser_prepare_write_cmd(struct iscsi_task *task, unsigned int edtl) { struct iscsi_iser_task *iser_task = task->dd_data; - struct iser_device *device = iser_task->iser_conn->ib_conn.device; struct iser_mem_reg *mem_reg; int err; struct iser_hdr *hdr = &iser_task->desc.iser_header; @@ -128,7 +126,7 @@ iser_prepare_write_cmd(struct iscsi_task *task, return err; } - err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT); + err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT); if (err != 0) { iser_err("Failed to register write cmd RDMA mem\n"); return err; @@ -170,13 +168,7 @@ static void iser_create_send_desc(struct iser_conn *iser_conn, memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); tx_desc->iser_header.flags = ISER_VER; - tx_desc->num_sge = 1; - - if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { - tx_desc->tx_sg[0].lkey = device->mr->lkey; - iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc); - } } static void iser_free_login_buf(struct iser_conn *iser_conn) @@ -266,7 +258,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; - if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max)) + if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max, + iser_conn->scsi_sg_tablesize)) goto create_rdma_reg_res_failed; if (iser_alloc_login_buf(iser_conn)) @@ -291,7 +284,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, rx_sg = &rx_desc->rx_sg; rx_sg->addr = rx_desc->dma_addr; rx_sg->length = ISER_RX_PAYLOAD_SIZE; - rx_sg->lkey = device->mr->lkey; + rx_sg->lkey = device->pd->local_dma_lkey; } iser_conn->rx_desc_head = 0; @@ -307,7 +300,7 @@ rx_desc_dma_map_failed: rx_desc_alloc_fail: iser_free_login_buf(iser_conn); alloc_login_buf_fail: - device->iser_free_rdma_reg_res(ib_conn); + device->reg_ops->free_reg_res(ib_conn); create_rdma_reg_res_failed: iser_err("failed allocating rx descriptors / data buffers\n"); return -ENOMEM; @@ -320,8 +313,8 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_device *device = ib_conn->device; - if (device->iser_free_rdma_reg_res) - device->iser_free_rdma_reg_res(ib_conn); + if 
(device->reg_ops->free_reg_res) + device->reg_ops->free_reg_res(ib_conn); rx_desc = iser_conn->rx_descs; for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) @@ -545,7 +538,7 @@ int iser_send_control(struct iscsi_conn *conn, tx_dsg->addr = iser_conn->login_req_dma; tx_dsg->length = task->data_count; - tx_dsg->lkey = device->mr->lkey; + tx_dsg->lkey = device->pd->local_dma_lkey; mdesc->num_sge = 2; } @@ -668,61 +661,25 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) { - struct iser_device *device = iser_task->iser_conn->ib_conn.device; - int is_rdma_data_aligned = 1; - int is_rdma_prot_aligned = 1; int prot_count = scsi_prot_sg_count(iser_task->sc); - /* if we were reading, copy back to unaligned sglist, - * anyway dma_unmap and free the copy - */ - if (iser_task->data[ISER_DIR_IN].orig_sg) { - is_rdma_data_aligned = 0; - iser_finalize_rdma_unaligned_sg(iser_task, - &iser_task->data[ISER_DIR_IN], - ISER_DIR_IN); - } - - if (iser_task->data[ISER_DIR_OUT].orig_sg) { - is_rdma_data_aligned = 0; - iser_finalize_rdma_unaligned_sg(iser_task, - &iser_task->data[ISER_DIR_OUT], - ISER_DIR_OUT); - } - - if (iser_task->prot[ISER_DIR_IN].orig_sg) { - is_rdma_prot_aligned = 0; - iser_finalize_rdma_unaligned_sg(iser_task, - &iser_task->prot[ISER_DIR_IN], - ISER_DIR_IN); - } - - if (iser_task->prot[ISER_DIR_OUT].orig_sg) { - is_rdma_prot_aligned = 0; - iser_finalize_rdma_unaligned_sg(iser_task, - &iser_task->prot[ISER_DIR_OUT], - ISER_DIR_OUT); - } - if (iser_task->dir[ISER_DIR_IN]) { - device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); - if (is_rdma_data_aligned) - iser_dma_unmap_task_data(iser_task, - &iser_task->data[ISER_DIR_IN], - DMA_FROM_DEVICE); - if (prot_count && is_rdma_prot_aligned) + iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); + iser_dma_unmap_task_data(iser_task, + &iser_task->data[ISER_DIR_IN], + DMA_FROM_DEVICE); + if (prot_count) iser_dma_unmap_task_data(iser_task, &iser_task->prot[ISER_DIR_IN], DMA_FROM_DEVICE); } if (iser_task->dir[ISER_DIR_OUT]) { - device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); - if (is_rdma_data_aligned) - iser_dma_unmap_task_data(iser_task, - &iser_task->data[ISER_DIR_OUT], - DMA_TO_DEVICE); - if (prot_count && is_rdma_prot_aligned) + iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); + iser_dma_unmap_task_data(iser_task, + &iser_task->data[ISER_DIR_OUT], + DMA_TO_DEVICE); + if (prot_count) iser_dma_unmap_task_data(iser_task, &iser_task->prot[ISER_DIR_OUT], DMA_TO_DEVICE); diff --git a/kernel/drivers/infiniband/ulp/iser/iser_memory.c b/kernel/drivers/infiniband/ulp/iser/iser_memory.c index f0cdc961e..ea765fb96 100644 --- a/kernel/drivers/infiniband/ulp/iser/iser_memory.c +++ b/kernel/drivers/infiniband/ulp/iser/iser_memory.c @@ -38,194 +38,97 @@ #include <linux/scatterlist.h> #include "iscsi_iser.h" +static +int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *mem_reg); +static +int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *mem_reg); + +static struct iser_reg_ops fastreg_ops = { + .alloc_reg_res = iser_alloc_fastreg_pool, + .free_reg_res = iser_free_fastreg_pool, + .reg_mem = iser_fast_reg_mr, + .unreg_mem = iser_unreg_mem_fastreg, + .reg_desc_get = iser_reg_desc_get_fr, + .reg_desc_put = iser_reg_desc_put_fr, +}; -static void -iser_free_bounce_sg(struct iser_data_buf *data) -{ - struct 
scatterlist *sg; - int count; - - for_each_sg(data->sg, sg, data->size, count) - __free_page(sg_page(sg)); - - kfree(data->sg); - - data->sg = data->orig_sg; - data->size = data->orig_size; - data->orig_sg = NULL; - data->orig_size = 0; -} +static struct iser_reg_ops fmr_ops = { + .alloc_reg_res = iser_alloc_fmr_pool, + .free_reg_res = iser_free_fmr_pool, + .reg_mem = iser_fast_reg_fmr, + .unreg_mem = iser_unreg_mem_fmr, + .reg_desc_get = iser_reg_desc_get_fmr, + .reg_desc_put = iser_reg_desc_put_fmr, +}; -static int -iser_alloc_bounce_sg(struct iser_data_buf *data) +int iser_assign_reg_ops(struct iser_device *device) { - struct scatterlist *sg; - struct page *page; - unsigned long length = data->data_len; - int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE); - - sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC); - if (!sg) - goto err; - - sg_init_table(sg, nents); - while (length) { - u32 page_len = min_t(u32, length, PAGE_SIZE); - - page = alloc_page(GFP_ATOMIC); - if (!page) - goto err; - - sg_set_page(&sg[i], page, page_len, 0); - length -= page_len; - i++; + struct ib_device_attr *dev_attr = &device->dev_attr; + + /* Assign function handles - based on FMR support */ + if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr && + device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) { + iser_info("FMR supported, using FMR for registration\n"); + device->reg_ops = &fmr_ops; + } else + if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + iser_info("FastReg supported, using FastReg for registration\n"); + device->reg_ops = &fastreg_ops; + } else { + iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); + return -1; } - data->orig_sg = data->sg; - data->orig_size = data->size; - data->sg = sg; - data->size = nents; - return 0; - -err: - for (; i > 0; i--) - __free_page(sg_page(&sg[i - 1])); - kfree(sg); - - return -ENOMEM; -} - -static void -iser_copy_bounce(struct iser_data_buf *data, bool to_buffer) -{ - struct scatterlist *osg, *bsg = data->sg; - void *oaddr, *baddr; - unsigned int left = data->data_len; - unsigned int bsg_off = 0; - int i; - - for_each_sg(data->orig_sg, osg, data->orig_size, i) { - unsigned int copy_len, osg_off = 0; - - oaddr = kmap_atomic(sg_page(osg)) + osg->offset; - copy_len = min(left, osg->length); - while (copy_len) { - unsigned int len = min(copy_len, bsg->length - bsg_off); - - baddr = kmap_atomic(sg_page(bsg)) + bsg->offset; - if (to_buffer) - memcpy(baddr + bsg_off, oaddr + osg_off, len); - else - memcpy(oaddr + osg_off, baddr + bsg_off, len); - - kunmap_atomic(baddr - bsg->offset); - osg_off += len; - bsg_off += len; - copy_len -= len; - - if (bsg_off >= bsg->length) { - bsg = sg_next(bsg); - bsg_off = 0; - } - } - kunmap_atomic(oaddr - osg->offset); - left -= osg_off; - } -} - -static inline void -iser_copy_from_bounce(struct iser_data_buf *data) -{ - iser_copy_bounce(data, false); -} - -static inline void -iser_copy_to_bounce(struct iser_data_buf *data) -{ - iser_copy_bounce(data, true); } -struct fast_reg_descriptor * -iser_reg_desc_get(struct ib_conn *ib_conn) +struct iser_fr_desc * +iser_reg_desc_get_fr(struct ib_conn *ib_conn) { - struct fast_reg_descriptor *desc; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; unsigned long flags; - spin_lock_irqsave(&ib_conn->lock, flags); - desc = list_first_entry(&ib_conn->fastreg.pool, - struct fast_reg_descriptor, list); + spin_lock_irqsave(&fr_pool->lock, flags); + desc = list_first_entry(&fr_pool->list, + struct 
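/*
 * [Editor's illustration, not part of the patch] With the fmr_ops/fastreg_ops
 * tables above, callers no longer branch on whether the device supports FMRs
 * or fast registration; they simply indirect through device->reg_ops, which
 * iser_assign_reg_ops() selected at device init.  A hedged sketch of the
 * calling pattern (descriptor release and error handling trimmed):
 */
static int example_register_via_ops(struct iscsi_iser_task *task,
				    struct iser_data_buf *mem,
				    struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_fr_desc *desc;

	desc = device->reg_ops->reg_desc_get(ib_conn);
	reg->mem_h = desc;

	/* dispatches to iser_fast_reg_fmr() or iser_fast_reg_mr() */
	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}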
iser_fr_desc, list); list_del(&desc->list); - spin_unlock_irqrestore(&ib_conn->lock, flags); + spin_unlock_irqrestore(&fr_pool->lock, flags); return desc; } void -iser_reg_desc_put(struct ib_conn *ib_conn, - struct fast_reg_descriptor *desc) +iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) { + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; unsigned long flags; - spin_lock_irqsave(&ib_conn->lock, flags); - list_add(&desc->list, &ib_conn->fastreg.pool); - spin_unlock_irqrestore(&ib_conn->lock, flags); + spin_lock_irqsave(&fr_pool->lock, flags); + list_add(&desc->list, &fr_pool->list); + spin_unlock_irqrestore(&fr_pool->lock, flags); } -/** - * iser_start_rdma_unaligned_sg - */ -static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, - struct iser_data_buf *data, - enum iser_data_dir cmd_dir) +struct iser_fr_desc * +iser_reg_desc_get_fmr(struct ib_conn *ib_conn) { - struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; - int rc; - - rc = iser_alloc_bounce_sg(data); - if (rc) { - iser_err("Failed to allocate bounce for data len %lu\n", - data->data_len); - return rc; - } - - if (cmd_dir == ISER_DIR_OUT) - iser_copy_to_bounce(data); - - data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, - (cmd_dir == ISER_DIR_OUT) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (!data->dma_nents) { - iser_err("Got dma_nents %d, something went wrong...\n", - data->dma_nents); - rc = -ENOMEM; - goto err; - } + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; - return 0; -err: - iser_free_bounce_sg(data); - return rc; + return list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); } -/** - * iser_finalize_rdma_unaligned_sg - */ - -void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, - struct iser_data_buf *data, - enum iser_data_dir cmd_dir) +void +iser_reg_desc_put_fmr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) { - struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; - - ib_dma_unmap_sg(dev, data->sg, data->size, - (cmd_dir == ISER_DIR_OUT) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - - if (cmd_dir == ISER_DIR_IN) - iser_copy_from_bounce(data); - - iser_free_bounce_sg(data); } #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) @@ -289,52 +192,6 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, return cur_page; } - -/** - * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned - * for RDMA sub-list of a scatter-gather list of memory buffers, and returns - * the number of entries which are aligned correctly. Supports the case where - * consecutive SG elements are actually fragments of the same physcial page. - */ -static int iser_data_buf_aligned_len(struct iser_data_buf *data, - struct ib_device *ibdev) -{ - struct scatterlist *sg, *sgl, *next_sg = NULL; - u64 start_addr, end_addr; - int i, ret_len, start_check = 0; - - if (data->dma_nents == 1) - return 1; - - sgl = data->sg; - start_addr = ib_sg_dma_address(ibdev, sgl); - - for_each_sg(sgl, sg, data->dma_nents, i) { - if (start_check && !IS_4K_ALIGNED(start_addr)) - break; - - next_sg = sg_next(sg); - if (!next_sg) - break; - - end_addr = start_addr + ib_sg_dma_len(ibdev, sg); - start_addr = ib_sg_dma_address(ibdev, next_sg); - - if (end_addr == start_addr) { - start_check = 0; - continue; - } else - start_check = 1; - - if (!IS_4K_ALIGNED(end_addr)) - break; - } - ret_len = (next_sg) ? 
i : i+1; - iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n", - ret_len, data->dma_nents, data); - return ret_len; -} - static void iser_data_buf_dump(struct iser_data_buf *data, struct ib_device *ibdev) { @@ -393,7 +250,7 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, { struct scatterlist *sg = mem->sg; - reg->sge.lkey = device->mr->lkey; + reg->sge.lkey = device->pd->local_dma_lkey; reg->rkey = device->mr->rkey; reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]); reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]); @@ -405,47 +262,21 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, return 0; } -static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, - struct iser_data_buf *mem, - enum iser_data_dir cmd_dir, - int aligned_len) -{ - struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; - struct iser_device *device = iser_task->iser_conn->ib_conn.device; - - iscsi_conn->fmr_unalign_cnt++; - iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", - aligned_len, mem->size); - - if (iser_debug_level > 0) - iser_data_buf_dump(mem, device->ib_device); - - /* unmap the command data before accessing it */ - iser_dma_unmap_task_data(iser_task, mem, - (cmd_dir == ISER_DIR_OUT) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - - /* allocate copy buf, if we are writing, copy the */ - /* unaligned scatterlist, dma map the copy */ - if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0) - return -ENOMEM; - - return 0; -} - /** * iser_reg_page_vec - Register physical memory * * returns: 0 on success, errno code on failure */ static -int iser_reg_page_vec(struct iscsi_iser_task *iser_task, +int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem, - struct iser_page_vec *page_vec, - struct iser_mem_reg *mem_reg) + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg) { struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; + struct iser_page_vec *page_vec = rsc->page_vec; + struct ib_fmr_pool *fmr_pool = rsc->fmr_pool; struct ib_pool_fmr *fmr; int ret, plen; @@ -461,7 +292,7 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task, return -EINVAL; } - fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool, + fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages, page_vec->length, page_vec->pages[0]); @@ -471,11 +302,15 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task, return ret; } - mem_reg->sge.lkey = fmr->fmr->lkey; - mem_reg->rkey = fmr->fmr->rkey; - mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset; - mem_reg->sge.length = page_vec->data_size; - mem_reg->mem_h = fmr; + reg->sge.lkey = fmr->fmr->lkey; + reg->rkey = fmr->fmr->rkey; + reg->sge.addr = page_vec->pages[0] + page_vec->offset; + reg->sge.length = page_vec->data_size; + reg->mem_h = fmr; + + iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx," + " length=0x%x\n", reg->sge.lkey, reg->rkey, + reg->sge.addr, reg->sge.length); return 0; } @@ -505,71 +340,17 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { + struct iser_device *device = iser_task->iser_conn->ib_conn.device; struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; if (!reg->mem_h) return; - iser_reg_desc_put(&iser_task->iser_conn->ib_conn, - reg->mem_h); + device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, + reg->mem_h); reg->mem_h = NULL; } -/** - * 
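/*
 * [Editor's illustration, not part of the patch] iser_reg_dma() above covers
 * the no-registration case: a single, already DMA-mapped SG entry can be
 * described directly with the PD's local DMA lkey, so no MR work request is
 * needed at all.  Minimal sketch of that idea, assuming the sg entry was
 * mapped with ib_dma_map_sg() beforehand:
 */
static void example_fill_dma_sge(struct ib_pd *pd, struct ib_device *ibdev,
				 struct scatterlist *sg, struct ib_sge *sge)
{
	sge->lkey = pd->local_dma_lkey;
	sge->addr = ib_sg_dma_address(ibdev, sg);
	sge->length = ib_sg_dma_len(ibdev, sg);
}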
iser_reg_rdma_mem_fmr - Registers memory intended for RDMA, - * using FMR (if possible) obtaining rkey and va - * - * returns 0 on success, errno code on failure - */ -int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, - enum iser_data_dir cmd_dir) -{ - struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; - struct iser_device *device = ib_conn->device; - struct ib_device *ibdev = device->ib_device; - struct iser_data_buf *mem = &iser_task->data[cmd_dir]; - struct iser_mem_reg *mem_reg; - int aligned_len; - int err; - int i; - - mem_reg = &iser_task->rdma_reg[cmd_dir]; - - aligned_len = iser_data_buf_aligned_len(mem, ibdev); - if (aligned_len != mem->dma_nents) { - err = fall_to_bounce_buf(iser_task, mem, - cmd_dir, aligned_len); - if (err) { - iser_err("failed to allocate bounce buffer\n"); - return err; - } - } - - /* if there a single dma entry, FMR is not needed */ - if (mem->dma_nents == 1) { - return iser_reg_dma(device, mem, mem_reg); - } else { /* use FMR for multiple dma entries */ - err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec, - mem_reg); - if (err && err != -EAGAIN) { - iser_data_buf_dump(mem, ibdev); - iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", - mem->dma_nents, - ntoh24(iser_task->desc.iscsi_header.dlength)); - iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", - ib_conn->fmr.page_vec->data_size, - ib_conn->fmr.page_vec->length, - ib_conn->fmr.page_vec->offset); - for (i = 0; i < ib_conn->fmr.page_vec->length; i++) - iser_err("page_vec[%d] = 0x%llx\n", i, - (unsigned long long)ib_conn->fmr.page_vec->pages[i]); - } - if (err) - return err; - } - return 0; -} - static void iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs, struct ib_sig_domain *domain) @@ -637,10 +418,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) { u32 rkey; - memset(inv_wr, 0, sizeof(*inv_wr)); inv_wr->opcode = IB_WR_LOCAL_INV; inv_wr->wr_id = ISER_FASTREG_LI_WRID; inv_wr->ex.invalidate_rkey = mr->rkey; + inv_wr->send_flags = 0; + inv_wr->num_sge = 0; rkey = ib_inc_rkey(mr->rkey); ib_update_fast_reg_key(mr, rkey); @@ -648,61 +430,49 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, - struct fast_reg_descriptor *desc, + struct iser_pi_context *pi_ctx, struct iser_mem_reg *data_reg, struct iser_mem_reg *prot_reg, struct iser_mem_reg *sig_reg) { - struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; - struct iser_pi_context *pi_ctx = desc->pi_ctx; - struct ib_send_wr sig_wr, inv_wr; - struct ib_send_wr *bad_wr, *wr = NULL; - struct ib_sig_attrs sig_attrs; + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs; + struct ib_sig_handover_wr *wr; int ret; - memset(&sig_attrs, 0, sizeof(sig_attrs)); - ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs); + memset(sig_attrs, 0, sizeof(*sig_attrs)); + ret = iser_set_sig_attrs(iser_task->sc, sig_attrs); if (ret) goto err; - iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask); + iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); - if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) { - iser_inv_rkey(&inv_wr, pi_ctx->sig_mr); - wr = &inv_wr; - } + if (!pi_ctx->sig_mr_valid) + iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr); - memset(&sig_wr, 0, sizeof(sig_wr)); - sig_wr.opcode = IB_WR_REG_SIG_MR; - sig_wr.wr_id = ISER_FASTREG_LI_WRID; - sig_wr.sg_list = &data_reg->sge; - sig_wr.num_sge = 1; - sig_wr.wr.sig_handover.sig_attrs = 
&sig_attrs; - sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; + wr = sig_handover_wr(iser_tx_next_wr(tx_desc)); + wr->wr.opcode = IB_WR_REG_SIG_MR; + wr->wr.wr_id = ISER_FASTREG_LI_WRID; + wr->wr.sg_list = &data_reg->sge; + wr->wr.num_sge = 1; + wr->wr.send_flags = 0; + wr->sig_attrs = sig_attrs; + wr->sig_mr = pi_ctx->sig_mr; if (scsi_prot_sg_count(iser_task->sc)) - sig_wr.wr.sig_handover.prot = &prot_reg->sge; - sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE; - - if (!wr) - wr = &sig_wr; + wr->prot = &prot_reg->sge; else - wr->next = &sig_wr; - - ret = ib_post_send(ib_conn->qp, wr, &bad_wr); - if (ret) { - iser_err("reg_sig_mr failed, ret:%d\n", ret); - goto err; - } - desc->reg_indicators &= ~ISER_SIG_KEY_VALID; + wr->prot = NULL; + wr->access_flags = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + pi_ctx->sig_mr_valid = 0; sig_reg->sge.lkey = pi_ctx->sig_mr->lkey; sig_reg->rkey = pi_ctx->sig_mr->rkey; sig_reg->sge.addr = 0; sig_reg->sge.length = scsi_transfer_length(iser_task->sc); - iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n", + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n", sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, sig_reg->sge.length); err: @@ -711,149 +481,139 @@ err: static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem, - struct fast_reg_descriptor *desc, - enum iser_reg_indicator ind, + struct iser_reg_resources *rsc, struct iser_mem_reg *reg) { - struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; - struct iser_device *device = ib_conn->device; - struct ib_mr *mr; - struct ib_fast_reg_page_list *frpl; - struct ib_send_wr fastreg_wr, inv_wr; - struct ib_send_wr *bad_wr, *wr = NULL; - int ret, offset, size, plen; - - /* if there a single dma entry, dma mr suffices */ - if (mem->dma_nents == 1) - return iser_reg_dma(device, mem, reg); - - if (ind == ISER_DATA_KEY_VALID) { - mr = desc->data_mr; - frpl = desc->data_frpl; - } else { - mr = desc->pi_ctx->prot_mr; - frpl = desc->pi_ctx->prot_frpl; + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct ib_mr *mr = rsc->mr; + struct ib_reg_wr *wr; + int n; + + if (!rsc->mr_valid) + iser_inv_rkey(iser_tx_next_wr(tx_desc), mr); + + n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K); + if (unlikely(n != mem->size)) { + iser_err("failed to map sg (%d/%d)\n", + n, mem->size); + return n < 0 ? 
n : -EINVAL; } - plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, - &offset, &size); - if (plen * SIZE_4K < size) { - iser_err("fast reg page_list too short to hold this SG\n"); - return -EINVAL; - } + wr = reg_wr(iser_tx_next_wr(tx_desc)); + wr->wr.opcode = IB_WR_REG_MR; + wr->wr.wr_id = ISER_FASTREG_LI_WRID; + wr->wr.send_flags = 0; + wr->wr.num_sge = 0; + wr->mr = mr; + wr->key = mr->rkey; + wr->access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; - if (!(desc->reg_indicators & ind)) { - iser_inv_rkey(&inv_wr, mr); - wr = &inv_wr; - } - - /* Prepare FASTREG WR */ - memset(&fastreg_wr, 0, sizeof(fastreg_wr)); - fastreg_wr.wr_id = ISER_FASTREG_LI_WRID; - fastreg_wr.opcode = IB_WR_FAST_REG_MR; - fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset; - fastreg_wr.wr.fast_reg.page_list = frpl; - fastreg_wr.wr.fast_reg.page_list_len = plen; - fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K; - fastreg_wr.wr.fast_reg.length = size; - fastreg_wr.wr.fast_reg.rkey = mr->rkey; - fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - - if (!wr) - wr = &fastreg_wr; - else - wr->next = &fastreg_wr; - - ret = ib_post_send(ib_conn->qp, wr, &bad_wr); - if (ret) { - iser_err("fast registration failed, ret:%d\n", ret); - return ret; - } - desc->reg_indicators &= ~ind; + rsc->mr_valid = 0; reg->sge.lkey = mr->lkey; reg->rkey = mr->rkey; - reg->sge.addr = frpl->page_list[0] + offset; - reg->sge.length = size; + reg->sge.addr = mr->iova; + reg->sge.length = mr->length; - return ret; + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n", + reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length); + + return 0; } -/** - * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA, - * using Fast Registration WR (if possible) obtaining rkey and va - * - * returns 0 on success, errno code on failure - */ -int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, - enum iser_data_dir cmd_dir) +static int +iser_reg_prot_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, + bool use_dma_key, + struct iser_mem_reg *reg) { - struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; + struct iser_device *device = task->iser_conn->ib_conn.device; + + if (use_dma_key) + return iser_reg_dma(device, mem, reg); + + return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); +} + +static int +iser_reg_data_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, + bool use_dma_key, + struct iser_mem_reg *reg) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + if (use_dma_key) + return iser_reg_dma(device, mem, reg); + + return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); +} + +int iser_reg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir) +{ + struct ib_conn *ib_conn = &task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; - struct ib_device *ibdev = device->ib_device; - struct iser_data_buf *mem = &iser_task->data[cmd_dir]; - struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir]; - struct fast_reg_descriptor *desc = NULL; - int err, aligned_len; - - aligned_len = iser_data_buf_aligned_len(mem, ibdev); - if (aligned_len != mem->dma_nents) { - err = fall_to_bounce_buf(iser_task, mem, - cmd_dir, aligned_len); - if (err) { - iser_err("failed to allocate bounce buffer\n"); - return err; - } - } + struct iser_data_buf *mem = &task->data[dir]; + 
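/*
 * [Editor's illustration, not part of the patch] The converted fast
 * registration path above follows the generic "map, then post IB_WR_REG_MR"
 * pattern: ib_map_mr_sg() programs the MR's page table from the scatterlist
 * and fills mr->iova/mr->length, and the reg WR publishes the mapping under
 * mr->rkey.  Minimal sketch, assuming an IB_MR_TYPE_MEM_REG mr and a
 * DMA-mapped sg list; the caller still chains and posts the WR:
 */
static int example_map_and_build_reg_wr(struct ib_mr *mr, struct scatterlist *sg,
					int sg_nents, struct ib_reg_wr *wr)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, SIZE_4K);
	if (n != sg_nents)
		return n < 0 ? n : -EINVAL;

	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode = IB_WR_REG_MR;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE;

	return 0;
}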
struct iser_mem_reg *reg = &task->rdma_reg[dir]; + struct iser_mem_reg *data_reg; + struct iser_fr_desc *desc = NULL; + bool use_dma_key; + int err; - if (mem->dma_nents != 1 || - scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { - desc = iser_reg_desc_get(ib_conn); - mem_reg->mem_h = desc; + use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); + + if (!use_dma_key) { + desc = device->reg_ops->reg_desc_get(ib_conn); + reg->mem_h = desc; } - err = iser_fast_reg_mr(iser_task, mem, desc, - ISER_DATA_KEY_VALID, mem_reg); - if (err) + if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) + data_reg = reg; + else + data_reg = &task->desc.data_reg; + + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); + if (unlikely(err)) goto err_reg; - if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { - struct iser_mem_reg prot_reg; - - memset(&prot_reg, 0, sizeof(prot_reg)); - if (scsi_prot_sg_count(iser_task->sc)) { - mem = &iser_task->prot[cmd_dir]; - aligned_len = iser_data_buf_aligned_len(mem, ibdev); - if (aligned_len != mem->dma_nents) { - err = fall_to_bounce_buf(iser_task, mem, - cmd_dir, aligned_len); - if (err) { - iser_err("failed to allocate bounce buffer\n"); - return err; - } - } - - err = iser_fast_reg_mr(iser_task, mem, desc, - ISER_PROT_KEY_VALID, &prot_reg); - if (err) + if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + struct iser_mem_reg *prot_reg = &task->desc.prot_reg; + + if (scsi_prot_sg_count(task->sc)) { + mem = &task->prot[dir]; + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); + if (unlikely(err)) goto err_reg; } - err = iser_reg_sig_mr(iser_task, desc, mem_reg, - &prot_reg, mem_reg); - if (err) { - iser_err("Failed to register signature mr\n"); - return err; - } - desc->reg_indicators |= ISER_FASTREG_PROTECTED; + err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg, + prot_reg, reg); + if (unlikely(err)) + goto err_reg; + + desc->pi_ctx->sig_protected = 1; } return 0; + err_reg: if (desc) - iser_reg_desc_put(ib_conn, desc); + device->reg_ops->reg_desc_put(ib_conn, desc); return err; } + +void iser_unreg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + device->reg_ops->unreg_mem(task, dir); +} diff --git a/kernel/drivers/infiniband/ulp/iser/iser_verbs.c b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c index cc2dd35ff..42f4da620 100644 --- a/kernel/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c @@ -51,19 +51,22 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context); static void iser_cq_event_callback(struct ib_event *cause, void *context) { - iser_err("got cq event %d \n", cause->event); + iser_err("cq event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_qp_event_callback(struct ib_event *cause, void *context) { - iser_err("got qp event %d\n",cause->event); + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_event_handler(struct ib_event_handler *handler, struct ib_event *event) { - iser_err("async event %d on device %s port %d\n", event->event, - event->device->name, event->element.port_num); + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); } /** @@ -84,25 +87,9 @@ static int iser_create_device_ib_res(struct iser_device *device) return ret; } - /* Assign function 
handles - based on FMR support */ - if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr && - device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) { - iser_info("FMR supported, using FMR for registration\n"); - device->iser_alloc_rdma_reg_res = iser_create_fmr_pool; - device->iser_free_rdma_reg_res = iser_free_fmr_pool; - device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr; - device->iser_unreg_rdma_mem = iser_unreg_mem_fmr; - } else - if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { - iser_info("FastReg supported, using FastReg for registration\n"); - device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool; - device->iser_free_rdma_reg_res = iser_free_fastreg_pool; - device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg; - device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg; - } else { - iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); - return -1; - } + ret = iser_assign_reg_ops(device); + if (ret) + return ret; device->comps_used = min_t(int, num_online_cpus(), device->ib_device->num_comp_vectors); @@ -123,14 +110,17 @@ static int iser_create_device_ib_res(struct iser_device *device) goto pd_err; for (i = 0; i < device->comps_used; i++) { + struct ib_cq_init_attr cq_attr = {}; struct iser_comp *comp = &device->comps[i]; comp->device = device; + cq_attr.cqe = max_cqe; + cq_attr.comp_vector = i; comp->cq = ib_create_cq(device->ib_device, iser_cq_callback, iser_cq_event_callback, (void *)comp, - max_cqe, i); + &cq_attr); if (IS_ERR(comp->cq)) { comp->cq = NULL; goto cq_err; @@ -143,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -157,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: for (i = 0; i < device->comps_used; i++) tasklet_kill(&device->comps[i].tasklet); @@ -183,7 +178,6 @@ comps_err: static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); for (i = 0; i < device->comps_used; i++) { struct iser_comp *comp = &device->comps[i]; @@ -194,8 +188,9 @@ static void iser_free_device_ib_res(struct iser_device *device) } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); - (void)ib_dealloc_pd(device->pd); + if (device->mr) + (void)ib_dereg_mr(device->mr); + ib_dealloc_pd(device->pd); kfree(device->comps); device->comps = NULL; @@ -205,28 +200,40 @@ static void iser_free_device_ib_res(struct iser_device *device) } /** - * iser_create_fmr_pool - Creates FMR pool and page_vector + * iser_alloc_fmr_pool - Creates FMR pool and page_vector * * returns 0 on success, or errno code on failure */ -int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) { struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_page_vec *page_vec; + 
struct iser_fr_desc *desc; + struct ib_fmr_pool *fmr_pool; struct ib_fmr_pool_param params; - int ret = -ENOMEM; + int ret; - ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + - (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), - GFP_KERNEL); - if (!ib_conn->fmr.page_vec) - return ret; + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size), + GFP_KERNEL); + if (!page_vec) { + ret = -ENOMEM; + goto err_frpl; + } - ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); + page_vec->pages = (u64 *)(page_vec + 1); params.page_shift = SHIFT_4K; - /* when the first/last SG element are not start/end * - * page aligned, the map whould be of N+1 pages */ - params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; + params.max_pages_per_fmr = size; /* make the pool size twice the max number of SCSI commands * * the ML is expected to queue, watermark for unmap at 50% */ params.pool_size = cmds_max * 2; @@ -237,23 +244,25 @@ int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ); - ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, ¶ms); - if (!IS_ERR(ib_conn->fmr.pool)) - return 0; - - /* no FMR => no need for page_vec */ - kfree(ib_conn->fmr.page_vec); - ib_conn->fmr.page_vec = NULL; - - ret = PTR_ERR(ib_conn->fmr.pool); - ib_conn->fmr.pool = NULL; - if (ret != -ENOSYS) { + fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); + if (IS_ERR(fmr_pool)) { + ret = PTR_ERR(fmr_pool); iser_err("FMR allocation failed, err %d\n", ret); - return ret; - } else { - iser_warn("FMRs are not supported, using unaligned mode\n"); - return 0; + goto err_fmr; } + + desc->rsc.page_vec = page_vec; + desc->rsc.fmr_pool = fmr_pool; + list_add(&desc->list, &fr_pool->list); + + return 0; + +err_fmr: + kfree(page_vec); +err_frpl: + kfree(desc); + + return ret; } /** @@ -261,26 +270,54 @@ int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) */ void iser_free_fmr_pool(struct ib_conn *ib_conn) { + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + iser_info("freeing conn %p fmr pool %p\n", - ib_conn, ib_conn->fmr.pool); + ib_conn, desc->rsc.fmr_pool); + + ib_destroy_fmr_pool(desc->rsc.fmr_pool); + kfree(desc->rsc.page_vec); + kfree(desc); +} + +static int +iser_alloc_reg_res(struct ib_device *ib_device, + struct ib_pd *pd, + struct iser_reg_resources *res, + unsigned int size) +{ + int ret; - if (ib_conn->fmr.pool != NULL) - ib_destroy_fmr_pool(ib_conn->fmr.pool); + res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size); + if (IS_ERR(res->mr)) { + ret = PTR_ERR(res->mr); + iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); + return ret; + } + res->mr_valid = 1; - ib_conn->fmr.pool = NULL; + return 0; +} - kfree(ib_conn->fmr.page_vec); - ib_conn->fmr.page_vec = NULL; +static void +iser_free_reg_res(struct iser_reg_resources *rsc) +{ + ib_dereg_mr(rsc->mr); } static int -iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd, - struct fast_reg_descriptor *desc) +iser_alloc_pi_ctx(struct ib_device *ib_device, + struct ib_pd *pd, + struct iser_fr_desc *desc, + unsigned int size) { struct iser_pi_context *pi_ctx = NULL; - struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2, - .flags = IB_MR_SIGNATURE_EN}; - int ret = 0; + int ret; 
desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); if (!desc->pi_ctx) @@ -288,36 +325,25 @@ iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd, pi_ctx = desc->pi_ctx; - pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(pi_ctx->prot_frpl)) { - ret = PTR_ERR(pi_ctx->prot_frpl); - goto prot_frpl_failure; - } - - pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, - ISCSI_ISER_SG_TABLESIZE + 1); - if (IS_ERR(pi_ctx->prot_mr)) { - ret = PTR_ERR(pi_ctx->prot_mr); - goto prot_mr_failure; + ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size); + if (ret) { + iser_err("failed to allocate reg_resources\n"); + goto alloc_reg_res_err; } - desc->reg_indicators |= ISER_PROT_KEY_VALID; - pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); + pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); if (IS_ERR(pi_ctx->sig_mr)) { ret = PTR_ERR(pi_ctx->sig_mr); goto sig_mr_failure; } - desc->reg_indicators |= ISER_SIG_KEY_VALID; - desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; + pi_ctx->sig_mr_valid = 1; + desc->pi_ctx->sig_protected = 0; return 0; sig_mr_failure: - ib_dereg_mr(desc->pi_ctx->prot_mr); -prot_mr_failure: - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); -prot_frpl_failure: + iser_free_reg_res(&pi_ctx->rsc); +alloc_reg_res_err: kfree(desc->pi_ctx); return ret; @@ -326,82 +352,71 @@ prot_frpl_failure: static void iser_free_pi_ctx(struct iser_pi_context *pi_ctx) { - ib_free_fast_reg_page_list(pi_ctx->prot_frpl); - ib_dereg_mr(pi_ctx->prot_mr); - ib_destroy_mr(pi_ctx->sig_mr); + iser_free_reg_res(&pi_ctx->rsc); + ib_dereg_mr(pi_ctx->sig_mr); kfree(pi_ctx); } -static int -iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, - bool pi_enable, struct fast_reg_descriptor *desc) +static struct iser_fr_desc * +iser_create_fastreg_desc(struct ib_device *ib_device, + struct ib_pd *pd, + bool pi_enable, + unsigned int size) { + struct iser_fr_desc *desc; int ret; - desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE + 1); - if (IS_ERR(desc->data_frpl)) { - ret = PTR_ERR(desc->data_frpl); - iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", - ret); - return PTR_ERR(desc->data_frpl); - } + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); - desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1); - if (IS_ERR(desc->data_mr)) { - ret = PTR_ERR(desc->data_mr); - iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); - goto fast_reg_mr_failure; - } - desc->reg_indicators |= ISER_DATA_KEY_VALID; + ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size); + if (ret) + goto reg_res_alloc_failure; if (pi_enable) { - ret = iser_alloc_pi_ctx(ib_device, pd, desc); + ret = iser_alloc_pi_ctx(ib_device, pd, desc, size); if (ret) goto pi_ctx_alloc_failure; } - return 0; + return desc; + pi_ctx_alloc_failure: - ib_dereg_mr(desc->data_mr); -fast_reg_mr_failure: - ib_free_fast_reg_page_list(desc->data_frpl); + iser_free_reg_res(&desc->rsc); +reg_res_alloc_failure: + kfree(desc); - return ret; + return ERR_PTR(ret); } /** - * iser_create_fastreg_pool - Creates pool of fast_reg descriptors + * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors * for fast registration work requests. 
* returns 0 on success, or errno code on failure */ -int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max) +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) { struct iser_device *device = ib_conn->device; - struct fast_reg_descriptor *desc; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; int i, ret; - INIT_LIST_HEAD(&ib_conn->fastreg.pool); - ib_conn->fastreg.pool_size = 0; + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); + fr_pool->size = 0; for (i = 0; i < cmds_max; i++) { - desc = kzalloc(sizeof(*desc), GFP_KERNEL); - if (!desc) { - iser_err("Failed to allocate a new fast_reg descriptor\n"); - ret = -ENOMEM; - goto err; - } - - ret = iser_create_fastreg_desc(device->ib_device, device->pd, - ib_conn->pi_support, desc); - if (ret) { - iser_err("Failed to create fastreg descriptor err=%d\n", - ret); - kfree(desc); + desc = iser_create_fastreg_desc(device->ib_device, device->pd, + ib_conn->pi_support, size); + if (IS_ERR(desc)) { + ret = PTR_ERR(desc); goto err; } - list_add_tail(&desc->list, &ib_conn->fastreg.pool); - ib_conn->fastreg.pool_size++; + list_add_tail(&desc->list, &fr_pool->list); + fr_pool->size++; } return 0; @@ -416,27 +431,27 @@ err: */ void iser_free_fastreg_pool(struct ib_conn *ib_conn) { - struct fast_reg_descriptor *desc, *tmp; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc, *tmp; int i = 0; - if (list_empty(&ib_conn->fastreg.pool)) + if (list_empty(&fr_pool->list)) return; iser_info("freeing conn %p fr pool\n", ib_conn); - list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { + list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { list_del(&desc->list); - ib_free_fast_reg_page_list(desc->data_frpl); - ib_dereg_mr(desc->data_mr); + iser_free_reg_res(&desc->rsc); if (desc->pi_ctx) iser_free_pi_ctx(desc->pi_ctx); kfree(desc); ++i; } - if (i < ib_conn->fastreg.pool_size) + if (i < fr_pool->size) iser_warn("pool still has %d regions registered\n", - ib_conn->fastreg.pool_size - i); + fr_pool->size - i); } /** @@ -732,6 +747,31 @@ static void iser_connect_error(struct rdma_cm_id *cma_id) iser_conn->state = ISER_CONN_TERMINATING; } +static void +iser_calc_scsi_params(struct iser_conn *iser_conn, + unsigned int max_sectors) +{ + struct iser_device *device = iser_conn->ib_conn.device; + unsigned short sg_tablesize, sup_sg_tablesize; + + sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); + sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, + device->dev_attr.max_fast_reg_page_list_len); + + if (sg_tablesize > sup_sg_tablesize) { + sg_tablesize = sup_sg_tablesize; + iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512; + } else { + iser_conn->scsi_max_sectors = max_sectors; + } + + iser_conn->scsi_sg_tablesize = sg_tablesize; + + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", + iser_conn, iser_conn->scsi_sg_tablesize, + iser_conn->scsi_max_sectors); +} + /** * Called with state mutex held **/ @@ -770,6 +810,8 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) } } + iser_calc_scsi_params(iser_conn, iser_max_sectors); + ret = rdma_resolve_route(cma_id, 1000); if (ret) { iser_err("resolve route failed: %d\n", ret); @@ -873,8 +915,9 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve int ret = 0; iser_conn = (struct iser_conn *)cma_id->context; - iser_info("event %d status %d conn %p id %p\n", - event->event, event->status, 
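/*
 * [Editor's note, illustrative numbers only] iser_calc_scsi_params() above
 * sizes the SG table from the requested transfer size: with, say,
 * max_sectors = 1024 (512 KB I/Os), sg_tablesize = DIV_ROUND_UP(1024 * 512,
 * 4096) = 128 pages.  If the device's max_fast_reg_page_list_len only
 * allows 64 entries, the table is clamped to 64 and scsi_max_sectors is
 * scaled back to 64 * 4096 / 512 = 512 sectors (256 KB).
 */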
cma_id->context, cma_id); + iser_info("%s (%d): status %d conn %p id %p\n", + rdma_event_msg(event->event), event->event, + event->status, cma_id->context, cma_id); mutex_lock(&iser_conn->state_mutex); switch (event->event) { @@ -913,7 +956,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve } break; default: - iser_err("Unexpected RDMA CM event (%d)\n", event->event); + iser_err("Unexpected RDMA CM event: %s (%d)\n", + rdma_event_msg(event->event), event->event); break; } mutex_unlock(&iser_conn->state_mutex); @@ -930,7 +974,6 @@ void iser_conn_init(struct iser_conn *iser_conn) init_completion(&iser_conn->ib_completion); init_completion(&iser_conn->up_completion); INIT_LIST_HEAD(&iser_conn->conn_list); - spin_lock_init(&iser_conn->ib_conn.lock); mutex_init(&iser_conn->state_mutex); } @@ -960,7 +1003,7 @@ int iser_connect(struct iser_conn *iser_conn, ib_conn->beacon.wr_id = ISER_BEACON_WRID; ib_conn->beacon.opcode = IB_WR_SEND; - ib_conn->cma_id = rdma_create_id(iser_cma_handler, + ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, (void *)iser_conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ib_conn->cma_id)) { @@ -1009,7 +1052,7 @@ int iser_post_recvl(struct iser_conn *iser_conn) sge.addr = iser_conn->login_resp_dma; sge.length = ISER_RX_LOGIN_SIZE; - sge.lkey = ib_conn->device->mr->lkey; + sge.lkey = ib_conn->device->pd->local_dma_lkey; rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf; rx_wr.sg_list = &sge; @@ -1064,23 +1107,24 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count) int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, bool signal) { - int ib_ret; - struct ib_send_wr send_wr, *send_wr_failed; + struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc); + int ib_ret; ib_dma_sync_single_for_device(ib_conn->device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); - send_wr.next = NULL; - send_wr.wr_id = (uintptr_t)tx_desc; - send_wr.sg_list = tx_desc->tx_sg; - send_wr.num_sge = tx_desc->num_sge; - send_wr.opcode = IB_WR_SEND; - send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0; + wr->next = NULL; + wr->wr_id = (uintptr_t)tx_desc; + wr->sg_list = tx_desc->tx_sg; + wr->num_sge = tx_desc->num_sge; + wr->opcode = IB_WR_SEND; + wr->send_flags = signal ? 
IB_SEND_SIGNALED : 0; - ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); + ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr); if (ib_ret) - iser_err("ib_post_send failed, ret:%d\n", ib_ret); + iser_err("ib_post_send failed, ret:%d opcode:%d\n", + ib_ret, bad_wr->opcode); return ib_ret; } @@ -1173,10 +1217,13 @@ static void iser_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - iser_err("wr id %llx status %d vend_err %x\n", - wc->wr_id, wc->status, wc->vendor_err); + iser_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); else - iser_dbg("flush error: wr id %llx\n", wc->wr_id); + iser_dbg("%s (%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); if (wc->wr_id == ISER_BEACON_WRID) /* all flush errors were consumed */ @@ -1229,13 +1276,13 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir, sector_t *sector) { struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; - struct fast_reg_descriptor *desc = reg->mem_h; + struct iser_fr_desc *desc = reg->mem_h; unsigned long sector_size = iser_task->sc->device->sector_size; struct ib_mr_status mr_status; int ret; - if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) { - desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; + if (desc && desc->pi_ctx->sig_protected) { + desc->pi_ctx->sig_protected = 0; ret = ib_check_mr_status(desc->pi_ctx->sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { @@ -1246,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { sector_t sector_off = mr_status.sig_err.sig_err_offset; - do_div(sector_off, sector_size + 8); + sector_div(sector_off, sector_size + 8); *sector = scsi_get_lba(iser_task->sc) + sector_off; pr_err("PI error found type %d at sector %llx " diff --git a/kernel/drivers/infiniband/ulp/isert/ib_isert.c b/kernel/drivers/infiniband/ulp/isert/ib_isert.c index 575a072d7..8a51c3b5d 100644 --- a/kernel/drivers/infiniband/ulp/isert/ib_isert.c +++ b/kernel/drivers/infiniband/ulp/isert/ib_isert.c @@ -80,7 +80,9 @@ isert_qp_event_callback(struct ib_event *e, void *context) { struct isert_conn *isert_conn = context; - isert_err("conn %p event: %d\n", isert_conn, e->event); + isert_err("%s (%d): conn %p\n", + ib_event_msg(e->event), e->event, isert_conn); + switch (e->event) { case IB_EVENT_COMM_EST: rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); @@ -155,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn, attr.recv_cq = comp->cq; attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; - /* - * FIXME: Use devattr.max_sge - 2 for max_send_sge as - * work-around for RDMA_READs with ConnectX-2. - * - * Also, still make sure to have at least two SGEs for - * outgoing control PDU responses. 
- */ - attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); - isert_conn->max_sge = attr.cap.max_send_sge; - + attr.cap.max_send_sge = device->dev_attr.max_sge; + isert_conn->max_sge = min(device->dev_attr.max_sge, + device->dev_attr.max_sge_rd); attr.cap.max_recv_sge = 1; attr.sq_sig_type = IB_SIGNAL_REQ_WR; attr.qp_type = IB_QPT_RC; @@ -233,11 +228,9 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_sg = &rx_desc->rx_sg; rx_sg->addr = rx_desc->dma_addr; rx_sg->length = ISER_RX_PAYLOAD_SIZE; - rx_sg->lkey = device->mr->lkey; + rx_sg->lkey = device->pd->local_dma_lkey; } - isert_conn->rx_desc_head = 0; - return 0; dma_map_fail: @@ -318,15 +311,18 @@ isert_alloc_comps(struct isert_device *device, max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe); for (i = 0; i < device->comps_used; i++) { + struct ib_cq_init_attr cq_attr = {}; struct isert_comp *comp = &device->comps[i]; comp->device = device; INIT_WORK(&comp->work, isert_cq_work); + cq_attr.cqe = max_cqe; + cq_attr.comp_vector = i; comp->cq = ib_create_cq(device->ib_device, isert_cq_callback, isert_cq_event_callback, (void *)comp, - max_cqe, i); + &cq_attr); if (IS_ERR(comp->cq)) { isert_err("Unable to allocate cq\n"); ret = PTR_ERR(comp->cq); @@ -380,22 +376,12 @@ isert_create_device_ib_res(struct isert_device *device) goto out_cq; } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(device->mr)) { - ret = PTR_ERR(device->mr); - isert_err("failed to create dma mr, device %p, ret=%d\n", - device, ret); - goto out_mr; - } - /* Check signature cap */ device->pi_capable = dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER ? true : false; return 0; -out_mr: - ib_dealloc_pd(device->pd); out_cq: isert_free_comps(device); return ret; @@ -406,7 +392,6 @@ isert_free_device_ib_res(struct isert_device *device) { isert_info("device %p\n", device); - ib_dereg_mr(device->mr); ib_dealloc_pd(device->pd); isert_free_comps(device); } @@ -481,12 +466,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) list_for_each_entry_safe(fr_desc, tmp, &isert_conn->fr_pool, list) { list_del(&fr_desc->list); - ib_free_fast_reg_page_list(fr_desc->data_frpl); ib_dereg_mr(fr_desc->data_mr); if (fr_desc->pi_ctx) { - ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl); ib_dereg_mr(fr_desc->pi_ctx->prot_mr); - ib_destroy_mr(fr_desc->pi_ctx->sig_mr); + ib_dereg_mr(fr_desc->pi_ctx->sig_mr); kfree(fr_desc->pi_ctx); } kfree(fr_desc); @@ -503,7 +486,6 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, struct ib_device *device, struct ib_pd *pd) { - struct ib_mr_init_attr mr_init_attr; struct pi_context *pi_ctx; int ret; @@ -513,28 +495,17 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, return -ENOMEM; } - pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(pi_ctx->prot_frpl)) { - isert_err("Failed to allocate prot frpl err=%ld\n", - PTR_ERR(pi_ctx->prot_frpl)); - ret = PTR_ERR(pi_ctx->prot_frpl); - goto err_pi_ctx; - } - - pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); + pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, + ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(pi_ctx->prot_mr)) { isert_err("Failed to allocate prot frmr err=%ld\n", PTR_ERR(pi_ctx->prot_mr)); ret = PTR_ERR(pi_ctx->prot_mr); - goto err_prot_frpl; + goto err_pi_ctx; } desc->ind |= ISERT_PROT_KEY_VALID; - memset(&mr_init_attr, 0, sizeof(mr_init_attr)); - mr_init_attr.max_reg_descriptors = 2; - mr_init_attr.flags |= IB_MR_SIGNATURE_EN; - pi_ctx->sig_mr = ib_create_mr(pd, 
&mr_init_attr); + pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); if (IS_ERR(pi_ctx->sig_mr)) { isert_err("Failed to allocate signature enabled mr err=%ld\n", PTR_ERR(pi_ctx->sig_mr)); @@ -550,8 +521,6 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, err_prot_mr: ib_dereg_mr(pi_ctx->prot_mr); -err_prot_frpl: - ib_free_fast_reg_page_list(pi_ctx->prot_frpl); err_pi_ctx: kfree(pi_ctx); @@ -562,33 +531,18 @@ static int isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, struct fast_reg_descriptor *fr_desc) { - int ret; - - fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(fr_desc->data_frpl)) { - isert_err("Failed to allocate data frpl err=%ld\n", - PTR_ERR(fr_desc->data_frpl)); - return PTR_ERR(fr_desc->data_frpl); - } - - fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); + fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, + ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_mr)) { isert_err("Failed to allocate data frmr err=%ld\n", PTR_ERR(fr_desc->data_mr)); - ret = PTR_ERR(fr_desc->data_mr); - goto err_data_frpl; + return PTR_ERR(fr_desc->data_mr); } fr_desc->ind |= ISERT_DATA_KEY_VALID; isert_dbg("Created fr_desc %p\n", fr_desc); return 0; - -err_data_frpl: - ib_free_fast_reg_page_list(fr_desc->data_frpl); - - return ret; } static int @@ -642,7 +596,7 @@ static void isert_init_conn(struct isert_conn *isert_conn) { isert_conn->state = ISER_CONN_INIT; - INIT_LIST_HEAD(&isert_conn->accept_node); + INIT_LIST_HEAD(&isert_conn->node); init_completion(&isert_conn->login_comp); init_completion(&isert_conn->login_req_comp); init_completion(&isert_conn->wait); @@ -775,12 +729,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) if (ret) goto out_conn_dev; - mutex_lock(&isert_np->np_accept_mutex); - list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); - mutex_unlock(&isert_np->np_accept_mutex); + mutex_lock(&isert_np->mutex); + list_add_tail(&isert_conn->node, &isert_np->accepted); + mutex_unlock(&isert_np->mutex); - isert_info("np %p: Allow accept_np to continue\n", np); - up(&isert_np->np_sem); return 0; out_conn_dev: @@ -828,18 +780,21 @@ static void isert_connected_handler(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + struct isert_np *isert_np = cma_id->context; isert_info("conn %p\n", isert_conn); - if (!kref_get_unless_zero(&isert_conn->kref)) { - isert_warn("conn %p connect_release is running\n", isert_conn); - return; - } - mutex_lock(&isert_conn->mutex); - if (isert_conn->state != ISER_CONN_FULL_FEATURE) - isert_conn->state = ISER_CONN_UP; + isert_conn->state = ISER_CONN_UP; + kref_get(&isert_conn->kref); mutex_unlock(&isert_conn->mutex); + + mutex_lock(&isert_np->mutex); + list_move_tail(&isert_conn->node, &isert_np->pending); + mutex_unlock(&isert_np->mutex); + + isert_info("np %p: Allow accept_np to continue\n", isert_np); + up(&isert_np->sem); } static void @@ -900,18 +855,19 @@ static int isert_np_cma_handler(struct isert_np *isert_np, enum rdma_cm_event_type event) { - isert_dbg("isert np %p, handling event %d\n", isert_np, event); + isert_dbg("%s (%d): isert np %p\n", + rdma_event_msg(event), event, isert_np); switch (event) { case RDMA_CM_EVENT_DEVICE_REMOVAL: - isert_np->np_cm_id = NULL; + isert_np->cm_id = NULL; break; case RDMA_CM_EVENT_ADDR_CHANGE: - isert_np->np_cm_id = isert_setup_id(isert_np); - if (IS_ERR(isert_np->np_cm_id)) { + isert_np->cm_id = isert_setup_id(isert_np); + if 
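/*
 * [Editor's illustration, not part of the patch] Both iser and isert now
 * obtain their signature-enabled MR from the generic allocator instead of
 * the removed ib_create_mr()/ib_mr_init_attr interface.  Hedged sketch,
 * assuming a valid PD; the "2" is the number of registration descriptors
 * (data + protection) the signature MR may reference:
 */
static struct ib_mr *example_alloc_sig_mr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
}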
(IS_ERR(isert_np->cm_id)) { isert_err("isert np %p setup id failed: %ld\n", - isert_np, PTR_ERR(isert_np->np_cm_id)); - isert_np->np_cm_id = NULL; + isert_np, PTR_ERR(isert_np->cm_id)); + isert_np->cm_id = NULL; } break; default: @@ -930,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, struct isert_conn *isert_conn; bool terminating = false; - if (isert_np->np_cm_id == cma_id) + if (isert_np->cm_id == cma_id) return isert_np_cma_handler(cma_id->context, event); isert_conn = cma_id->qp->qp_context; @@ -946,13 +902,13 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, if (terminating) goto out; - mutex_lock(&isert_np->np_accept_mutex); - if (!list_empty(&isert_conn->accept_node)) { - list_del_init(&isert_conn->accept_node); + mutex_lock(&isert_np->mutex); + if (!list_empty(&isert_conn->node)) { + list_del_init(&isert_conn->node); isert_put_conn(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); } - mutex_unlock(&isert_np->np_accept_mutex); + mutex_unlock(&isert_np->mutex); out: return 0; @@ -963,6 +919,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + list_del_init(&isert_conn->node); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); @@ -974,7 +931,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - isert_info("event %d status %d id %p np %p\n", event->event, + isert_info("%s (%d): status %d id %p np %p\n", + rdma_event_msg(event->event), event->event, event->status, cma_id, cma_id->context); switch (event->event) { @@ -1006,35 +964,51 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) } static int -isert_post_recv(struct isert_conn *isert_conn, u32 count) +isert_post_recvm(struct isert_conn *isert_conn, u32 count) { struct ib_recv_wr *rx_wr, *rx_wr_failed; int i, ret; - unsigned int rx_head = isert_conn->rx_desc_head; struct iser_rx_desc *rx_desc; for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { - rx_desc = &isert_conn->rx_descs[rx_head]; - rx_wr->wr_id = (uintptr_t)rx_desc; - rx_wr->sg_list = &rx_desc->rx_sg; - rx_wr->num_sge = 1; - rx_wr->next = rx_wr + 1; - rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1); + rx_desc = &isert_conn->rx_descs[i]; + rx_wr->wr_id = (uintptr_t)rx_desc; + rx_wr->sg_list = &rx_desc->rx_sg; + rx_wr->num_sge = 1; + rx_wr->next = rx_wr + 1; } - rx_wr--; rx_wr->next = NULL; /* mark end of work requests list */ isert_conn->post_recv_buf_count += count; ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, - &rx_wr_failed); + &rx_wr_failed); if (ret) { isert_err("ib_post_recv() failed with ret: %d\n", ret); isert_conn->post_recv_buf_count -= count; - } else { - isert_dbg("Posted %d RX buffers\n", count); - isert_conn->rx_desc_head = rx_head; } + + return ret; +} + +static int +isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) +{ + struct ib_recv_wr *rx_wr_failed, rx_wr; + int ret; + + rx_wr.wr_id = (uintptr_t)rx_desc; + rx_wr.sg_list = &rx_desc->rx_sg; + rx_wr.num_sge = 1; + rx_wr.next = NULL; + + isert_conn->post_recv_buf_count++; + ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); + if (ret) { + isert_err("ib_post_recv() failed with ret: %d\n", ret); + isert_conn->post_recv_buf_count--; + } + return ret; } @@ -1079,8 +1053,8 @@ isert_create_send_desc(struct isert_conn *isert_conn, tx_desc->num_sge = 1; tx_desc->isert_cmd = isert_cmd; - if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { - tx_desc->tx_sg[0].lkey = device->mr->lkey; + if 
(tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); } } @@ -1103,7 +1077,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, tx_desc->dma_addr = dma_addr; tx_desc->tx_sg[0].addr = tx_desc->dma_addr; tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; - tx_desc->tx_sg[0].lkey = device->mr->lkey; + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, @@ -1136,7 +1110,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) memset(&sge, 0, sizeof(struct ib_sge)); sge.addr = isert_conn->login_req_dma; sge.length = ISER_RX_LOGIN_SIZE; - sge.lkey = isert_conn->device->mr->lkey; + sge.lkey = isert_conn->device->pd->local_dma_lkey; isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", sge.addr, sge.length, sge.lkey); @@ -1186,7 +1160,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, tx_dsg->addr = isert_conn->login_rsp_dma; tx_dsg->length = length; - tx_dsg->lkey = isert_conn->device->mr->lkey; + tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; tx_desc->num_sge = 2; } if (!login->login_failed) { @@ -1205,7 +1179,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, if (ret) return ret; - ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); + ret = isert_post_recvm(isert_conn, + ISERT_QP_MAX_RECV_DTOS); if (ret) return ret; @@ -1278,7 +1253,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) } static struct iscsi_cmd -*isert_allocate_cmd(struct iscsi_conn *conn) +*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) { struct isert_conn *isert_conn = conn->context; struct isert_cmd *isert_cmd; @@ -1292,6 +1267,7 @@ static struct iscsi_cmd isert_cmd = iscsit_priv_cmd(cmd); isert_cmd->conn = isert_conn; isert_cmd->iscsi_cmd = cmd; + isert_cmd->rx_desc = rx_desc; return cmd; } @@ -1303,9 +1279,9 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, { struct iscsi_conn *conn = isert_conn->conn; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; - struct scatterlist *sg; int imm_data, imm_data_len, unsol_data, sg_nents, rc; bool dump_payload = false; + unsigned int data_len; rc = iscsit_setup_scsi_cmd(conn, cmd, buf); if (rc < 0) @@ -1314,7 +1290,10 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, imm_data = cmd->immediate_data; imm_data_len = cmd->first_burst_len; unsol_data = cmd->unsolicited_data; + data_len = cmd->se_cmd.data_length; + if (imm_data && imm_data_len == data_len) + cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; rc = iscsit_process_scsi_cmd(conn, cmd, hdr); if (rc < 0) { return 0; @@ -1326,13 +1305,20 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, if (!imm_data) return 0; - sg = &cmd->se_cmd.t_data_sg[0]; - sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); - - isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", - sg, sg_nents, &rx_desc->data[0], imm_data_len); - - sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); + if (imm_data_len != data_len) { + sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); + sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, + &rx_desc->data[0], imm_data_len); + isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", + sg_nents, imm_data_len); + } else { + sg_init_table(&isert_cmd->sg, 1); + cmd->se_cmd.t_data_sg = &isert_cmd->sg; + 
cmd->se_cmd.t_data_nents = 1; + sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + isert_dbg("Transfer Immediate imm_data_len: %d\n", + imm_data_len); + } cmd->write_data_done += imm_data_len; @@ -1349,7 +1335,7 @@ sequence_cmd: if (!rc && dump_payload == false && unsol_data) iscsit_set_unsoliticed_dataout(cmd); else if (dump_payload && imm_data) - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); + target_put_sess_cmd(&cmd->se_cmd); return 0; } @@ -1407,6 +1393,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, if (rc < 0) return rc; + /* + * multiple data-outs on the same command can arrive - + * so post the buffer before hand + */ + rc = isert_post_recv(isert_conn, rx_desc); + if (rc) { + isert_err("ib_post_recv failed with %d\n", rc); + return rc; + } return 0; } @@ -1479,7 +1474,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, switch (opcode) { case ISCSI_OP_SCSI_CMD: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; @@ -1493,7 +1488,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_NOOP_OUT: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; @@ -1506,7 +1501,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; @@ -1514,22 +1509,20 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_LOGOUT: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); break; case ISCSI_OP_TEXT: - if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) { + if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); - if (!cmd) - break; - } else { - cmd = isert_allocate_cmd(conn); - if (!cmd) - break; - } + else + cmd = isert_allocate_cmd(conn, rx_desc); + + if (!cmd) + break; isert_cmd = iscsit_priv_cmd(cmd); ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, @@ -1550,7 +1543,6 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) struct iser_hdr *iser_hdr = &rx_desc->iser_header; uint64_t read_va = 0, write_va = 0; uint32_t read_stag = 0, write_stag = 0; - int rc; switch (iser_hdr->flags & 0xF0) { case ISCSI_CTRL: @@ -1577,8 +1569,8 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) break; } - rc = isert_rx_opcode(isert_conn, rx_desc, - read_stag, read_va, write_stag, write_va); + isert_rx_opcode(isert_conn, rx_desc, + read_stag, read_va, write_stag, write_va); } static void @@ -1589,7 +1581,7 @@ isert_rcv_completion(struct iser_rx_desc *desc, struct ib_device *ib_dev = isert_conn->cm_id->device; struct iscsi_hdr *hdr; u64 rx_dma; - int rx_buflen, outstanding; + int rx_buflen; if ((char *)desc == isert_conn->login_req_buf) { rx_dma = isert_conn->login_req_dma; @@ -1629,22 +1621,6 @@ isert_rcv_completion(struct iser_rx_desc *desc, DMA_FROM_DEVICE); isert_conn->post_recv_buf_count--; - isert_dbg("Decremented post_recv_buf_count: %d\n", - isert_conn->post_recv_buf_count); - - if ((char *)desc == isert_conn->login_req_buf) - return; - - outstanding = isert_conn->post_recv_buf_count; - if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) { - int 
err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding, - ISERT_MIN_POSTED_RX); - err = isert_post_recv(isert_conn, count); - if (err) { - isert_err("isert_post_recv() count: %d failed, %d\n", - count, err); - } - } } static int @@ -1703,10 +1679,10 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) isert_unmap_data_buf(isert_conn, &wr->data); } - if (wr->send_wr) { + if (wr->rdma_wr) { isert_dbg("Cmd %p free send_wr\n", isert_cmd); - kfree(wr->send_wr); - wr->send_wr = NULL; + kfree(wr->rdma_wr); + wr->rdma_wr = NULL; } if (wr->ib_sge) { @@ -1741,7 +1717,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) } wr->ib_sge = NULL; - wr->send_wr = NULL; + wr->rdma_wr = NULL; } static void @@ -1774,7 +1750,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { struct se_cmd *se_cmd = &cmd->se_cmd; - target_put_sess_cmd(se_cmd->se_sess, se_cmd); + target_put_sess_cmd(se_cmd); } } @@ -1910,7 +1886,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc, } device->unreg_rdma_mem(isert_cmd, isert_conn); - wr->send_wr_num = 0; + wr->rdma_wr_num = 0; if (ret) transport_send_check_condition_and_sense(se_cmd, se_cmd->pi_err, 0); @@ -1938,7 +1914,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); cmd->write_data_done = wr->data.len; - wr->send_wr_num = 0; + wr->rdma_wr_num = 0; isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); spin_lock_bh(&cmd->istate_lock); @@ -1947,7 +1923,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, spin_unlock_bh(&cmd->istate_lock); if (ret) { - target_put_sess_cmd(se_cmd->se_sess, se_cmd); + target_put_sess_cmd(se_cmd); transport_send_check_condition_and_sense(se_cmd, se_cmd->pi_err, 0); } else { @@ -2108,10 +2084,13 @@ isert_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - isert_err("wr id %llx status %d vend_err %x\n", - wc->wr_id, wc->status, wc->vendor_err); + isert_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); else - isert_dbg("flush error: wr id %llx\n", wc->wr_id); + isert_dbg("%s (%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); if (wc->wr_id != ISER_FASTREG_LI_WRID) isert_cq_comp_err(isert_conn, wc); @@ -2153,6 +2132,12 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) struct ib_send_wr *wr_failed; int ret; + ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); + if (ret) { + isert_err("ib_post_recv failed with %d\n", ret); + return ret; + } + ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, &wr_failed); if (ret) { @@ -2200,7 +2185,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_cmd->pdu_buf_len = pdu_len; tx_dsg->addr = isert_cmd->pdu_buf_dma; tx_dsg->length = pdu_len; - tx_dsg->lkey = device->mr->lkey; + tx_dsg->lkey = device->pd->local_dma_lkey; isert_cmd->tx_desc.num_sge = 2; } @@ -2328,7 +2313,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; tx_dsg->addr = isert_cmd->pdu_buf_dma; tx_dsg->length = ISCSI_HDR_LEN; - tx_dsg->lkey = device->mr->lkey; + tx_dsg->lkey = device->pd->local_dma_lkey; isert_cmd->tx_desc.num_sge = 2; isert_init_send_wr(isert_conn, isert_cmd, send_wr); @@ -2369,7 +2354,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 
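
Note: with the fixed-size receive ring logic removed above, the isert hunks that follow re-post the rx descriptor a request arrived on immediately before its response is sent, keeping receives balanced one-for-one with sends. A hedged sketch of that post-recv-then-send ordering; the helper and parameter names are placeholders, not driver symbols:

#include <rdma/ib_verbs.h>

/* Sketch: repost one receive buffer, then post the response send WR. */
static int post_response_with_credit(struct ib_qp *qp, struct ib_sge *rx_sge,
				     u64 rx_cookie, struct ib_send_wr *send_wr)
{
	struct ib_recv_wr rx_wr = {}, *bad_rx;
	struct ib_send_wr *bad_tx;
	int ret;

	rx_wr.wr_id   = rx_cookie;	/* identifies the rx descriptor */
	rx_wr.sg_list = rx_sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(qp, &rx_wr, &bad_rx);
	if (ret)
		return ret;

	return ib_post_send(qp, send_wr, &bad_tx);
}
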
isert_cmd->pdu_buf_len = txt_rsp_len; tx_dsg->addr = isert_cmd->pdu_buf_dma; tx_dsg->length = txt_rsp_len; - tx_dsg->lkey = device->mr->lkey; + tx_dsg->lkey = device->pd->local_dma_lkey; isert_cmd->tx_desc.num_sge = 2; } isert_init_send_wr(isert_conn, isert_cmd, send_wr); @@ -2381,7 +2366,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) static int isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, - struct ib_sge *ib_sge, struct ib_send_wr *send_wr, + struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr, u32 data_left, u32 offset) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; @@ -2396,8 +2381,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); page_off = offset % PAGE_SIZE; - send_wr->sg_list = ib_sge; - send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; + rdma_wr->wr.sg_list = ib_sge; + rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc; /* * Perform mapping of TCM scatterlist memory ib_sge dma_addr. */ @@ -2410,7 +2395,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; ib_sge->length = min_t(u32, data_left, ib_sg_dma_len(ib_dev, tmp_sg) - page_off); - ib_sge->lkey = device->mr->lkey; + ib_sge->lkey = device->pd->local_dma_lkey; isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", ib_sge->addr, ib_sge->length, ib_sge->lkey); @@ -2422,11 +2407,11 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); } - send_wr->num_sge = ++i; + rdma_wr->wr.num_sge = ++i; isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", - send_wr->sg_list, send_wr->num_sge); + rdma_wr->wr.sg_list, rdma_wr->wr.num_sge); - return send_wr->num_sge; + return rdma_wr->wr.num_sge; } static int @@ -2437,7 +2422,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); struct isert_conn *isert_conn = conn->context; struct isert_data_buf *data = &wr->data; - struct ib_send_wr *send_wr; + struct ib_rdma_wr *rdma_wr; struct ib_sge *ib_sge; u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; int ret = 0, i, ib_sge_cnt; @@ -2462,11 +2447,11 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, } wr->ib_sge = ib_sge; - wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); - wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, + wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); + wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num, GFP_KERNEL); - if (!wr->send_wr) { - isert_dbg("Unable to allocate wr->send_wr\n"); + if (!wr->rdma_wr) { + isert_dbg("Unable to allocate wr->rdma_wr\n"); ret = -ENOMEM; goto unmap_cmd; } @@ -2474,31 +2459,31 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, wr->isert_cmd = isert_cmd; rdma_write_max = isert_conn->max_sge * PAGE_SIZE; - for (i = 0; i < wr->send_wr_num; i++) { - send_wr = &isert_cmd->rdma_wr.send_wr[i]; + for (i = 0; i < wr->rdma_wr_num; i++) { + rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i]; data_len = min(data_left, rdma_write_max); - send_wr->send_flags = 0; + rdma_wr->wr.send_flags = 0; if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { - send_wr->opcode = IB_WR_RDMA_WRITE; - send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; - send_wr->wr.rdma.rkey = isert_cmd->read_stag; - if (i + 1 
== wr->send_wr_num) - send_wr->next = &isert_cmd->tx_desc.send_wr; + rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; + rdma_wr->remote_addr = isert_cmd->read_va + offset; + rdma_wr->rkey = isert_cmd->read_stag; + if (i + 1 == wr->rdma_wr_num) + rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr; else - send_wr->next = &wr->send_wr[i + 1]; + rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr; } else { - send_wr->opcode = IB_WR_RDMA_READ; - send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; - send_wr->wr.rdma.rkey = isert_cmd->write_stag; - if (i + 1 == wr->send_wr_num) - send_wr->send_flags = IB_SEND_SIGNALED; + rdma_wr->wr.opcode = IB_WR_RDMA_READ; + rdma_wr->remote_addr = isert_cmd->write_va + va_offset; + rdma_wr->rkey = isert_cmd->write_stag; + if (i + 1 == wr->rdma_wr_num) + rdma_wr->wr.send_flags = IB_SEND_SIGNALED; else - send_wr->next = &wr->send_wr[i + 1]; + rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr; } ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, - send_wr, data_len, offset); + rdma_wr, data_len, offset); ib_sge += ib_sge_cnt; offset += data_len; @@ -2513,45 +2498,6 @@ unmap_cmd: return ret; } -static int -isert_map_fr_pagelist(struct ib_device *ib_dev, - struct scatterlist *sg_start, int sg_nents, u64 *fr_pl) -{ - u64 start_addr, end_addr, page, chunk_start = 0; - struct scatterlist *tmp_sg; - int i = 0, new_chunk, last_ent, n_pages; - - n_pages = 0; - new_chunk = 1; - last_ent = sg_nents - 1; - for_each_sg(sg_start, tmp_sg, sg_nents, i) { - start_addr = ib_sg_dma_address(ib_dev, tmp_sg); - if (new_chunk) - chunk_start = start_addr; - end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); - - isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n", - i, (unsigned long long)tmp_sg->dma_address, - tmp_sg->length); - - if ((end_addr & ~PAGE_MASK) && i < last_ent) { - new_chunk = 0; - continue; - } - new_chunk = 1; - - page = chunk_start & PAGE_MASK; - do { - fr_pl[n_pages++] = page; - isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n", - n_pages - 1, page); - page += PAGE_SIZE; - } while (page < end_addr); - } - - return n_pages; -} - static inline void isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) { @@ -2577,14 +2523,12 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, struct isert_device *device = isert_conn->device; struct ib_device *ib_dev = device->ib_device; struct ib_mr *mr; - struct ib_fast_reg_page_list *frpl; - struct ib_send_wr fr_wr, inv_wr; - struct ib_send_wr *bad_wr, *wr = NULL; - int ret, pagelist_len; - u32 page_off; + struct ib_reg_wr reg_wr; + struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; + int ret, n; if (mem->dma_nents == 1) { - sge->lkey = device->mr->lkey; + sge->lkey = device->pd->local_dma_lkey; sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", @@ -2592,45 +2536,41 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, return 0; } - if (ind == ISERT_DATA_KEY_VALID) { + if (ind == ISERT_DATA_KEY_VALID) /* Registering data buffer */ mr = fr_desc->data_mr; - frpl = fr_desc->data_frpl; - } else { + else /* Registering protection buffer */ mr = fr_desc->pi_ctx->prot_mr; - frpl = fr_desc->pi_ctx->prot_frpl; - } - - page_off = mem->offset % PAGE_SIZE; - - isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", - fr_desc, mem->nents, mem->offset); - - pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, - &frpl->page_list[0]); if (!(fr_desc->ind & ind)) { isert_inv_rkey(&inv_wr, mr); wr = &inv_wr; } - /* Prepare FASTREG WR */ - 
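
Note: the send_wr to rdma_wr conversion in this loop reflects the verbs rework where RDMA READ/WRITE requests use struct ib_rdma_wr, which embeds the generic ib_send_wr as its first member (wr) and carries remote_addr/rkey directly, so chains link through &next->wr. A small illustrative sketch of filling one such WR; the buffer and chaining arguments are assumptions:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Sketch: describe one RDMA WRITE toward a peer buffer with ib_rdma_wr. */
static void init_rdma_write(struct ib_rdma_wr *rdma_wr, struct ib_sge *sge,
			    int num_sge, u64 remote_addr, u32 rkey,
			    struct ib_send_wr *chain_next)
{
	memset(rdma_wr, 0, sizeof(*rdma_wr));
	rdma_wr->wr.opcode   = IB_WR_RDMA_WRITE;
	rdma_wr->wr.sg_list  = sge;
	rdma_wr->wr.num_sge  = num_sge;
	rdma_wr->wr.next     = chain_next;	/* e.g. &next_rdma_wr->wr, or NULL */
	rdma_wr->remote_addr = remote_addr;	/* peer VA from the iSER header */
	rdma_wr->rkey        = rkey;
}
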
memset(&fr_wr, 0, sizeof(fr_wr)); - fr_wr.wr_id = ISER_FASTREG_LI_WRID; - fr_wr.opcode = IB_WR_FAST_REG_MR; - fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off; - fr_wr.wr.fast_reg.page_list = frpl; - fr_wr.wr.fast_reg.page_list_len = pagelist_len; - fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; - fr_wr.wr.fast_reg.length = mem->len; - fr_wr.wr.fast_reg.rkey = mr->rkey; - fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; + n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE); + if (unlikely(n != mem->nents)) { + isert_err("failed to map mr sg (%d/%d)\n", + n, mem->nents); + return n < 0 ? n : -EINVAL; + } + + isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", + fr_desc, mem->nents, mem->offset); + + reg_wr.wr.next = NULL; + reg_wr.wr.opcode = IB_WR_REG_MR; + reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID; + reg_wr.wr.send_flags = 0; + reg_wr.wr.num_sge = 0; + reg_wr.mr = mr; + reg_wr.key = mr->lkey; + reg_wr.access = IB_ACCESS_LOCAL_WRITE; if (!wr) - wr = &fr_wr; + wr = ®_wr.wr; else - wr->next = &fr_wr; + wr->next = ®_wr.wr; ret = ib_post_send(isert_conn->qp, wr, &bad_wr); if (ret) { @@ -2640,8 +2580,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, fr_desc->ind &= ~ind; sge->lkey = mr->lkey; - sge->addr = frpl->page_list[0] + page_off; - sge->length = mem->len; + sge->addr = mr->iova; + sge->length = mr->length; isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", sge->addr, sge->length, sge->lkey); @@ -2711,8 +2651,8 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct isert_rdma_wr *rdma_wr, struct fast_reg_descriptor *fr_desc) { - struct ib_send_wr sig_wr, inv_wr; - struct ib_send_wr *bad_wr, *wr = NULL; + struct ib_sig_handover_wr sig_wr; + struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; struct pi_context *pi_ctx = fr_desc->pi_ctx; struct ib_sig_attrs sig_attrs; int ret; @@ -2730,20 +2670,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, } memset(&sig_wr, 0, sizeof(sig_wr)); - sig_wr.opcode = IB_WR_REG_SIG_MR; - sig_wr.wr_id = ISER_FASTREG_LI_WRID; - sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; - sig_wr.num_sge = 1; - sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; - sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; - sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; + sig_wr.wr.opcode = IB_WR_REG_SIG_MR; + sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID; + sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA]; + sig_wr.wr.num_sge = 1; + sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE; + sig_wr.sig_attrs = &sig_attrs; + sig_wr.sig_mr = pi_ctx->sig_mr; if (se_cmd->t_prot_sg) - sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT]; + sig_wr.prot = &rdma_wr->ib_sg[PROT]; if (!wr) - wr = &sig_wr; + wr = &sig_wr.wr; else - wr->next = &sig_wr; + wr->next = &sig_wr.wr; ret = ib_post_send(isert_conn->qp, wr, &bad_wr); if (ret) { @@ -2837,7 +2777,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); struct isert_conn *isert_conn = conn->context; struct fast_reg_descriptor *fr_desc = NULL; - struct ib_send_wr *send_wr; + struct ib_rdma_wr *rdma_wr; struct ib_sge *ib_sg; u32 offset; int ret = 0; @@ -2878,26 +2818,26 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); wr->ib_sge = &wr->s_ib_sge; - wr->send_wr_num = 1; - memset(&wr->s_send_wr, 0, sizeof(*send_wr)); - wr->send_wr = &wr->s_send_wr; + wr->rdma_wr_num = 1; + memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr)); + wr->rdma_wr = &wr->s_rdma_wr; wr->isert_cmd = isert_cmd; - send_wr = &isert_cmd->rdma_wr.s_send_wr; - 
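
Note: the isert_fast_reg_mr() rewrite in the surrounding hunks is the generic fast-registration conversion; instead of building a page list by hand (isert_map_fr_pagelist() is deleted), the driver lets ib_map_mr_sg() walk the scatterlist into the MR and then posts an IB_WR_REG_MR through struct ib_reg_wr. A minimal sketch of that sequence, assuming an already allocated IB_MR_TYPE_MEM_REG mr and a DMA-mapped sg list:

#include <rdma/ib_verbs.h>

/* Sketch: map a scatterlist into 'mr' and post the registration WR. */
static int reg_mr_sg(struct ib_qp *qp, struct ib_mr *mr,
		     struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr        = mr;
	reg_wr.key       = mr->rkey;
	reg_wr.access    = IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ |
			   IB_ACCESS_REMOTE_WRITE;

	/* after this completes, mr->iova and mr->length describe the region */
	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}
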
send_wr->sg_list = &wr->s_ib_sge; - send_wr->num_sge = 1; - send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; + rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr; + rdma_wr->wr.sg_list = &wr->s_ib_sge; + rdma_wr->wr.num_sge = 1; + rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc; if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { - send_wr->opcode = IB_WR_RDMA_WRITE; - send_wr->wr.rdma.remote_addr = isert_cmd->read_va; - send_wr->wr.rdma.rkey = isert_cmd->read_stag; - send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? + rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; + rdma_wr->remote_addr = isert_cmd->read_va; + rdma_wr->rkey = isert_cmd->read_stag; + rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? 0 : IB_SEND_SIGNALED; } else { - send_wr->opcode = IB_WR_RDMA_READ; - send_wr->wr.rdma.remote_addr = isert_cmd->write_va; - send_wr->wr.rdma.rkey = isert_cmd->write_stag; - send_wr->send_flags = IB_SEND_SIGNALED; + rdma_wr->wr.opcode = IB_WR_RDMA_READ; + rdma_wr->remote_addr = isert_cmd->write_va; + rdma_wr->rkey = isert_cmd->write_stag; + rdma_wr->wr.send_flags = IB_SEND_SIGNALED; } return 0; @@ -2945,11 +2885,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, &isert_cmd->tx_desc.send_wr); - isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; - wr->send_wr_num += 1; + isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr; + wr->rdma_wr_num += 1; + + rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); + if (rc) { + isert_err("ib_post_recv failed with %d\n", rc); + return rc; + } } - rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); + rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); if (rc) isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); @@ -2983,7 +2929,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) return rc; } - rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); + rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); if (rc) isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); @@ -2996,9 +2942,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) static int isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) { - int ret; + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + int ret = 0; switch (state) { + case ISTATE_REMOVE: + spin_lock_bh(&conn->cmd_lock); + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + isert_put_cmd(isert_cmd, true); + break; case ISTATE_SEND_NOPIN_WANT_RESPONSE: ret = isert_put_nopin(cmd, conn, false); break; @@ -3062,7 +3015,7 @@ isert_setup_id(struct isert_np *isert_np) sa = (struct sockaddr *)&np->np_sockaddr; isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); - id = rdma_create_id(isert_cma_handler, isert_np, + id = rdma_create_id(&init_net, isert_cma_handler, isert_np, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(id)) { isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); @@ -3092,7 +3045,7 @@ out: static int isert_setup_np(struct iscsi_np *np, - struct __kernel_sockaddr_storage *ksockaddr) + struct sockaddr_storage *ksockaddr) { struct isert_np *isert_np; struct rdma_cm_id *isert_lid; @@ -3103,10 +3056,10 @@ isert_setup_np(struct iscsi_np *np, isert_err("Unable to allocate struct isert_np\n"); return -ENOMEM; } - sema_init(&isert_np->np_sem, 0); - mutex_init(&isert_np->np_accept_mutex); - INIT_LIST_HEAD(&isert_np->np_accept_list); - 
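
Note: isert_setup_id() in this region picks up the extra struct net * argument that rdma_create_id() gained when RDMA-CM became network-namespace aware; kernel-side listeners pass &init_net. A hedged sketch of the listen-side setup; the handler and backlog value are placeholders rather than driver specifics:

#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* Sketch: create a TCP-port-space RC listener bound to 'sa'. */
static struct rdma_cm_id *setup_listener(struct sockaddr *sa, void *context,
					 rdma_cm_event_handler handler)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, handler, context,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ret = rdma_bind_addr(id, sa);
	if (!ret)
		ret = rdma_listen(id, 0 /* backlog */);
	if (ret) {
		rdma_destroy_id(id);
		return ERR_PTR(ret);
	}
	return id;
}
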
init_completion(&isert_np->np_login_comp); + sema_init(&isert_np->sem, 0); + mutex_init(&isert_np->mutex); + INIT_LIST_HEAD(&isert_np->accepted); + INIT_LIST_HEAD(&isert_np->pending); isert_np->np = np; /* @@ -3114,7 +3067,7 @@ isert_setup_np(struct iscsi_np *np, * in iscsi_target_configfs.c code.. */ memcpy(&np->np_sockaddr, ksockaddr, - sizeof(struct __kernel_sockaddr_storage)); + sizeof(struct sockaddr_storage)); isert_lid = isert_setup_id(isert_np); if (IS_ERR(isert_lid)) { @@ -3122,7 +3075,7 @@ isert_setup_np(struct iscsi_np *np, goto out; } - isert_np->np_cm_id = isert_lid; + isert_np->cm_id = isert_lid; np->np_context = isert_np; return 0; @@ -3196,32 +3149,11 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, { struct rdma_cm_id *cm_id = isert_conn->cm_id; struct rdma_route *cm_route = &cm_id->route; - struct sockaddr_in *sock_in; - struct sockaddr_in6 *sock_in6; conn->login_family = np->np_sockaddr.ss_family; - if (np->np_sockaddr.ss_family == AF_INET6) { - sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr; - snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", - &sock_in6->sin6_addr.in6_u); - conn->login_port = ntohs(sock_in6->sin6_port); - - sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr; - snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", - &sock_in6->sin6_addr.in6_u); - conn->local_port = ntohs(sock_in6->sin6_port); - } else { - sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr; - sprintf(conn->login_ip, "%pI4", - &sock_in->sin_addr.s_addr); - conn->login_port = ntohs(sock_in->sin_port); - - sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr; - sprintf(conn->local_ip, "%pI4", - &sock_in->sin_addr.s_addr); - conn->local_port = ntohs(sock_in->sin_port); - } + conn->login_sockaddr = cm_route->addr.dst_addr; + conn->local_sockaddr = cm_route->addr.src_addr; } static int @@ -3232,7 +3164,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) int ret; accept_wait: - ret = down_interruptible(&isert_np->np_sem); + ret = down_interruptible(&isert_np->sem); if (ret) return -ENODEV; @@ -3249,15 +3181,15 @@ accept_wait: } spin_unlock_bh(&np->np_thread_lock); - mutex_lock(&isert_np->np_accept_mutex); - if (list_empty(&isert_np->np_accept_list)) { - mutex_unlock(&isert_np->np_accept_mutex); + mutex_lock(&isert_np->mutex); + if (list_empty(&isert_np->pending)) { + mutex_unlock(&isert_np->mutex); goto accept_wait; } - isert_conn = list_first_entry(&isert_np->np_accept_list, - struct isert_conn, accept_node); - list_del_init(&isert_conn->accept_node); - mutex_unlock(&isert_np->np_accept_mutex); + isert_conn = list_first_entry(&isert_np->pending, + struct isert_conn, node); + list_del_init(&isert_conn->node); + mutex_unlock(&isert_np->mutex); conn->context = isert_conn; isert_conn->conn = conn; @@ -3275,28 +3207,39 @@ isert_free_np(struct iscsi_np *np) struct isert_np *isert_np = np->np_context; struct isert_conn *isert_conn, *n; - if (isert_np->np_cm_id) - rdma_destroy_id(isert_np->np_cm_id); + if (isert_np->cm_id) + rdma_destroy_id(isert_np->cm_id); /* * FIXME: At this point we don't have a good way to insure * that at this point we don't have hanging connections that * completed RDMA establishment but didn't start iscsi login * process. So work-around this by cleaning up what ever piled - * up in np_accept_list. + * up in accepted and pending lists. 
*/ - mutex_lock(&isert_np->np_accept_mutex); - if (!list_empty(&isert_np->np_accept_list)) { - isert_info("Still have isert connections, cleaning up...\n"); + mutex_lock(&isert_np->mutex); + if (!list_empty(&isert_np->pending)) { + isert_info("Still have isert pending connections\n"); list_for_each_entry_safe(isert_conn, n, - &isert_np->np_accept_list, - accept_node) { + &isert_np->pending, + node) { isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); isert_connect_release(isert_conn); } } - mutex_unlock(&isert_np->np_accept_mutex); + + if (!list_empty(&isert_np->accepted)) { + isert_info("Still have isert accepted connections\n"); + list_for_each_entry_safe(isert_conn, n, + &isert_np->accepted, + node) { + isert_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); + isert_connect_release(isert_conn); + } + } + mutex_unlock(&isert_np->mutex); np->np_context = NULL; kfree(isert_np); @@ -3363,6 +3306,41 @@ isert_wait4flush(struct isert_conn *isert_conn) wait_for_completion(&isert_conn->wait_comp_err); } +/** + * isert_put_unsol_pending_cmds() - Drop commands waiting for + * unsolicitate dataout + * @conn: iscsi connection + * + * We might still have commands that are waiting for unsolicited + * dataouts messages. We must put the extra reference on those + * before blocking on the target_wait_for_session_cmds + */ +static void +isert_put_unsol_pending_cmds(struct iscsi_conn *conn) +{ + struct iscsi_cmd *cmd, *tmp; + static LIST_HEAD(drop_cmd_list); + + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { + if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && + (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && + (cmd->write_data_done < cmd->se_cmd.data_length)) + list_move_tail(&cmd->i_conn_node, &drop_cmd_list); + } + spin_unlock_bh(&conn->cmd_lock); + + list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { + list_del_init(&cmd->i_conn_node); + if (cmd->i_state != ISTATE_REMOVE) { + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + + isert_info("conn %p dropping cmd %p\n", conn, cmd); + isert_put_cmd(isert_cmd, true); + } + } +} + static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; @@ -3381,8 +3359,9 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->mutex); - isert_wait4cmds(conn); isert_wait4flush(isert_conn); + isert_put_unsol_pending_cmds(conn); + isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/kernel/drivers/infiniband/ulp/isert/ib_isert.h b/kernel/drivers/infiniband/ulp/isert/ib_isert.h index 9ec23a786..3d7fbc47c 100644 --- a/kernel/drivers/infiniband/ulp/isert/ib_isert.h +++ b/kernel/drivers/infiniband/ulp/isert/ib_isert.h @@ -84,14 +84,12 @@ enum isert_indicator { struct pi_context { struct ib_mr *prot_mr; - struct ib_fast_reg_page_list *prot_frpl; struct ib_mr *sig_mr; }; struct fast_reg_descriptor { struct list_head list; struct ib_mr *data_mr; - struct ib_fast_reg_page_list *data_frpl; u8 ind; struct pi_context *pi_ctx; }; @@ -113,14 +111,13 @@ enum { }; struct isert_rdma_wr { - struct list_head wr_list; struct isert_cmd *isert_cmd; enum iser_ib_op_code iser_ib_op; struct ib_sge *ib_sge; struct ib_sge s_ib_sge; - int send_wr_num; - struct ib_send_wr *send_wr; - struct ib_send_wr s_send_wr; + int rdma_wr_num; + struct ib_rdma_wr *rdma_wr; + struct 
ib_rdma_wr s_rdma_wr; struct ib_sge ib_sg[3]; struct isert_data_buf data; struct isert_data_buf prot; @@ -134,14 +131,13 @@ struct isert_cmd { uint64_t write_va; u64 pdu_buf_dma; u32 pdu_buf_len; - u32 read_va_off; - u32 write_va_off; - u32 rdma_wr_num; struct isert_conn *conn; struct iscsi_cmd *iscsi_cmd; struct iser_tx_desc tx_desc; + struct iser_rx_desc *rx_desc; struct isert_rdma_wr rdma_wr; struct work_struct comp_work; + struct scatterlist sg; }; struct isert_device; @@ -159,11 +155,10 @@ struct isert_conn { u64 login_req_dma; int login_req_len; u64 login_rsp_dma; - unsigned int rx_desc_head; struct iser_rx_desc *rx_descs; - struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; + struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS]; struct iscsi_conn *conn; - struct list_head accept_node; + struct list_head node; struct completion login_comp; struct completion login_req_comp; struct iser_tx_desc login_tx_desc; @@ -209,7 +204,6 @@ struct isert_device { int refcount; struct ib_device *ib_device; struct ib_pd *pd; - struct ib_mr *mr; struct isert_comp *comps; int comps_used; struct list_head dev_node; @@ -223,9 +217,9 @@ struct isert_device { struct isert_np { struct iscsi_np *np; - struct semaphore np_sem; - struct rdma_cm_id *np_cm_id; - struct mutex np_accept_mutex; - struct list_head np_accept_list; - struct completion np_login_comp; + struct semaphore sem; + struct rdma_cm_id *cm_id; + struct mutex mutex; + struct list_head accepted; + struct list_head pending; }; diff --git a/kernel/drivers/infiniband/ulp/srp/ib_srp.c b/kernel/drivers/infiniband/ulp/srp/ib_srp.c index 025f93105..3db9a6597 100644 --- a/kernel/drivers/infiniband/ulp/srp/ib_srp.c +++ b/kernel/drivers/infiniband/ulp/srp/ib_srp.c @@ -55,20 +55,21 @@ #define DRV_NAME "ib_srp" #define PFX DRV_NAME ": " -#define DRV_VERSION "1.0" -#define DRV_RELDATE "July 1, 2013" +#define DRV_VERSION "2.0" +#define DRV_RELDATE "July 26, 2015" MODULE_AUTHOR("Roland Dreier"); -MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " - "v" DRV_VERSION " (" DRV_RELDATE ")"); +MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_INFO(release_date, DRV_RELDATE); static unsigned int srp_sg_tablesize; static unsigned int cmd_sg_entries; static unsigned int indirect_sg_entries; static bool allow_ext_sg; -static bool prefer_fr; -static bool register_always; +static bool prefer_fr = true; +static bool register_always = true; static int topspin_workarounds = 1; module_param(srp_sg_tablesize, uint, 0444); @@ -98,7 +99,7 @@ module_param(register_always, bool, 0444); MODULE_PARM_DESC(register_always, "Use memory registration even for contiguous memory regions"); -static struct kernel_param_ops srp_tmo_ops; +static const struct kernel_param_ops srp_tmo_ops; static int srp_reconnect_delay = 10; module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, @@ -130,7 +131,7 @@ MODULE_PARM_DESC(ch_count, "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. 
The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); static void srp_add_one(struct ib_device *device); -static void srp_remove_one(struct ib_device *device); +static void srp_remove_one(struct ib_device *device, void *client_data); static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr); static void srp_send_completion(struct ib_cq *cq, void *ch_ptr); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); @@ -160,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp) { int tmo, res; - if (strncmp(val, "off", 3) != 0) { - res = kstrtoint(val, 0, &tmo); - if (res) - goto out; - } else { - tmo = -1; - } + res = srp_parse_tmo(&tmo, val); + if (res) + goto out; + if (kp->arg == &srp_reconnect_delay) res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, srp_dev_loss_tmo); @@ -183,7 +181,7 @@ out: return res; } -static struct kernel_param_ops srp_tmo_ops = { +static const struct kernel_param_ops srp_tmo_ops = { .get = srp_tmo_get, .set = srp_tmo_set, }; @@ -253,7 +251,8 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) static void srp_qp_event(struct ib_event *event, void *context) { - pr_debug("QP event %d\n", event->event); + pr_debug("QP event %s (%d)\n", + ib_event_msg(event->event), event->event); } static int srp_init_qp(struct srp_target_port *target, @@ -341,8 +340,6 @@ static void srp_destroy_fr_pool(struct srp_fr_pool *pool) return; for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { - if (d->frpl) - ib_free_fast_reg_page_list(d->frpl); if (d->mr) ib_dereg_mr(d->mr); } @@ -363,7 +360,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, struct srp_fr_pool *pool; struct srp_fr_desc *d; struct ib_mr *mr; - struct ib_fast_reg_page_list *frpl; int i, ret = -EINVAL; if (pool_size <= 0) @@ -379,18 +375,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, INIT_LIST_HEAD(&pool->free_list); for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { - mr = ib_alloc_fast_reg_mr(pd, max_page_list_len); + mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, + max_page_list_len); if (IS_ERR(mr)) { ret = PTR_ERR(mr); goto destroy_pool; } d->mr = mr; - frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len); - if (IS_ERR(frpl)) { - ret = PTR_ERR(frpl); - goto destroy_pool; - } - d->frpl = frpl; list_add_tail(&d->entry, &pool->free_list); } @@ -497,7 +488,8 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) struct ib_qp *qp; struct ib_fmr_pool *fmr_pool = NULL; struct srp_fr_pool *fr_pool = NULL; - const int m = 1 + dev->use_fast_reg; + const int m = dev->use_fast_reg ? 
3 : 1; + struct ib_cq_init_attr cq_attr = {}; int ret; init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); @@ -505,15 +497,19 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) return -ENOMEM; /* + 1 for SRP_LAST_WR_ID */ + cq_attr.cqe = target->queue_size + 1; + cq_attr.comp_vector = ch->comp_vector; recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, - target->queue_size + 1, ch->comp_vector); + &cq_attr); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } + cq_attr.cqe = m * target->queue_size; + cq_attr.comp_vector = ch->comp_vector; send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, - m * target->queue_size, ch->comp_vector); + &cq_attr); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; @@ -541,7 +537,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) if (ret) goto err_qp; - if (dev->use_fast_reg && dev->has_fr) { + if (dev->use_fast_reg) { fr_pool = srp_alloc_fr_pool(target); if (IS_ERR(fr_pool)) { ret = PTR_ERR(fr_pool); @@ -549,10 +545,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) "FR pool allocation failed (%d)\n", ret); goto err_qp; } - if (ch->fr_pool) - srp_destroy_fr_pool(ch->fr_pool); - ch->fr_pool = fr_pool; - } else if (!dev->use_fast_reg && dev->has_fmr) { + } else if (dev->use_fmr) { fmr_pool = srp_alloc_fmr_pool(target); if (IS_ERR(fmr_pool)) { ret = PTR_ERR(fmr_pool); @@ -560,9 +553,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) "FMR pool allocation failed (%d)\n", ret); goto err_qp; } - if (ch->fmr_pool) - ib_destroy_fmr_pool(ch->fmr_pool); - ch->fmr_pool = fmr_pool; } if (ch->qp) @@ -576,6 +566,16 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) ch->recv_cq = recv_cq; ch->send_cq = send_cq; + if (dev->use_fast_reg) { + if (ch->fr_pool) + srp_destroy_fr_pool(ch->fr_pool); + ch->fr_pool = fr_pool; + } else if (dev->use_fmr) { + if (ch->fmr_pool) + ib_destroy_fmr_pool(ch->fmr_pool); + ch->fmr_pool = fmr_pool; + } + kfree(init_attr); return 0; @@ -618,7 +618,7 @@ static void srp_free_ch_ib(struct srp_target_port *target, if (dev->use_fast_reg) { if (ch->fr_pool) srp_destroy_fr_pool(ch->fr_pool); - } else { + } else if (dev->use_fmr) { if (ch->fmr_pool) ib_destroy_fmr_pool(ch->fmr_pool); } @@ -780,7 +780,7 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich) shost_printk(KERN_DEBUG, target->scsi_host, PFX "Topspin/Cisco initiator port ID workaround " "activated for target GUID %016llx\n", - (unsigned long long) be64_to_cpu(target->ioc_guid)); + be64_to_cpu(target->ioc_guid)); memset(req->priv.initiator_port_id, 0, 8); memcpy(req->priv.initiator_port_id + 8, &target->srp_host->srp_dev->dev->node_guid, 8); @@ -835,16 +835,17 @@ static void srp_free_req_data(struct srp_target_port *target, struct srp_request *req; int i; - if (!ch->target || !ch->req_ring) + if (!ch->req_ring) return; for (i = 0; i < target->req_ring_size; ++i) { req = &ch->req_ring[i]; - if (dev->use_fast_reg) + if (dev->use_fast_reg) { kfree(req->fr_list); - else + } else { kfree(req->fmr_list); - kfree(req->map_page); + kfree(req->map_page); + } if (req->indirect_dma_addr) { ib_dma_unmap_single(ibdev, req->indirect_dma_addr, target->indirect_size, @@ -878,14 +879,15 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) GFP_KERNEL); if (!mr_list) goto out; - if (srp_dev->use_fast_reg) + if (srp_dev->use_fast_reg) { req->fr_list = mr_list; - else + } else { req->fmr_list = mr_list; - req->map_page = kmalloc(srp_dev->max_pages_per_mr * - sizeof(void *), GFP_KERNEL); - if (!req->map_page) - goto out; + 
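
Note: srp_create_ch_ib() here moves to the attribute form of ib_create_cq(), where the CQE count and completion vector travel in struct ib_cq_init_attr instead of positional arguments. A minimal sketch of that call, assuming an srp-style completion handler; the function name is illustrative:

#include <rdma/ib_verbs.h>

/* Sketch: create a completion queue of a given size on a given vector. */
static struct ib_cq *create_cq_on_vector(struct ib_device *dev,
					 ib_comp_handler handler,
					 void *ctx, int cqe, int vector)
{
	struct ib_cq_init_attr cq_attr = {};

	cq_attr.cqe         = cqe;	/* e.g. queue_size + 1 for a flush marker */
	cq_attr.comp_vector = vector;	/* spread channels over completion vectors */

	return ib_create_cq(dev, handler, NULL /* event handler */, ctx,
			    &cq_attr);
}
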
req->map_page = kmalloc(srp_dev->max_pages_per_mr * + sizeof(void *), GFP_KERNEL); + if (!req->map_page) + goto out; + } req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); if (!req->indirect_desc) goto out; @@ -992,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) ret = srp_lookup_path(ch); if (ret) - return ret; + goto out; while (1) { init_completion(&ch->done); ret = srp_send_req(ch, multich); if (ret) - return ret; + goto out; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) - return ret; + goto out; /* * The CM event handling code will set status to @@ -1009,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) * back, or SRP_DLID_REDIRECT if we get a lid/qp * redirect REJ back. */ - switch (ch->status) { + ret = ch->status; + switch (ret) { case 0: ch->connected = true; - return 0; + goto out; case SRP_PORT_REDIRECT: ret = srp_lookup_path(ch); if (ret) - return ret; + goto out; break; case SRP_DLID_REDIRECT: @@ -1026,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) case SRP_STALE_CONN: shost_printk(KERN_ERR, target->scsi_host, PFX "giving up on stale connection\n"); - ch->status = -ECONNRESET; - return ch->status; + ret = -ECONNRESET; + goto out; default: - return ch->status; + goto out; } } + +out: + return ret <= 0 ? ret : -ENODEV; } static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) @@ -1080,7 +1086,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, if (req->nmdesc) srp_fr_pool_put(ch->fr_pool, req->fr_list, req->nmdesc); - } else { + } else if (dev->use_fmr) { struct ib_pool_fmr **pfmr; for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++) @@ -1209,14 +1215,10 @@ static int srp_rport_reconnect(struct srp_rport *rport) */ for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (!ch->target) - break; ret += srp_new_cm_id(ch); } for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (!ch->target) - break; for (j = 0; j < target->req_ring_size; ++j) { struct srp_request *req = &ch->req_ring[j]; @@ -1225,8 +1227,6 @@ static int srp_rport_reconnect(struct srp_rport *rport) } for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (!ch->target) - break; /* * Whether or not creating a new CM ID succeeded, create a new * QP. 
This guarantees that all completion callback function @@ -1243,7 +1243,7 @@ static int srp_rport_reconnect(struct srp_rport *rport) for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (ret || !ch->target) + if (ret) break; ret = srp_connect_ch(ch, multich); multich = true; @@ -1261,6 +1261,8 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, { struct srp_direct_buf *desc = state->desc; + WARN_ON_ONCE(!dma_len); + desc->va = cpu_to_be64(dma_addr); desc->key = cpu_to_be32(rkey); desc->len = cpu_to_be32(dma_len); @@ -1273,31 +1275,68 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, static int srp_map_finish_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; struct ib_pool_fmr *fmr; u64 io_addr = 0; + if (state->fmr.next >= state->fmr.end) + return -ENOMEM; + + WARN_ON_ONCE(!dev->use_fmr); + + if (state->npages == 0) + return 0; + + if (state->npages == 1 && target->global_mr) { + srp_map_desc(state, state->base_dma_addr, state->dma_len, + target->global_mr->rkey); + goto reset_state; + } + fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, state->npages, io_addr); if (IS_ERR(fmr)) return PTR_ERR(fmr); - *state->next_fmr++ = fmr; + *state->fmr.next++ = fmr; state->nmdesc++; - srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey); + srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask, + state->dma_len, fmr->fmr->rkey); + +reset_state: + state->npages = 0; + state->dma_len = 0; return 0; } static int srp_map_finish_fr(struct srp_map_state *state, - struct srp_rdma_ch *ch) + struct srp_rdma_ch *ch, int sg_nents) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_send_wr *bad_wr; - struct ib_send_wr wr; + struct ib_reg_wr wr; struct srp_fr_desc *desc; u32 rkey; + int n, err; + + if (state->fr.next >= state->fr.end) + return -ENOMEM; + + WARN_ON_ONCE(!dev->use_fast_reg); + + if (sg_nents == 0) + return 0; + + if (sg_nents == 1 && target->global_mr) { + srp_map_desc(state, sg_dma_address(state->sg), + sg_dma_len(state->sg), + target->global_mr->rkey); + return 1; + } desc = srp_fr_pool_get(ch->fr_pool); if (!desc) @@ -1306,125 +1345,54 @@ static int srp_map_finish_fr(struct srp_map_state *state, rkey = ib_inc_rkey(desc->mr->rkey); ib_update_fast_reg_key(desc->mr, rkey); - memcpy(desc->frpl->page_list, state->pages, - sizeof(state->pages[0]) * state->npages); - - memset(&wr, 0, sizeof(wr)); - wr.opcode = IB_WR_FAST_REG_MR; - wr.wr_id = FAST_REG_WR_ID_MASK; - wr.wr.fast_reg.iova_start = state->base_dma_addr; - wr.wr.fast_reg.page_list = desc->frpl; - wr.wr.fast_reg.page_list_len = state->npages; - wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size); - wr.wr.fast_reg.length = state->dma_len; - wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE); - wr.wr.fast_reg.rkey = desc->mr->lkey; - - *state->next_fr++ = desc; + n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size); + if (unlikely(n < 0)) + return n; + + wr.wr.next = NULL; + wr.wr.opcode = IB_WR_REG_MR; + wr.wr.wr_id = FAST_REG_WR_ID_MASK; + wr.wr.num_sge = 0; + wr.wr.send_flags = 0; + wr.mr = desc->mr; + wr.key = desc->mr->rkey; + wr.access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE); + + *state->fr.next++ = desc; state->nmdesc++; - srp_map_desc(state, state->base_dma_addr, state->dma_len, - 
desc->mr->rkey); - - return ib_post_send(ch->qp, &wr, &bad_wr); -} - -static int srp_finish_mapping(struct srp_map_state *state, - struct srp_rdma_ch *ch) -{ - struct srp_target_port *target = ch->target; - int ret = 0; - - if (state->npages == 0) - return 0; - - if (state->npages == 1 && !register_always) - srp_map_desc(state, state->base_dma_addr, state->dma_len, - target->rkey); - else - ret = target->srp_host->srp_dev->use_fast_reg ? - srp_map_finish_fr(state, ch) : - srp_map_finish_fmr(state, ch); + srp_map_desc(state, desc->mr->iova, + desc->mr->length, desc->mr->rkey); - if (ret == 0) { - state->npages = 0; - state->dma_len = 0; - } + err = ib_post_send(ch->qp, &wr.wr, &bad_wr); + if (unlikely(err)) + return err; - return ret; -} - -static void srp_map_update_start(struct srp_map_state *state, - struct scatterlist *sg, int sg_index, - dma_addr_t dma_addr) -{ - state->unmapped_sg = sg; - state->unmapped_index = sg_index; - state->unmapped_addr = dma_addr; + return n; } static int srp_map_sg_entry(struct srp_map_state *state, struct srp_rdma_ch *ch, - struct scatterlist *sg, int sg_index, - bool use_mr) + struct scatterlist *sg, int sg_index) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); unsigned int dma_len = ib_sg_dma_len(ibdev, sg); - unsigned int len; + unsigned int len = 0; int ret; - if (!dma_len) - return 0; - - if (!use_mr) { - /* - * Once we're in direct map mode for a request, we don't - * go back to FMR or FR mode, so no need to update anything - * other than the descriptor. - */ - srp_map_desc(state, dma_addr, dma_len, target->rkey); - return 0; - } - - /* - * Since not all RDMA HW drivers support non-zero page offsets for - * FMR, if we start at an offset into a page, don't merge into the - * current FMR mapping. Finish it out, and use the kernel's MR for - * this sg entry. - */ - if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) || - dma_len > dev->mr_max_size) { - ret = srp_finish_mapping(state, ch); - if (ret) - return ret; - - srp_map_desc(state, dma_addr, dma_len, target->rkey); - srp_map_update_start(state, NULL, 0, 0); - return 0; - } - - /* - * If this is the first sg that will be mapped via FMR or via FR, save - * our position. We need to know the first unmapped entry, its index, - * and the first unmapped address within that entry to be able to - * restart mapping after an error. - */ - if (!state->unmapped_sg) - srp_map_update_start(state, sg, sg_index, dma_addr); + WARN_ON_ONCE(!dma_len); while (dma_len) { unsigned offset = dma_addr & ~dev->mr_page_mask; if (state->npages == dev->max_pages_per_mr || offset != 0) { - ret = srp_finish_mapping(state, ch); + ret = srp_map_finish_fmr(state, ch); if (ret) return ret; - - srp_map_update_start(state, sg, sg_index, dma_addr); } len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); @@ -1443,78 +1411,152 @@ static int srp_map_sg_entry(struct srp_map_state *state, * boundries. 
*/ ret = 0; - if (len != dev->mr_page_size) { - ret = srp_finish_mapping(state, ch); - if (!ret) - srp_map_update_start(state, NULL, 0, 0); - } + if (len != dev->mr_page_size) + ret = srp_map_finish_fmr(state, ch); return ret; } -static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch, - struct srp_request *req, struct scatterlist *scat, - int count) +static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) +{ + struct scatterlist *sg; + int i, ret; + + state->desc = req->indirect_desc; + state->pages = req->map_page; + state->fmr.next = req->fmr_list; + state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt; + + for_each_sg(scat, sg, count, i) { + ret = srp_map_sg_entry(state, ch, sg, i); + if (ret) + return ret; + } + + ret = srp_map_finish_fmr(state, ch); + if (ret) + return ret; + + req->nmdesc = state->nmdesc; + + return 0; +} + +static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) +{ + state->desc = req->indirect_desc; + state->fr.next = req->fr_list; + state->fr.end = req->fr_list + ch->target->cmd_sg_cnt; + state->sg = scat; + + while (count) { + int i, n; + + n = srp_map_finish_fr(state, ch, count); + if (unlikely(n < 0)) + return n; + + count -= n; + for (i = 0; i < n; i++) + state->sg = sg_next(state->sg); + } + + req->nmdesc = state->nmdesc; + + return 0; +} + +static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; - struct ib_device *ibdev = dev->dev; struct scatterlist *sg; int i; - bool use_mr; - - state->desc = req->indirect_desc; - state->pages = req->map_page; - if (dev->use_fast_reg) { - state->next_fr = req->fr_list; - use_mr = !!ch->fr_pool; - } else { - state->next_fmr = req->fmr_list; - use_mr = !!ch->fmr_pool; - } + state->desc = req->indirect_desc; for_each_sg(scat, sg, count, i) { - if (srp_map_sg_entry(state, ch, sg, i, use_mr)) { - /* - * Memory registration failed, so backtrack to the - * first unmapped entry and continue on without using - * memory registration. - */ - dma_addr_t dma_addr; - unsigned int dma_len; - -backtrack: - sg = state->unmapped_sg; - i = state->unmapped_index; - - dma_addr = ib_sg_dma_address(ibdev, sg); - dma_len = ib_sg_dma_len(ibdev, sg); - dma_len -= (state->unmapped_addr - dma_addr); - dma_addr = state->unmapped_addr; - use_mr = false; - srp_map_desc(state, dma_addr, dma_len, target->rkey); - } + srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), + ib_sg_dma_len(dev->dev, sg), + target->global_mr->rkey); } - if (use_mr && srp_finish_mapping(state, ch)) - goto backtrack; - req->nmdesc = state->nmdesc; return 0; } +/* + * Register the indirect data buffer descriptor with the HCA. + * + * Note: since the indirect data buffer descriptor has been allocated with + * kmalloc() it is guaranteed that this buffer is a physically contiguous + * memory buffer. 
+ */ +static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, + void **next_mr, void **end_mr, u32 idb_len, + __be32 *idb_rkey) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct srp_map_state state; + struct srp_direct_buf idb_desc; + u64 idb_pages[1]; + struct scatterlist idb_sg[1]; + int ret; + + memset(&state, 0, sizeof(state)); + memset(&idb_desc, 0, sizeof(idb_desc)); + state.gen.next = next_mr; + state.gen.end = end_mr; + state.desc = &idb_desc; + state.base_dma_addr = req->indirect_dma_addr; + state.dma_len = idb_len; + + if (dev->use_fast_reg) { + state.sg = idb_sg; + sg_set_buf(idb_sg, req->indirect_desc, idb_len); + idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ +#ifdef CONFIG_NEED_SG_DMA_LENGTH + idb_sg->dma_length = idb_sg->length; /* hack^2 */ +#endif + ret = srp_map_finish_fr(&state, ch, 1); + if (ret < 0) + return ret; + } else if (dev->use_fmr) { + state.pages = idb_pages; + state.pages[0] = (req->indirect_dma_addr & + dev->mr_page_mask); + state.npages = 1; + ret = srp_map_finish_fmr(&state, ch); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + + *idb_rkey = idb_desc.key; + + return 0; +} + static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_request *req) { struct srp_target_port *target = ch->target; struct scatterlist *scat; struct srp_cmd *cmd = req->cmd->buf; - int len, nents, count; + int len, nents, count, ret; struct srp_device *dev; struct ib_device *ibdev; struct srp_map_state state; struct srp_indirect_buf *indirect_hdr; - u32 table_len; + u32 idb_len, table_len; + __be32 idb_rkey; u8 fmt; if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) @@ -1541,7 +1583,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, fmt = SRP_DATA_DESC_DIRECT; len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); - if (count == 1 && !register_always) { + if (count == 1 && target->global_mr) { /* * The midlayer only generated a single gather/scatter * entry, or DMA mapping coalesced everything to a @@ -1551,7 +1593,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_direct_buf *buf = (void *) cmd->add_data; buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); - buf->key = cpu_to_be32(target->rkey); + buf->key = cpu_to_be32(target->global_mr->rkey); buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); req->nmdesc = 0; @@ -1568,7 +1610,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, target->indirect_size, DMA_TO_DEVICE); memset(&state, 0, sizeof(state)); - srp_map_sg(&state, ch, req, scat, count); + if (dev->use_fast_reg) + srp_map_sg_fr(&state, ch, req, scat, count); + else if (dev->use_fmr) + srp_map_sg_fmr(&state, ch, req, scat, count); + else + srp_map_sg_dma(&state, ch, req, scat, count); /* We've mapped the request, now pull as much of the indirect * descriptor table as we can into the command buffer. 
If this @@ -1596,6 +1643,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, count = min(state.ndesc, target->cmd_sg_cnt); table_len = state.ndesc * sizeof (struct srp_direct_buf); + idb_len = sizeof(struct srp_indirect_buf) + table_len; fmt = SRP_DATA_DESC_INDIRECT; len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); @@ -1604,8 +1652,18 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, memcpy(indirect_hdr->desc_list, req->indirect_desc, count * sizeof (struct srp_direct_buf)); + if (!target->global_mr) { + ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, + idb_len, &idb_rkey); + if (ret < 0) + return ret; + req->nmdesc++; + } else { + idb_rkey = cpu_to_be32(target->global_mr->rkey); + } + indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); - indirect_hdr->table_desc.key = cpu_to_be32(target->rkey); + indirect_hdr->table_desc.key = idb_rkey; indirect_hdr->table_desc.len = cpu_to_be32(table_len); indirect_hdr->len = cpu_to_be32(state.total_len); @@ -1837,7 +1895,7 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch, s32 delta = be32_to_cpu(req->req_lim_delta); shost_printk(KERN_ERR, target->scsi_host, PFX - "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); + "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) shost_printk(KERN_ERR, target->scsi_host, PFX @@ -1927,17 +1985,18 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, if (ch->connected && !target->qp_in_error) { if (wr_id & LOCAL_INV_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX - "LOCAL_INV failed with status %d\n", - wc_status); + "LOCAL_INV failed with status %s (%d)\n", + ib_wc_status_msg(wc_status), wc_status); } else if (wr_id & FAST_REG_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX - "FAST_REG_MR failed status %d\n", - wc_status); + "FAST_REG_MR failed status %s (%d)\n", + ib_wc_status_msg(wc_status), wc_status); } else { shost_printk(KERN_ERR, target->scsi_host, - PFX "failed %s status %d for iu %p\n", + PFX "failed %s status %s (%d) for iu %p\n", send_err ? 
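
Note: the error-path hunks nearby swap raw numeric completion statuses for ib_wc_status_msg(), so logs carry a readable reason next to the code. A small sketch of polling a CQ and logging failures that way; the function name and message format are illustrative:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Sketch: poll completions and print failed ones with a readable status. */
static void drain_and_log(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IB_WC_SUCCESS)
			continue;
		pr_err("wr_id 0x%llx failed: %s (%d), vendor_err 0x%x\n",
		       wc.wr_id, ib_wc_status_msg(wc.status), wc.status,
		       wc.vendor_err);
	}
}
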
"send" : "receive", - wc_status, (void *)(uintptr_t)wr_id); + ib_wc_status_msg(wc_status), wc_status, + (void *)(uintptr_t)wr_id); } queue_work(system_long_wq, &target->tl_err_work); } @@ -2029,7 +2088,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) memset(cmd, 0, sizeof *cmd); cmd->opcode = SRP_CMD; - cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); + int_to_scsilun(scmnd->device->lun, &cmd->lun); cmd->tag = tag; memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); @@ -2172,7 +2231,7 @@ static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) } static void srp_cm_rep_handler(struct ib_cm_id *cm_id, - struct srp_login_rsp *lrsp, + const struct srp_login_rsp *lrsp, struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; @@ -2409,8 +2468,8 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth) return scsi_change_queue_depth(sdev, qdepth); } -static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, - unsigned int lun, u8 func) +static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, + u8 func) { struct srp_target_port *target = ch->target; struct srp_rport *rport = target->rport; @@ -2444,7 +2503,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, memset(tsk_mgmt, 0, sizeof *tsk_mgmt); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64((u64) lun << 48); + int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; tsk_mgmt->tsk_mgmt_func = func; tsk_mgmt->task_tag = req_tag; @@ -2558,8 +2617,7 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%016llx\n", - (unsigned long long) be64_to_cpu(target->id_ext)); + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); } static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, @@ -2567,8 +2625,7 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%016llx\n", - (unsigned long long) be64_to_cpu(target->ioc_guid)); + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); } static ssize_t show_service_id(struct device *dev, @@ -2576,8 +2633,7 @@ static ssize_t show_service_id(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%016llx\n", - (unsigned long long) be64_to_cpu(target->service_id)); + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); } static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, @@ -2746,7 +2802,6 @@ static struct scsi_host_template srp_template = { .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = srp_host_attrs, - .use_blk_tags = 1, .track_queue_depth = 1, }; @@ -2775,7 +2830,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) target->state = SRP_TARGET_SCANNING; sprintf(target->target_name, "SRP.T10:%016llX", - (unsigned long long) be64_to_cpu(target->id_ext)); + be64_to_cpu(target->id_ext)); if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) return -ENODEV; @@ -3149,7 +3204,7 @@ static ssize_t srp_create_target(struct device *dev, target_host->transportt = ib_srp_transport_template; target_host->max_channel = 0; target_host->max_id = 1; - target_host->max_lun = SRP_MAX_LUN; + 
target_host->max_lun = -1LL; target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; target = host_to_target(target_host); @@ -3157,8 +3212,8 @@ static ssize_t srp_create_target(struct device *dev, target->io_class = SRP_REV16A_IB_IO_CLASS; target->scsi_host = target_host; target->srp_host = host; - target->lkey = host->srp_dev->mr->lkey; - target->rkey = host->srp_dev->mr->rkey; + target->lkey = host->srp_dev->pd->local_dma_lkey; + target->global_mr = host->srp_dev->global_mr; target->cmd_sg_cnt = cmd_sg_entries; target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; target->allow_ext_sg = allow_ext_sg; @@ -3177,10 +3232,6 @@ static ssize_t srp_create_target(struct device *dev, if (ret) goto out; - ret = scsi_init_shared_tag_map(target_host, target_host->can_queue); - if (ret) - goto out; - target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; if (!srp_conn_unique(target->srp_host, target)) { @@ -3209,7 +3260,7 @@ static ssize_t srp_create_target(struct device *dev, INIT_WORK(&target->tl_err_work, srp_tl_err_work); INIT_WORK(&target->remove_work, srp_remove_work); spin_lock_init(&target->lock); - ret = ib_query_gid(ibdev, host->port, 0, &target->sgid); + ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL); if (ret) goto out; @@ -3390,7 +3441,7 @@ static void srp_add_one(struct ib_device *device) struct srp_device *srp_dev; struct ib_device_attr *dev_attr; struct srp_host *host; - int mr_page_shift, s, e, p; + int mr_page_shift, p; u64 max_pages_per_mr; dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); @@ -3415,6 +3466,7 @@ static void srp_add_one(struct ib_device *device) srp_dev->use_fast_reg = (srp_dev->has_fr && (!srp_dev->has_fmr || prefer_fr)); + srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr; /* * Use the smallest page size supported by the HCA, down to a @@ -3447,22 +3499,18 @@ static void srp_add_one(struct ib_device *device) if (IS_ERR(srp_dev->pd)) goto free_dev; - srp_dev->mr = ib_get_dma_mr(srp_dev->pd, - IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE); - if (IS_ERR(srp_dev->mr)) - goto err_pd; - - if (device->node_type == RDMA_NODE_IB_SWITCH) { - s = 0; - e = 0; + if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) { + srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd, + IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE); + if (IS_ERR(srp_dev->global_mr)) + goto err_pd; } else { - s = 1; - e = device->phys_port_cnt; + srp_dev->global_mr = NULL; } - for (p = s; p <= e; ++p) { + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { host = srp_add_port(srp_dev, p); if (host) list_add_tail(&host->list, &srp_dev->dev_list); @@ -3482,13 +3530,13 @@ free_attr: kfree(dev_attr); } -static void srp_remove_one(struct ib_device *device) +static void srp_remove_one(struct ib_device *device, void *client_data) { struct srp_device *srp_dev; struct srp_host *host, *tmp_host; struct srp_target_port *target; - srp_dev = ib_get_client_data(device, &srp_client); + srp_dev = client_data; if (!srp_dev) return; @@ -3517,7 +3565,8 @@ static void srp_remove_one(struct ib_device *device) kfree(host); } - ib_dereg_mr(srp_dev->mr); + if (srp_dev->global_mr) + ib_dereg_mr(srp_dev->global_mr); ib_dealloc_pd(srp_dev->pd); kfree(srp_dev); diff --git a/kernel/drivers/infiniband/ulp/srp/ib_srp.h b/kernel/drivers/infiniband/ulp/srp/ib_srp.h index e690847a4..f6af531f9 100644 --- a/kernel/drivers/infiniband/ulp/srp/ib_srp.h +++ b/kernel/drivers/infiniband/ulp/srp/ib_srp.h 
@@ -54,7 +54,6 @@ enum { SRP_DLID_REDIRECT = 2, SRP_STALE_CONN = 3, - SRP_MAX_LUN = 512, SRP_DEF_SG_TABLESIZE = 12, SRP_DEFAULT_QUEUE_SIZE = 1 << 6, @@ -96,13 +95,14 @@ struct srp_device { struct list_head dev_list; struct ib_device *dev; struct ib_pd *pd; - struct ib_mr *mr; + struct ib_mr *global_mr; u64 mr_page_mask; int mr_page_size; int mr_max_size; int max_pages_per_mr; bool has_fmr; bool has_fr; + bool use_fmr; bool use_fast_reg; }; @@ -183,10 +183,10 @@ struct srp_target_port { spinlock_t lock; /* read only in the hot path */ + struct ib_mr *global_mr; struct srp_rdma_ch *ch; u32 ch_count; u32 lkey; - u32 rkey; enum srp_target_state state; unsigned int max_iu_len; unsigned int cmd_sg_cnt; @@ -242,7 +242,6 @@ struct srp_iu { struct srp_fr_desc { struct list_head entry; struct ib_mr *mr; - struct ib_fast_reg_page_list *frpl; }; /** @@ -277,26 +276,33 @@ struct srp_fr_pool { * @npages: Number of page addresses in the pages[] array. * @nmdesc: Number of FMR or FR memory descriptors used for mapping. * @ndesc: Number of SRP buffer descriptors that have been filled in. - * @unmapped_sg: First element of the sg-list that is mapped via FMR or FR. - * @unmapped_index: Index of the first element mapped via FMR or FR. - * @unmapped_addr: DMA address of the first element mapped via FMR or FR. */ struct srp_map_state { union { - struct ib_pool_fmr **next_fmr; - struct srp_fr_desc **next_fr; + struct { + struct ib_pool_fmr **next; + struct ib_pool_fmr **end; + } fmr; + struct { + struct srp_fr_desc **next; + struct srp_fr_desc **end; + } fr; + struct { + void **next; + void **end; + } gen; }; struct srp_direct_buf *desc; - u64 *pages; + union { + u64 *pages; + struct scatterlist *sg; + }; dma_addr_t base_dma_addr; u32 dma_len; u32 total_len; unsigned int npages; unsigned int nmdesc; unsigned int ndesc; - struct scatterlist *unmapped_sg; - int unmapped_index; - dma_addr_t unmapped_addr; }; #endif /* IB_SRP_H */ diff --git a/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c b/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c index 9b84b4c0a..2e2fe818c 100644 --- a/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/kernel/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -41,12 +41,10 @@ #include <linux/string.h> #include <linux/delay.h> #include <linux/atomic.h> +#include <scsi/scsi_proto.h> #include <scsi/scsi_tcq.h> -#include <target/configfs_macros.h> #include <target/target_core_base.h> -#include <target/target_core_fabric_configfs.h> #include <target/target_core_fabric.h> -#include <target/target_core_configfs.h> #include "ib_srpt.h" /* Name of this kernel module. 
*/ @@ -93,7 +91,6 @@ MODULE_PARM_DESC(srpt_service_guid, " instead of using the node_guid of the first HCA."); static struct ib_client srpt_client; -static const struct target_core_fabric_ops srpt_template; static void srpt_release_channel(struct srpt_rdma_ch *ch); static int srpt_queue_status(struct se_cmd *cmd); @@ -303,7 +300,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad) int i; ioui = (struct ib_dm_iou_info *)mad->data; - ioui->change_id = __constant_cpu_to_be16(1); + ioui->change_id = cpu_to_be16(1); ioui->max_controllers = 16; /* set present for slot 1 and empty for the rest */ @@ -331,13 +328,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot, if (!slot || slot > 16) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); return; } if (slot > 2) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); return; } @@ -349,10 +346,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot, iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); iocp->subsys_device_id = 0x0; - iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); - iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); - iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL); - iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); + iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS); + iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS); + iocp->protocol = cpu_to_be16(SRP_PROTOCOL); + iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION); iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); iocp->rdma_read_depth = 4; iocp->send_size = cpu_to_be32(srp_max_req_size); @@ -380,13 +377,13 @@ static void srpt_get_svc_entries(u64 ioc_guid, if (!slot || slot > 16) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); return; } if (slot > 2 || lo > hi || hi > 1) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); return; } @@ -437,7 +434,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad, break; default: rsp_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); break; } } @@ -476,7 +473,8 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, mad_wc->wc->pkey_index, 0, IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA, - GFP_KERNEL); + GFP_KERNEL, + IB_MGMT_BASE_VERSION); if (IS_ERR(rsp)) goto err_rsp; @@ -493,11 +491,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, break; case IB_MGMT_METHOD_SET: dm_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); break; default: dm_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); break; } @@ -546,7 +544,8 @@ static int srpt_refresh_port(struct srpt_port *sport) sport->sm_lid = port_attr.sm_lid; sport->lid = port_attr.lid; - ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid); + ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid, + NULL); if (ret) goto err_query_port; @@ -783,7 +782,7 @@ static int srpt_post_recv(struct srpt_device *sdev, list.addr = 
ioctx->ioctx.dma; list.length = srp_max_req_size; - list.lkey = sdev->mr->lkey; + list.lkey = sdev->pd->local_dma_lkey; wr.next = NULL; wr.sg_list = &list; @@ -818,7 +817,7 @@ static int srpt_post_send(struct srpt_rdma_ch *ch, list.addr = ioctx->ioctx.dma; list.length = len; - list.lkey = sdev->mr->lkey; + list.lkey = sdev->pd->local_dma_lkey; wr.next = NULL; wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index); @@ -1206,7 +1205,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, while (rsize > 0 && tsize > 0) { sge->addr = dma_addr; - sge->lkey = ch->sport->sdev->mr->lkey; + sge->lkey = ch->sport->sdev->pd->local_dma_lkey; if (rsize >= dma_len) { sge->length = @@ -1334,12 +1333,12 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) BUG_ON(ch->sess == NULL); - target_put_sess_cmd(ch->sess, &ioctx->cmd); + target_put_sess_cmd(&ioctx->cmd); goto out; } pr_debug("Aborting cmd with state %d and tag %lld\n", state, - ioctx->tag); + ioctx->cmd.tag); switch (state) { case SRPT_STATE_NEW: @@ -1365,11 +1364,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) * not been received in time. */ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); - target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); + target_put_sess_cmd(&ioctx->cmd); break; case SRPT_STATE_MGMT_RSP_SENT: srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); - target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); + target_put_sess_cmd(&ioctx->cmd); break; default: WARN(1, "Unexpected command state (%d)", state); @@ -1387,7 +1386,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) { struct srpt_send_ioctx *ioctx; enum srpt_command_state state; - struct se_cmd *cmd; u32 index; atomic_inc(&ch->sq_wr_avail); @@ -1395,7 +1393,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) index = idx_from_wr_id(wr_id); ioctx = ch->ioctx_ring[index]; state = srpt_get_cmd_state(ioctx); - cmd = &ioctx->cmd; WARN_ON(state != SRPT_STATE_CMD_RSP_SENT && state != SRPT_STATE_MGMT_RSP_SENT @@ -1472,10 +1469,8 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, struct srpt_send_ioctx *ioctx, enum srpt_opcode opcode) { - struct se_cmd *cmd; enum srpt_command_state state; - cmd = &ioctx->cmd; state = srpt_get_cmd_state(ioctx); switch (opcode) { case SRPT_RDMA_READ_LAST: @@ -1539,7 +1534,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, memset(srp_rsp, 0, sizeof *srp_rsp); srp_rsp->opcode = SRP_RSP; srp_rsp->req_lim_delta = - __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srp_rsp->tag = tag; srp_rsp->status = status; @@ -1589,8 +1584,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, memset(srp_rsp, 0, sizeof *srp_rsp); srp_rsp->opcode = SRP_RSP; - srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 - + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->req_lim_delta = + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srp_rsp->tag = tag; srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; @@ -1634,7 +1629,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) switch (len) { case 8: if ((*((__be64 *)lun) & - __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) + cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) goto out_err; break; case 4: @@ -1679,7 +1674,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd) struct srpt_send_ioctx *ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); - return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); + return target_put_sess_cmd(&ioctx->cmd); } /** @@ -1701,7 
+1696,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch, srp_cmd = recv_ioctx->ioctx.buf; cmd = &send_ioctx->cmd; - send_ioctx->tag = srp_cmd->tag; + cmd->tag = srp_cmd->tag; switch (srp_cmd->task_attr) { case SRP_CMD_SIMPLE_Q: @@ -1772,7 +1767,7 @@ static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag) for (i = 0; i < ch->rq_size; ++i) { target = ch->ioctx_ring[i]; if (target->cmd.se_lun == ioctx->cmd.se_lun && - target->tag == tag && + target->cmd.tag == tag && srpt_get_cmd_state(target) != SRPT_STATE_DONE) { ret = 0; /* now let the target core abort &target->cmd; */ @@ -1831,7 +1826,7 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch, srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT); - send_ioctx->tag = srp_tsk->tag; + send_ioctx->cmd.tag = srp_tsk->tag; tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func); if (tcm_tmr < 0) { send_ioctx->cmd.se_tmr_req->response = @@ -2080,6 +2075,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) struct srpt_port *sport = ch->sport; struct srpt_device *sdev = sport->sdev; u32 srp_sq_size = sport->port_attrib.srp_sq_size; + struct ib_cq_init_attr cq_attr = {}; int ret; WARN_ON(ch->rq_size < 1); @@ -2090,8 +2086,9 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) goto out; retry: + cq_attr.cqe = ch->rq_size + srp_sq_size; ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, - ch->rq_size + srp_sq_size, 0); + &cq_attr); if (IS_ERR(ch->cq)) { ret = PTR_ERR(ch->cq); pr_err("failed to create CQ cqe= %d ret= %d\n", @@ -2176,12 +2173,9 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) */ static void __srpt_close_ch(struct srpt_rdma_ch *ch) { - struct srpt_device *sdev; enum rdma_ch_state prev_state; unsigned long flags; - sdev = ch->sport->sdev; - spin_lock_irqsave(&ch->spinlock, flags); prev_state = ch->state; switch (prev_state) { @@ -2454,8 +2448,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, } if (it_iu_len > srp_max_req_size || it_iu_len < 64) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); ret = -EINVAL; pr_err("rejected SRP_LOGIN_REQ because its" " length (%d bytes) is out of range (%d .. 
%d)\n", @@ -2464,8 +2458,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, } if (!sport->enabled) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); ret = -EINVAL; pr_err("rejected SRP_LOGIN_REQ because the target port" " has not yet been enabled\n"); @@ -2510,8 +2504,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) || *(__be64 *)(req->target_port_id + 8) != cpu_to_be64(srpt_service_guid)) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); ret = -ENOMEM; pr_err("rejected SRP_LOGIN_REQ because it" " has an invalid target port identifier.\n"); @@ -2520,8 +2514,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ch = kzalloc(sizeof *ch, GFP_KERNEL); if (!ch) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); ret = -ENOMEM; goto reject; @@ -2557,8 +2551,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = srpt_create_ch_ib(ch); if (ret) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because creating" " a new RDMA channel failed.\n"); goto free_ring; @@ -2566,8 +2560,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = srpt_ch_qp_rtr(ch, ch->qp); if (ret) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because enabling" " RTR failed (error code = %d)\n", ret); goto destroy_ib; @@ -2585,15 +2578,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (!nacl) { pr_info("Rejected login because no ACL has been" " configured yet for initiator %s.\n", ch->sess_name); - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); goto destroy_ib; } ch->sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(ch->sess)) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_debug("Failed to create session\n"); goto deregister_session; } @@ -2609,8 +2602,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, rsp->max_it_iu_len = req->req_it_iu_len; rsp->max_ti_iu_len = req->req_it_iu_len; ch->max_ti_iu_len = it_iu_len; - rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT - | SRP_BUF_FORMAT_INDIRECT); + rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); rsp->req_lim_delta = cpu_to_be32(ch->rq_size); atomic_set(&ch->req_lim, ch->rq_size); atomic_set(&ch->req_lim_delta, 0); @@ -2660,8 +2653,8 @@ free_ch: reject: rej->opcode = SRP_LOGIN_REJ; rej->tag = req->tag; - rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT - | SRP_BUF_FORMAT_INDIRECT); + rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, (void *)rej, sizeof *rej); @@ -2828,7 +2821,7 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) static int 
srpt_perform_rdmas(struct srpt_rdma_ch *ch, struct srpt_send_ioctx *ioctx) { - struct ib_send_wr wr; + struct ib_rdma_wr wr; struct ib_send_wr *bad_wr; struct rdma_iu *riu; int i; @@ -2856,29 +2849,29 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, for (i = 0; i < n_rdma; ++i, ++riu) { if (dir == DMA_FROM_DEVICE) { - wr.opcode = IB_WR_RDMA_WRITE; - wr.wr_id = encode_wr_id(i == n_rdma - 1 ? + wr.wr.opcode = IB_WR_RDMA_WRITE; + wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ? SRPT_RDMA_WRITE_LAST : SRPT_RDMA_MID, ioctx->ioctx.index); } else { - wr.opcode = IB_WR_RDMA_READ; - wr.wr_id = encode_wr_id(i == n_rdma - 1 ? + wr.wr.opcode = IB_WR_RDMA_READ; + wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ? SRPT_RDMA_READ_LAST : SRPT_RDMA_MID, ioctx->ioctx.index); } - wr.next = NULL; - wr.wr.rdma.remote_addr = riu->raddr; - wr.wr.rdma.rkey = riu->rkey; - wr.num_sge = riu->sge_cnt; - wr.sg_list = riu->sge; + wr.wr.next = NULL; + wr.remote_addr = riu->raddr; + wr.rkey = riu->rkey; + wr.wr.num_sge = riu->sge_cnt; + wr.wr.sg_list = riu->sge; /* only get completion event for the last rdma write */ if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) - wr.send_flags = IB_SEND_SIGNALED; + wr.wr.send_flags = IB_SEND_SIGNALED; - ret = ib_post_send(ch->qp, &wr, &bad_wr); + ret = ib_post_send(ch->qp, &wr.wr, &bad_wr); if (ret) break; } @@ -2887,11 +2880,11 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n", __func__, __LINE__, ret, i, n_rdma); if (ret && i > 0) { - wr.num_sge = 0; - wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); - wr.send_flags = IB_SEND_SIGNALED; + wr.wr.num_sge = 0; + wr.wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); + wr.wr.send_flags = IB_SEND_SIGNALED; while (ch->state == CH_LIVE && - ib_post_send(ch->qp, &wr, &bad_wr) != 0) { + ib_post_send(ch->qp, &wr.wr, &bad_wr) != 0) { pr_info("Trying to abort failed RDMA transfer [%d]\n", ioctx->ioctx.index); msleep(1000); @@ -2979,7 +2972,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd) case CH_DRAINING: case CH_RELEASING: pr_debug("cmd with tag %lld: channel disconnecting\n", - ioctx->tag); + ioctx->cmd.tag); srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); ret = -EINVAL; goto out; @@ -3054,27 +3047,27 @@ static void srpt_queue_response(struct se_cmd *cmd) ret = srpt_xfer_data(ch, ioctx); if (ret) { pr_err("xfer_data failed for tag %llu\n", - ioctx->tag); + ioctx->cmd.tag); return; } } if (state != SRPT_STATE_MGMT) - resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag, + resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag, cmd->scsi_status); else { srp_tm_status = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, - ioctx->tag); + ioctx->cmd.tag); } ret = srpt_post_send(ch, ioctx, resp_len); if (ret) { pr_err("sending cmd response failed for tag %llu\n", - ioctx->tag); + ioctx->cmd.tag); srpt_unmap_sg_to_ib_sge(ch, ioctx); srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); - target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); + target_put_sess_cmd(&ioctx->cmd); } } @@ -3217,10 +3210,6 @@ static void srpt_add_one(struct ib_device *device) if (IS_ERR(sdev->pd)) goto free_dev; - sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(sdev->mr)) - goto err_pd; - sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr); srq_attr.event_handler = srpt_srq_event; @@ -3232,7 +3221,7 @@ static void srpt_add_one(struct ib_device *device) sdev->srq = ib_create_srq(sdev->pd, 
&srq_attr); if (IS_ERR(sdev->srq)) - goto err_mr; + goto err_pd; pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n", __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr, @@ -3256,7 +3245,7 @@ static void srpt_add_one(struct ib_device *device) * in the system as service_id; therefore, the target_id will change * if this HCA is gone bad and replaced by different HCA */ - if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL)) + if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0)) goto err_cm; INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, @@ -3317,8 +3306,6 @@ err_cm: ib_destroy_cm_id(sdev->cm_id); err_srq: ib_destroy_srq(sdev->srq); -err_mr: - ib_dereg_mr(sdev->mr); err_pd: ib_dealloc_pd(sdev->pd); free_dev: @@ -3332,12 +3319,11 @@ err: /** * srpt_remove_one() - InfiniBand device removal callback function. */ -static void srpt_remove_one(struct ib_device *device) +static void srpt_remove_one(struct ib_device *device, void *client_data) { - struct srpt_device *sdev; + struct srpt_device *sdev = client_data; int i; - sdev = ib_get_client_data(device, &srpt_client); if (!sdev) { pr_info("%s(%s): nothing to do.\n", __func__, device->name); return; @@ -3364,7 +3350,6 @@ static void srpt_remove_one(struct ib_device *device) srpt_release_sdev(sdev); ib_destroy_srq(sdev->srq); - ib_dereg_mr(sdev->mr); ib_dealloc_pd(sdev->pd); srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, @@ -3394,11 +3379,6 @@ static char *srpt_get_fabric_name(void) return "srpt"; } -static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg) -{ - return SCSI_TRANSPORTID_PROTOCOLID_SRP; -} - static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) { struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); @@ -3411,69 +3391,6 @@ static u16 srpt_get_tag(struct se_portal_group *tpg) return 1; } -static u32 srpt_get_default_depth(struct se_portal_group *se_tpg) -{ - return 1; -} - -static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg, - struct se_node_acl *se_nacl, - struct t10_pr_registration *pr_reg, - int *format_code, unsigned char *buf) -{ - struct srpt_node_acl *nacl; - struct spc_rdma_transport_id *tr_id; - - nacl = container_of(se_nacl, struct srpt_node_acl, nacl); - tr_id = (void *)buf; - tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP; - memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id)); - return sizeof(*tr_id); -} - -static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg, - struct se_node_acl *se_nacl, - struct t10_pr_registration *pr_reg, - int *format_code) -{ - *format_code = 0; - return sizeof(struct spc_rdma_transport_id); -} - -static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg, - const char *buf, u32 *out_tid_len, - char **port_nexus_ptr) -{ - struct spc_rdma_transport_id *tr_id; - - *port_nexus_ptr = NULL; - *out_tid_len = sizeof(struct spc_rdma_transport_id); - tr_id = (void *)buf; - return (char *)tr_id->i_port_id; -} - -static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) -{ - struct srpt_node_acl *nacl; - - nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); - if (!nacl) { - pr_err("Unable to allocate struct srpt_node_acl\n"); - return NULL; - } - - return &nacl->nacl; -} - -static void srpt_release_fabric_acl(struct se_portal_group *se_tpg, - struct se_node_acl *se_nacl) -{ - struct srpt_node_acl *nacl; - - nacl = container_of(se_nacl, struct srpt_node_acl, nacl); - kfree(nacl); -} - static u32 
srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) { return 1; @@ -3547,14 +3464,6 @@ static void srpt_set_default_node_attrs(struct se_node_acl *nacl) { } -static u32 srpt_get_task_tag(struct se_cmd *se_cmd) -{ - struct srpt_send_ioctx *ioctx; - - ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); - return ioctx->tag; -} - /* Note: only used from inside debug printk's by the TCM core. */ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) { @@ -3597,40 +3506,19 @@ out: * configfs callback function invoked for * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id */ -static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, - struct config_group *group, - const char *name) +static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name) { - struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); - struct se_node_acl *se_nacl, *se_nacl_new; - struct srpt_node_acl *nacl; - int ret = 0; - u32 nexus_depth = 1; + struct srpt_port *sport = + container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1); + struct srpt_node_acl *nacl = + container_of(se_nacl, struct srpt_node_acl, nacl); u8 i_port_id[16]; if (srpt_parse_i_port_id(i_port_id, name) < 0) { pr_err("invalid initiator port ID %s\n", name); - ret = -EINVAL; - goto err; + return -EINVAL; } - se_nacl_new = srpt_alloc_fabric_acl(tpg); - if (!se_nacl_new) { - ret = -ENOMEM; - goto err; - } - /* - * nacl_new may be released by core_tpg_add_initiator_node_acl() - * when converting a node ACL from demo mode to explict - */ - se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name, - nexus_depth); - if (IS_ERR(se_nacl)) { - ret = PTR_ERR(se_nacl); - goto err; - } - /* Locate our struct srpt_node_acl and set sdev and i_port_id. 
*/ - nacl = container_of(se_nacl, struct srpt_node_acl, nacl); memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); nacl->sport = sport; @@ -3638,45 +3526,37 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, list_add_tail(&nacl->list, &sport->port_acl_list); spin_unlock_irq(&sport->port_acl_lock); - return se_nacl; -err: - return ERR_PTR(ret); + return 0; } /* * configfs callback function invoked for * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id */ -static void srpt_drop_nodeacl(struct se_node_acl *se_nacl) +static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl) { - struct srpt_node_acl *nacl; - struct srpt_device *sdev; - struct srpt_port *sport; + struct srpt_node_acl *nacl = + container_of(se_nacl, struct srpt_node_acl, nacl); + struct srpt_port *sport = nacl->sport; - nacl = container_of(se_nacl, struct srpt_node_acl, nacl); - sport = nacl->sport; - sdev = sport->sdev; spin_lock_irq(&sport->port_acl_lock); list_del(&nacl->list); spin_unlock_irq(&sport->port_acl_lock); - core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1); - srpt_release_fabric_acl(NULL, se_nacl); } -static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size( - struct se_portal_group *se_tpg, - char *page) +static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item, + char *page) { + struct se_portal_group *se_tpg = attrib_to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size); } -static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( - struct se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = attrib_to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); unsigned long val; int ret; @@ -3701,22 +3581,19 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( return count; } -TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR); - -static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size( - struct se_portal_group *se_tpg, - char *page) +static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item, + char *page) { + struct se_portal_group *se_tpg = attrib_to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size); } -static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( - struct se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = attrib_to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); unsigned long val; int ret; @@ -3741,22 +3618,19 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( return count; } -TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR); - -static ssize_t srpt_tpg_attrib_show_srp_sq_size( - struct se_portal_group *se_tpg, - char *page) +static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item, + char *page) { + struct se_portal_group *se_tpg = attrib_to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size); } -static ssize_t srpt_tpg_attrib_store_srp_sq_size( - struct 
se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = attrib_to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); unsigned long val; int ret; @@ -3781,29 +3655,29 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size( return count; } -TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR); +CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size); +CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size); +CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size); static struct configfs_attribute *srpt_tpg_attrib_attrs[] = { - &srpt_tpg_attrib_srp_max_rdma_size.attr, - &srpt_tpg_attrib_srp_max_rsp_size.attr, - &srpt_tpg_attrib_srp_sq_size.attr, + &srpt_tpg_attrib_attr_srp_max_rdma_size, + &srpt_tpg_attrib_attr_srp_max_rsp_size, + &srpt_tpg_attrib_attr_srp_sq_size, NULL, }; -static ssize_t srpt_tpg_show_enable( - struct se_portal_group *se_tpg, - char *page) +static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page) { + struct se_portal_group *se_tpg = to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0); } -static ssize_t srpt_tpg_store_enable( - struct se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t srpt_tpg_enable_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = to_tpg(item); struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); unsigned long tmp; int ret; @@ -3826,10 +3700,10 @@ static ssize_t srpt_tpg_store_enable( return count; } -TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR); +CONFIGFS_ATTR(srpt_tpg_, enable); static struct configfs_attribute *srpt_tpg_attrs[] = { - &srpt_tpg_enable.attr, + &srpt_tpg_attr_enable, NULL, }; @@ -3845,8 +3719,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, int res; /* Initialize sport->port_wwn and sport->port_tpg_1 */ - res = core_tpg_register(&srpt_template, &sport->port_wwn, - &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); + res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP); if (res) return ERR_PTR(res); @@ -3900,36 +3773,29 @@ static void srpt_drop_tport(struct se_wwn *wwn) pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item)); } -static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf, - char *buf) +static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf) { return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); } -TF_WWN_ATTR_RO(srpt, version); +CONFIGFS_ATTR_RO(srpt_wwn_, version); static struct configfs_attribute *srpt_wwn_attrs[] = { - &srpt_wwn_version.attr, + &srpt_wwn_attr_version, NULL, }; static const struct target_core_fabric_ops srpt_template = { .module = THIS_MODULE, .name = "srpt", + .node_acl_size = sizeof(struct srpt_node_acl), .get_fabric_name = srpt_get_fabric_name, - .get_fabric_proto_ident = srpt_get_fabric_proto_ident, .tpg_get_wwn = srpt_get_fabric_wwn, .tpg_get_tag = srpt_get_tag, - .tpg_get_default_depth = srpt_get_default_depth, - .tpg_get_pr_transport_id = srpt_get_pr_transport_id, - .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len, - .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id, .tpg_check_demo_mode = srpt_check_false, .tpg_check_demo_mode_cache = srpt_check_true, 
.tpg_check_demo_mode_write_protect = srpt_check_true, .tpg_check_prod_mode_write_protect = srpt_check_false, - .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl, - .tpg_release_fabric_acl = srpt_release_fabric_acl, .tpg_get_inst_index = srpt_tpg_get_inst_index, .release_cmd = srpt_release_cmd, .check_stop_free = srpt_check_stop_free, @@ -3940,7 +3806,6 @@ static const struct target_core_fabric_ops srpt_template = { .write_pending = srpt_write_pending, .write_pending_status = srpt_write_pending_status, .set_default_node_attributes = srpt_set_default_node_attrs, - .get_task_tag = srpt_get_task_tag, .get_cmd_state = srpt_get_tcm_cmd_state, .queue_data_in = srpt_queue_data_in, .queue_status = srpt_queue_status, @@ -3954,12 +3819,8 @@ static const struct target_core_fabric_ops srpt_template = { .fabric_drop_wwn = srpt_drop_tport, .fabric_make_tpg = srpt_make_tpg, .fabric_drop_tpg = srpt_drop_tpg, - .fabric_post_link = NULL, - .fabric_pre_unlink = NULL, - .fabric_make_np = NULL, - .fabric_drop_np = NULL, - .fabric_make_nodeacl = srpt_make_nodeacl, - .fabric_drop_nodeacl = srpt_drop_nodeacl, + .fabric_init_nodeacl = srpt_init_nodeacl, + .fabric_cleanup_nodeacl = srpt_cleanup_nodeacl, .tfc_wwn_attrs = srpt_wwn_attrs, .tfc_tpg_base_attrs = srpt_tpg_attrs, diff --git a/kernel/drivers/infiniband/ulp/srpt/ib_srpt.h b/kernel/drivers/infiniband/ulp/srpt/ib_srpt.h index 3dae15690..5faad8acd 100644 --- a/kernel/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/kernel/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -238,14 +238,13 @@ struct srpt_send_ioctx { bool rdma_aborted; struct se_cmd cmd; struct completion tx_done; - u64 tag; int sg_cnt; int mapped_sg_count; u16 n_rdma_ius; u8 n_rdma; u8 n_rbuf; bool queue_status_only; - u8 sense_data[SCSI_SENSE_BUFFERSIZE]; + u8 sense_data[TRANSPORT_SENSE_BUFFER]; }; /** @@ -394,7 +393,6 @@ struct srpt_port { struct srpt_device { struct ib_device *device; struct ib_pd *pd; - struct ib_mr *mr; struct ib_srq *srq; struct ib_cm_id *cm_id; struct ib_device_attr dev_attr; @@ -410,34 +408,16 @@ struct srpt_device { /** * struct srpt_node_acl - Per-initiator ACL data (managed via configfs). + * @nacl: Target core node ACL information. * @i_port_id: 128-bit SRP initiator port ID. * @sport: port information. - * @nacl: Target core node ACL information. * @list: Element of the per-HCA ACL list. */ struct srpt_node_acl { + struct se_node_acl nacl; u8 i_port_id[16]; struct srpt_port *sport; - struct se_node_acl nacl; struct list_head list; }; -/* - * SRP-releated SCSI persistent reservation definitions. - * - * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction). - * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using - * SCSI over an RDMA interface). - */ - -enum { - SCSI_TRANSPORTID_PROTOCOLID_SRP = 4, -}; - -struct spc_rdma_transport_id { - uint8_t protocol_identifier; - uint8_t reserved[7]; - uint8_t i_port_id[16]; -}; - #endif /* IB_SRPT_H */ |
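
Note on the LUN conversion visible in the srp_queuecommand() and srp_send_tsk_mgmt() hunks above: the open-coded cpu_to_be64((u64) lun << 48) encoding is replaced by the SCSI core helpers, with cmd->lun and tsk_mgmt->lun now carrying a struct scsi_lun, and scsilun_to_int() used when printing the AER LUN. A minimal sketch of the round trip, assuming <scsi/scsi_common.h> is where the helpers live on this kernel; the wrapper function itself is illustrative, not part of the patch:

	#include <scsi/scsi_proto.h>
	#include <scsi/scsi_common.h>

	/* Encode a flat LUN and read it back; for small single-level LUNs the
	 * first two bytes match the old shift-by-48 big-endian encoding. */
	static u64 lun_round_trip(u64 lun)
	{
		struct scsi_lun slun;

		int_to_scsilun(lun, &slun);	/* replaces cpu_to_be64(lun << 48) */
		return scsilun_to_int(&slun);	/* as used for the AER printk */
	}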
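
The lkey hunks (srpt_post_recv(), srpt_post_send(), srpt_map_sg_to_ib_sge(), and the SRP initiator's target->lkey assignment) all switch from the lkey of a per-device ib_get_dma_mr() MR to the protection domain's local_dma_lkey, so the drivers no longer allocate a DMA MR just for local access. A minimal sketch of filling an SGE the new way; the helper name and parameters are illustrative:

	#include <rdma/ib_verbs.h>

	static void fill_local_sge(struct ib_sge *sge, struct ib_pd *pd,
				   u64 dma_addr, u32 len)
	{
		sge->addr   = dma_addr;
		sge->length = len;
		sge->lkey   = pd->local_dma_lkey;	/* was: sdev->mr->lkey */
	}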
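
The srpt_perform_rdmas() hunk moves from struct ib_send_wr with the wr.rdma union to the struct ib_rdma_wr wrapper: generic send fields now live in the embedded .wr member, the RDMA-specific remote address and rkey sit at the top level, and ib_post_send() is given &rdma_wr.wr. A minimal sketch of posting one RDMA WRITE in that style; the helper itself is illustrative:

	#include <rdma/ib_verbs.h>

	static int post_one_rdma_write(struct ib_qp *qp, struct ib_sge *sg_list,
				       int num_sge, u64 remote_addr, u32 rkey,
				       u64 wr_id)
	{
		struct ib_rdma_wr rdma_wr = {};
		struct ib_send_wr *bad_wr;

		rdma_wr.wr.wr_id      = wr_id;		/* generic fields in .wr */
		rdma_wr.wr.opcode     = IB_WR_RDMA_WRITE;
		rdma_wr.wr.sg_list    = sg_list;
		rdma_wr.wr.num_sge    = num_sge;
		rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
		rdma_wr.remote_addr   = remote_addr;	/* RDMA fields at top level */
		rdma_wr.rkey          = rkey;

		return ib_post_send(qp, &rdma_wr.wr, &bad_wr);
	}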
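
The configfs hunks drop the TF_TPG_ATTRIB_ATTR()/TF_TPG_BASE_ATTR()/TF_WWN_ATTR_RO() wrappers in favour of CONFIGFS_ATTR()/CONFIGFS_ATTR_RO(): the show/store callbacks now take a struct config_item and recover the portal group via attrib_to_tpg() or to_tpg(), and CONFIGFS_ATTR(prefix_, name) emits a prefix_attr_name configfs_attribute for the attrs[] table. A sketch of the read-only pattern, using a hypothetical demo_tpg_attrib_ prefix and "foo" attribute name (srpt_port and port_attrib come from the driver):

	static ssize_t demo_tpg_attrib_foo_show(struct config_item *item, char *page)
	{
		struct se_portal_group *se_tpg = attrib_to_tpg(item);
		struct srpt_port *sport = container_of(se_tpg, struct srpt_port,
						       port_tpg_1);

		return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
	}
	CONFIGFS_ATTR_RO(demo_tpg_attrib_, foo);	/* emits demo_tpg_attrib_attr_foo */

	static struct configfs_attribute *demo_tpg_attrib_attrs[] = {
		&demo_tpg_attrib_attr_foo,
		NULL,
	};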