author    | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit    | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/infiniband/ulp/iser
parent    | f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The collision was resolved by dropping that patch, since its logic was
already present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/infiniband/ulp/iser')
-rw-r--r-- | kernel/drivers/infiniband/ulp/iser/iscsi_iser.c     |  96
-rw-r--r-- | kernel/drivers/infiniband/ulp/iser/iscsi_iser.h     | 214
-rw-r--r-- | kernel/drivers/infiniband/ulp/iser/iser_initiator.c |  81
-rw-r--r-- | kernel/drivers/infiniband/ulp/iser/iser_memory.c    | 672
-rw-r--r-- | kernel/drivers/infiniband/ulp/iser/iser_verbs.c     | 383
5 files changed, 650 insertions(+), 796 deletions(-)
diff --git a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c
index c933d882c..9080161e0 100644
--- a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -74,36 +74,44 @@
 #include "iscsi_iser.h"
 
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
+
 static struct scsi_host_template iscsi_iser_sht;
 static struct iscsi_transport iscsi_iser_transport;
 static struct scsi_transport_template *iscsi_iser_scsi_transport;
+static struct workqueue_struct *release_wq;
+struct iser_global ig;
+
+int iser_debug_level = 0;
+module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
 
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512");
 
-int iser_debug_level = 0;
-bool iser_pi_enable = false;
-int iser_pi_guard = 1;
+unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
+module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
 
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
-MODULE_VERSION(DRV_VER);
-
-module_param_named(debug_level, iser_debug_level, int, 0644);
-MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+		 "Always register memory, even for continuous memory regions (default:true)");
 
-module_param_named(pi_enable, iser_pi_enable, bool, 0644);
+bool iser_pi_enable = false;
+module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
 
-module_param_named(pi_guard, iser_pi_guard, int, 0644);
+int iser_pi_guard;
+module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
 MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
 
-static struct workqueue_struct *release_wq;
-struct iser_global ig;
-
 /*
- * iscsi_iser_recv() - Process a successfull recv completion
+ * iscsi_iser_recv() - Process a successful recv completion
  * @conn:         iscsi connection
  * @hdr:          iscsi header
  * @rx_data:      buffer containing receive data payload
@@ -118,7 +126,6 @@ iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 {
 	int rc = 0;
 	int datalen;
-	int ahslen;
 
 	/* verify PDU length */
 	datalen = ntoh24(hdr->dlength);
@@ -133,9 +140,6 @@ iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
 			 datalen, rx_data_len);
 
-	/* read AHS */
-	ahslen = hdr->hlength * 4;
-
 	rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
 	if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
 		goto error;
@@ -201,11 +205,12 @@ iser_initialize_task_headers(struct iscsi_task *task,
 		goto out;
 	}
 
+	tx_desc->wr_idx = 0;
 	tx_desc->mapped = true;
 	tx_desc->dma_addr = dma_addr;
 	tx_desc->tx_sg[0].addr   = tx_desc->dma_addr;
 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
-	tx_desc->tx_sg[0].lkey   = device->mr->lkey;
+	tx_desc->tx_sg[0].lkey   = device->pd->local_dma_lkey;
 
 	iser_task->iser_conn = iser_conn;
 out:
@@ -626,6 +631,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	if (ep) {
 		iser_conn = ep->dd_data;
 		max_cmds = iser_conn->max_cmds;
+		shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
+		shost->max_sectors = iser_conn->scsi_max_sectors;
 
 		mutex_lock(&iser_conn->state_mutex);
 		if (iser_conn->state != ISER_CONN_UP) {
@@ -644,6 +651,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 					   SHOST_DIX_GUARD_CRC);
 		}
 
+		/*
+		 * Limit the sg_tablesize and max_sectors based on the device
+		 * max fastreg page list length.
+		 */
+		shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
+			ib_conn->device->dev_attr.max_fast_reg_page_list_len);
+		shost->max_sectors = min_t(unsigned int,
+			1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
+
 		if (iscsi_host_add(shost,
 				   ib_conn->device->ib_device->dma_device)) {
 			mutex_unlock(&iser_conn->state_mutex);
@@ -746,15 +762,7 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 	stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
 	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
 	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
-	stats->custom_length = 4;
-	strcpy(stats->custom[0].desc, "qp_tx_queue_full");
-	stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */
-	strcpy(stats->custom[1].desc, "fmr_map_not_avail");
-	stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */;
-	strcpy(stats->custom[2].desc, "eh_abort_cnt");
-	stats->custom[2].value = conn->eh_abort_cnt;
-	strcpy(stats->custom[3].desc, "fmr_unalign_cnt");
-	stats->custom[3].value = conn->fmr_unalign_cnt;
+	stats->custom_length = 0;
 }
 
 static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
@@ -843,10 +851,9 @@ failure:
 static int
 iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
-	struct iser_conn *iser_conn;
+	struct iser_conn *iser_conn = ep->dd_data;
 	int rc;
 
-	iser_conn = ep->dd_data;
 	rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
 						       msecs_to_jiffies(timeout_ms));
 	/* if conn establishment failed, return error code to iscsi */
@@ -858,7 +865,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 		mutex_unlock(&iser_conn->state_mutex);
 	}
 
-	iser_info("ib conn %p rc = %d\n", iser_conn, rc);
+	iser_info("iser conn %p rc = %d\n", iser_conn, rc);
 
 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -880,11 +887,9 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 static void
 iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 {
-	struct iser_conn *iser_conn;
+	struct iser_conn *iser_conn = ep->dd_data;
 
-	iser_conn = ep->dd_data;
-	iser_info("ep %p iser conn %p state %d\n",
-		  ep, iser_conn, iser_conn->state);
+	iser_info("ep %p iser conn %p\n", ep, iser_conn);
 
 	mutex_lock(&iser_conn->state_mutex);
 	iser_conn_terminate(iser_conn);
@@ -904,6 +909,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 		mutex_unlock(&iser_conn->state_mutex);
 		iser_conn_release(iser_conn);
 	}
+
 	iscsi_destroy_endpoint(ep);
 }
 
@@ -961,19 +967,27 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 	return 0;
 }
 
+static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
+{
+	blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+
+	return 0;
+}
+
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
 	.name                   = "iSCSI Initiator over iSER",
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= scsi_change_queue_depth,
-	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
-	.max_sectors            = 1024,
+	.sg_tablesize           = ISCSI_ISER_DEF_SG_TABLESIZE,
+	.max_sectors		= ISER_DEF_MAX_SECTORS,
 	.cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc		= iscsi_target_alloc,
-	.use_clustering         = DISABLE_CLUSTERING,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.slave_alloc            = iscsi_iser_slave_alloc,
 	.proc_name              = "iscsi_iser",
 	.this_id                = -1,
 	.track_queue_depth	= 1,
@@ -1078,7 +1092,7 @@ static void __exit iser_exit(void)
 
 	if (!connlist_empty) {
 		iser_err("Error cleanup stage completed but we still have iser "
-			 "connections, destroying them anyway.\n");
+			 "connections, destroying them anyway\n");
 		list_for_each_entry_safe(iser_conn, n, &ig.connlist,
 					 conn_list) {
 			iser_conn_release(iser_conn);
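For reference, the clamping added to iscsi_iser_session_create() above bounds the SG table by the device's fast-registration limit and then re-derives max_sectors from it. A minimal user-space sketch of the same arithmetic (hypothetical values; min_t open-coded; 4 KB pages assumed):

```c
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Open-coded stand-in for the kernel's min_t() used in the hunk above. */
static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int sg_tablesize = 128;  /* e.g. ISCSI_ISER_DEF_SG_TABLESIZE */
	unsigned int max_fr_pages = 64;   /* hypothetical device limit */
	unsigned int max_sectors;

	/* Clamp the SG table to what the device can fast-register... */
	sg_tablesize = min_u(sg_tablesize, max_fr_pages);
	/* ...then derive max_sectors: one 4 KB page holds eight 512 B sectors. */
	max_sectors = min_u(1024, (sg_tablesize * PAGE_SIZE) >> 9);

	printf("sg_tablesize=%u max_sectors=%u\n", sg_tablesize, max_sectors);
	return 0;
}
```

With a 64-page device limit this works out to 512 sectors, i.e. 256 KB per command.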
diff --git a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h
index d2b6caf76..8a5998e6a 100644
--- a/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/kernel/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -98,8 +98,13 @@
 #define SHIFT_4K	12
 #define SIZE_4K	(1ULL << SHIFT_4K)
 #define MASK_4K	(~(SIZE_4K-1))
-					/* support up to 512KB in one RDMA */
-#define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
+
+/* Default support is 512KB I/O size */
+#define ISER_DEF_MAX_SECTORS		1024
+#define ISCSI_ISER_DEF_SG_TABLESIZE	((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
+/* Maximum support is 8MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE	((16384 * 512) >> SHIFT_4K)
+
 #define ISER_DEF_XMIT_CMDS_DEFAULT		512
 #if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
 #define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
@@ -222,23 +227,19 @@ enum iser_data_dir {
  * @size:         num entries of this sg
  * @data_len:     total beffer byte len
  * @dma_nents:    returned by dma_map_sg
- * @orig_sg:      pointer to the original sg list (in case
- *                we used a copy)
- * @orig_size:    num entris of orig sg list
  */
 struct iser_data_buf {
 	struct scatterlist *sg;
-	unsigned int       size;
+	int                size;
 	unsigned long      data_len;
 	unsigned int       dma_nents;
-	struct scatterlist *orig_sg;
-	unsigned int       orig_size;
-  };
+};
 
 /* fwd declarations */
 struct iser_device;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
+struct iser_reg_resources;
 
 /**
  * struct iser_mem_reg - iSER memory registration info
@@ -259,6 +260,14 @@ enum iser_desc_type {
 	ISCSI_TX_DATAOUT
 };
 
+/* Maximum number of work requests per task:
+ * Data memory region local invalidate + fast registration
+ * Protection memory region local invalidate + fast registration
+ * Signature memory region local invalidate + fast registration
+ * PDU send
+ */
+#define ISER_MAX_WRS 7
+
 /**
  * struct iser_tx_desc - iSER TX descriptor (for send wr_id)
  *
@@ -271,6 +280,11 @@ enum iser_desc_type {
  *                 unsolicited data-out or control
  * @num_sge:       number sges used on this TX task
  * @mapped:        Is the task header mapped
+ * @wr_idx:        Current WR index
+ * @wrs:           Array of WRs per task
+ * @data_reg:      Data buffer registration details
+ * @prot_reg:      Protection buffer registration details
+ * @sig_attrs:     Signature attributes
  */
 struct iser_tx_desc {
 	struct iser_hdr              iser_header;
@@ -280,6 +294,15 @@ struct iser_tx_desc {
 	struct ib_sge		     tx_sg[2];
 	int                          num_sge;
 	bool			     mapped;
+	u8                           wr_idx;
+	union iser_wr {
+		struct ib_send_wr		send;
+		struct ib_reg_wr		fast_reg;
+		struct ib_sig_handover_wr	sig;
+	} wrs[ISER_MAX_WRS];
+	struct iser_mem_reg          data_reg;
+	struct iser_mem_reg          prot_reg;
+	struct ib_sig_attrs          sig_attrs;
 };
 
 #define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
@@ -326,6 +349,33 @@ struct iser_comp {
 };
 
 /**
+ * struct iser_device - Memory registration operations
+ *     per-device registration schemes
+ *
+ * @alloc_reg_res:     Allocate registration resources
+ * @free_reg_res:      Free registration resources
+ * @fast_reg_mem:      Register memory buffers
+ * @unreg_mem:         Un-register memory buffers
+ * @reg_desc_get:      Get a registration descriptor for pool
+ * @reg_desc_put:      Get a registration descriptor to pool
+ */
+struct iser_reg_ops {
+	int            (*alloc_reg_res)(struct ib_conn *ib_conn,
+					unsigned cmds_max,
+					unsigned int size);
+	void           (*free_reg_res)(struct ib_conn *ib_conn);
+	int            (*reg_mem)(struct iscsi_iser_task *iser_task,
+				  struct iser_data_buf *mem,
+				  struct iser_reg_resources *rsc,
+				  struct iser_mem_reg *reg);
+	void           (*unreg_mem)(struct iscsi_iser_task *iser_task,
+				    enum iser_data_dir cmd_dir);
+	struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
+	void           (*reg_desc_put)(struct ib_conn *ib_conn,
+				       struct iser_fr_desc *desc);
+};
+
+/**
  * struct iser_device - iSER device handle
  *
  * @ib_device:     RDMA device
@@ -338,11 +388,7 @@ struct iser_comp {
  * @comps_used:    Number of completion contexts used, Min between online
  *                 cpus and device max completion vectors
  * @comps:         Dinamically allocated array of completion handlers
- * Memory registration pool Function pointers (FMR or Fastreg):
- *     @iser_alloc_rdma_reg_res: Allocation of memory regions pool
- *     @iser_free_rdma_reg_res:  Free of memory regions pool
- *     @iser_reg_rdma_mem:       Memory registration routine
- *     @iser_unreg_rdma_mem:     Memory deregistration routine
+ * @reg_ops:       Registration ops
  */
 struct iser_device {
 	struct ib_device             *ib_device;
@@ -354,54 +400,69 @@ struct iser_device {
 	int                          refcount;
 	int			     comps_used;
 	struct iser_comp	     *comps;
-	int                          (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
-								unsigned cmds_max);
-	void                         (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
-	int                          (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
-							  enum iser_data_dir cmd_dir);
-	void                         (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
-							    enum iser_data_dir cmd_dir);
+	struct iser_reg_ops          *reg_ops;
 };
 
 #define ISER_CHECK_GUARD	0xc0
 #define ISER_CHECK_REFTAG	0x0f
 #define ISER_CHECK_APPTAG	0x30
 
-enum iser_reg_indicator {
-	ISER_DATA_KEY_VALID	= 1 << 0,
-	ISER_PROT_KEY_VALID	= 1 << 1,
-	ISER_SIG_KEY_VALID	= 1 << 2,
-	ISER_FASTREG_PROTECTED	= 1 << 3,
+/**
+ * struct iser_reg_resources - Fast registration recources
+ *
+ * @mr:         memory region
+ * @fmr_pool:   pool of fmrs
+ * @page_vec:   fast reg page list used by fmr pool
+ * @mr_valid:   is mr valid indicator
+ */
+struct iser_reg_resources {
+	union {
+		struct ib_mr             *mr;
+		struct ib_fmr_pool       *fmr_pool;
+	};
+	struct iser_page_vec             *page_vec;
+	u8				  mr_valid:1;
 };
 
 /**
  * struct iser_pi_context - Protection information context
  *
- * @prot_mr:        protection memory region
- * @prot_frpl:      protection fastreg page list
- * @sig_mr:         signature feature enabled memory region
+ * @rsc:             protection buffer registration resources
+ * @sig_mr:          signature enable memory region
+ * @sig_mr_valid:    is sig_mr valid indicator
+ * @sig_protected:   is region protected indicator
 */
 struct iser_pi_context {
-	struct ib_mr                   *prot_mr;
-	struct ib_fast_reg_page_list   *prot_frpl;
+	struct iser_reg_resources	rsc;
 	struct ib_mr                   *sig_mr;
+	u8                              sig_mr_valid:1;
+	u8                              sig_protected:1;
 };
 
 /**
- * struct fast_reg_descriptor - Fast registration descriptor
+ * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:           entry in connection fastreg pool
- * @data_mr:        data memory region
- * @data_frpl:      data fastreg page list
+ * @rsc:            data buffer registration resources
 * @pi_ctx:         protection information context
- * @reg_indicators: fast registration indicators
 */
-struct fast_reg_descriptor {
+struct iser_fr_desc {
 	struct list_head		  list;
-	struct ib_mr			 *data_mr;
-	struct ib_fast_reg_page_list     *data_frpl;
+	struct iser_reg_resources	  rsc;
 	struct iser_pi_context		 *pi_ctx;
-	u8				  reg_indicators;
+};
+
+/**
+ * struct iser_fr_pool: connection fast registration pool
+ *
+ * @list:                list of fastreg descriptors
+ * @lock:                protects fmr/fastreg pool
+ * @size:                size of the pool
+ */
+struct iser_fr_pool {
+	struct list_head        list;
+	spinlock_t              lock;
+	int                     size;
 };
 
 /**
@@ -417,15 +478,7 @@ struct fast_reg_descriptor {
 * @pi_support:          Indicate device T10-PI support
 * @beacon:              beacon send wr to signal all flush errors were drained
 * @flush_comp:          completes when all connection completions consumed
- * @lock:                protects fmr/fastreg pool
- * @union.fmr:
- *     @pool:            FMR pool for fast registrations
- *     @page_vec:        page vector to hold mapped commands pages
- *                       used for registration
- * @union.fastreg:
- *     @pool:            Fast registration descriptors pool for fast
- *                       registrations
- *     @pool_size:       Size of pool
+ * @fr_pool:             connection fast registration poool
 */
 struct ib_conn {
 	struct rdma_cm_id           *cma_id;
@@ -438,17 +491,7 @@ struct ib_conn {
 	bool                         pi_support;
 	struct ib_send_wr	     beacon;
 	struct completion	     flush_comp;
-	spinlock_t                   lock;
-	union {
-		struct {
-			struct ib_fmr_pool      *pool;
-			struct iser_page_vec    *page_vec;
-		} fmr;
-		struct {
-			struct list_head	 pool;
-			int			 pool_size;
-		} fastreg;
-	};
+	struct iser_fr_pool          fr_pool;
 };
 
 /**
@@ -479,6 +522,8 @@ struct ib_conn {
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
+ * @scsi_sg_tablesize: scsi host sg_tablesize
+ * @scsi_max_sectors: scsi host max sectors
 */
 struct iser_conn {
 	struct ib_conn		     ib_conn;
@@ -503,6 +548,8 @@ struct iser_conn {
 	unsigned int 		     rx_desc_head;
 	struct iser_rx_desc	     *rx_descs;
 	u32                          num_rx_descs;
+	unsigned short               scsi_sg_tablesize;
+	unsigned int 		     scsi_max_sectors;
 };
 
 /**
@@ -558,6 +605,10 @@ extern struct iser_global ig;
 extern int iser_debug_level;
 extern bool iser_pi_enable;
 extern int iser_pi_guard;
+extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
+
+int iser_assign_reg_ops(struct iser_device *device);
 
 int iser_send_control(struct iscsi_conn *conn,
 		      struct iscsi_task *task);
@@ -599,10 +650,10 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				     struct iser_data_buf *mem,
 				     enum iser_data_dir cmd_dir);
 
-int  iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
-			   enum iser_data_dir cmd_dir);
-int  iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
-			       enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem(struct iscsi_iser_task *task,
+		      enum iser_data_dir dir);
+void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
+			 enum iser_data_dir dir);
 
 int  iser_connect(struct iser_conn *iser_conn,
 		  struct sockaddr *src_addr,
@@ -632,15 +683,40 @@ int  iser_initialize_task_headers(struct iscsi_task *task,
 			struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 			      struct iscsi_session *session);
-int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max);
+int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
+			unsigned cmds_max,
+			unsigned int size);
 void iser_free_fmr_pool(struct ib_conn *ib_conn);
-int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
+int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+			    unsigned cmds_max,
+			    unsigned int size);
 void iser_free_fastreg_pool(struct ib_conn *ib_conn);
 u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 			     enum iser_data_dir cmd_dir, sector_t *sector);
-struct fast_reg_descriptor *
-iser_reg_desc_get(struct ib_conn *ib_conn);
+struct iser_fr_desc *
+iser_reg_desc_get_fr(struct ib_conn *ib_conn);
 void
-iser_reg_desc_put(struct ib_conn *ib_conn,
-		  struct fast_reg_descriptor *desc);
+iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+		     struct iser_fr_desc *desc);
+struct iser_fr_desc *
+iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
+void
+iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
+		      struct iser_fr_desc *desc);
+
+static inline struct ib_send_wr *
+iser_tx_next_wr(struct iser_tx_desc *tx_desc)
+{
+	struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
+	struct ib_send_wr *last_wr;
+
+	if (tx_desc->wr_idx) {
+		last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
+		last_wr->next = cur_wr;
+	}
+	tx_desc->wr_idx++;
+
+	return cur_wr;
+}
+
 #endif
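The new iser_tx_next_wr() helper above hands out WR slots from the per-task wrs[] array and links each new WR to the previous one, so a whole invalidate/register/send chain can be posted with a single ib_post_send(). A self-contained sketch of the same chaining idea (simplified stand-in types, not the real ib_send_wr API):

```c
#include <stdio.h>
#include <string.h>

/* Simplified, hypothetical stand-ins for ib_send_wr and iser_tx_desc. */
struct send_wr {
	struct send_wr *next;
	const char *opcode;
};

#define MAX_WRS 7	/* mirrors ISER_MAX_WRS */

struct tx_desc {
	unsigned char wr_idx;
	struct send_wr wrs[MAX_WRS];
};

/* Same idea as iser_tx_next_wr(): hand out the next slot and chain it
 * to the previous one, so one post-send call submits the whole list. */
static struct send_wr *tx_next_wr(struct tx_desc *tx)
{
	struct send_wr *cur = &tx->wrs[tx->wr_idx];

	if (tx->wr_idx)
		tx->wrs[tx->wr_idx - 1].next = cur;
	tx->wr_idx++;
	return cur;
}

int main(void)
{
	struct tx_desc tx;

	memset(&tx, 0, sizeof(tx));
	tx_next_wr(&tx)->opcode = "LOCAL_INV";	/* invalidate stale rkey */
	tx_next_wr(&tx)->opcode = "REG_MR";	/* fast registration */
	tx_next_wr(&tx)->opcode = "SEND";	/* the PDU itself */

	for (struct send_wr *wr = &tx.wrs[0]; wr; wr = wr->next)
		printf("%s\n", wr->opcode);
	return 0;
}
```

Keeping the array inside the task descriptor is what lets ISER_MAX_WRS (7) bound the chain: three invalidate+registration pairs (data, protection, signature) plus the PDU send.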
diff --git a/kernel/drivers/infiniband/ulp/iser/iser_initiator.c b/kernel/drivers/infiniband/ulp/iser/iser_initiator.c
index 0a47f42fe..ffd00c420 100644
--- a/kernel/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/kernel/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,7 +49,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
 
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
-	struct iser_device  *device = iser_task->iser_conn->ib_conn.device;
 	struct iser_mem_reg *mem_reg;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -73,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
 		return err;
 	}
 
-	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
+	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
@@ -103,7 +102,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int edtl)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
-	struct iser_device  *device = iser_task->iser_conn->ib_conn.device;
 	struct iser_mem_reg *mem_reg;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -128,7 +126,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		return err;
 	}
 
-	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
+	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
 		return err;
@@ -170,13 +168,7 @@ static void iser_create_send_desc(struct iser_conn	*iser_conn,
 	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
 	tx_desc->iser_header.flags = ISER_VER;
-
 	tx_desc->num_sge = 1;
-
-	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
-		tx_desc->tx_sg[0].lkey = device->mr->lkey;
-		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
-	}
 }
 
 static void iser_free_login_buf(struct iser_conn *iser_conn)
@@ -266,7 +258,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
 	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
 
-	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+	if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
+					   iser_conn->scsi_sg_tablesize))
 		goto create_rdma_reg_res_failed;
 
 	if (iser_alloc_login_buf(iser_conn))
@@ -291,7 +284,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 		rx_sg = &rx_desc->rx_sg;
 		rx_sg->addr   = rx_desc->dma_addr;
 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
-		rx_sg->lkey   = device->mr->lkey;
+		rx_sg->lkey   = device->pd->local_dma_lkey;
 	}
 
 	iser_conn->rx_desc_head = 0;
@@ -307,7 +300,7 @@ rx_desc_dma_map_failed:
 rx_desc_alloc_fail:
 	iser_free_login_buf(iser_conn);
 alloc_login_buf_fail:
-	device->iser_free_rdma_reg_res(ib_conn);
+	device->reg_ops->free_reg_res(ib_conn);
 create_rdma_reg_res_failed:
 	iser_err("failed allocating rx descriptors / data buffers\n");
 	return -ENOMEM;
@@ -320,8 +313,8 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 
-	if (device->iser_free_rdma_reg_res)
-		device->iser_free_rdma_reg_res(ib_conn);
+	if (device->reg_ops->free_reg_res)
+		device->reg_ops->free_reg_res(ib_conn);
 
 	rx_desc = iser_conn->rx_descs;
 	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -545,7 +538,7 @@ int iser_send_control(struct iscsi_conn *conn,
 
 		tx_dsg->addr = iser_conn->login_req_dma;
 		tx_dsg->length = task->data_count;
-		tx_dsg->lkey = device->mr->lkey;
+		tx_dsg->lkey = device->pd->local_dma_lkey;
 		mdesc->num_sge = 2;
 	}
 
@@ -668,61 +661,25 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
-	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
-	int is_rdma_data_aligned = 1;
-	int is_rdma_prot_aligned = 1;
 	int prot_count = scsi_prot_sg_count(iser_task->sc);
 
-	/* if we were reading, copy back to unaligned sglist,
-	 * anyway dma_unmap and free the copy
-	 */
-	if (iser_task->data[ISER_DIR_IN].orig_sg) {
-		is_rdma_data_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->data[ISER_DIR_IN],
-						ISER_DIR_IN);
-	}
-
-	if (iser_task->data[ISER_DIR_OUT].orig_sg) {
-		is_rdma_data_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->data[ISER_DIR_OUT],
-						ISER_DIR_OUT);
-	}
-
-	if (iser_task->prot[ISER_DIR_IN].orig_sg) {
-		is_rdma_prot_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->prot[ISER_DIR_IN],
-						ISER_DIR_IN);
-	}
-
-	if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
-		is_rdma_prot_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_task,
-						&iser_task->prot[ISER_DIR_OUT],
-						ISER_DIR_OUT);
-	}
-
 	if (iser_task->dir[ISER_DIR_IN]) {
-		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
-		if (is_rdma_data_aligned)
-			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_IN],
-						 DMA_FROM_DEVICE);
-		if (prot_count && is_rdma_prot_aligned)
+		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+		iser_dma_unmap_task_data(iser_task,
+					 &iser_task->data[ISER_DIR_IN],
+					 DMA_FROM_DEVICE);
+		if (prot_count)
 			iser_dma_unmap_task_data(iser_task,
 						 &iser_task->prot[ISER_DIR_IN],
 						 DMA_FROM_DEVICE);
 	}
 
 	if (iser_task->dir[ISER_DIR_OUT]) {
-		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
-		if (is_rdma_data_aligned)
-			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_OUT],
-						 DMA_TO_DEVICE);
-		if (prot_count && is_rdma_prot_aligned)
+		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+		iser_dma_unmap_task_data(iser_task,
+					 &iser_task->data[ISER_DIR_OUT],
+					 DMA_TO_DEVICE);
+		if (prot_count)
 			iser_dma_unmap_task_data(iser_task,
 						 &iser_task->prot[ISER_DIR_OUT],
 						 DMA_TO_DEVICE);
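The prepare paths above now call iser_reg_rdma_mem(), which forwards to device->reg_ops rather than per-device function pointers. A minimal sketch of that ops-table dispatch, with the same FMR-first preference (hypothetical names and reduced signatures):

```c
#include <stdio.h>

/* Hypothetical, trimmed-down version of the reg_ops dispatch above. */
struct reg_ops {
	const char *name;
	int (*reg_mem)(int nents);
};

static int fmr_reg(int nents)     { printf("FMR map, %d entries\n", nents); return 0; }
static int fastreg_reg(int nents) { printf("FRWR map, %d entries\n", nents); return 0; }

static const struct reg_ops fmr_ops     = { "fmr",     fmr_reg };
static const struct reg_ops fastreg_ops = { "fastreg", fastreg_reg };

/* Mirrors iser_assign_reg_ops(): pick the scheme once per device,
 * preferring FMR when the device supports it. */
static const struct reg_ops *assign_reg_ops(int have_fmr, int have_frwr)
{
	if (have_fmr)
		return &fmr_ops;
	if (have_frwr)
		return &fastreg_ops;
	return NULL; /* device cannot register memory */
}

int main(void)
{
	const struct reg_ops *ops = assign_reg_ops(0, 1);

	if (ops)
		ops->reg_mem(16); /* callers no longer care which scheme runs */
	return 0;
}
```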
diff --git a/kernel/drivers/infiniband/ulp/iser/iser_memory.c b/kernel/drivers/infiniband/ulp/iser/iser_memory.c
index f0cdc961e..ea765fb96 100644
--- a/kernel/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/kernel/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,194 +38,97 @@
 #include <linux/scatterlist.h>
 
 #include "iscsi_iser.h"
+static
+int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
+		      struct iser_data_buf *mem,
+		      struct iser_reg_resources *rsc,
+		      struct iser_mem_reg *mem_reg);
+static
+int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+		     struct iser_data_buf *mem,
+		     struct iser_reg_resources *rsc,
+		     struct iser_mem_reg *mem_reg);
+
+static struct iser_reg_ops fastreg_ops = {
+	.alloc_reg_res	= iser_alloc_fastreg_pool,
+	.free_reg_res	= iser_free_fastreg_pool,
+	.reg_mem	= iser_fast_reg_mr,
+	.unreg_mem	= iser_unreg_mem_fastreg,
+	.reg_desc_get	= iser_reg_desc_get_fr,
+	.reg_desc_put	= iser_reg_desc_put_fr,
+};
 
-static void
-iser_free_bounce_sg(struct iser_data_buf *data)
-{
-	struct scatterlist *sg;
-	int count;
-
-	for_each_sg(data->sg, sg, data->size, count)
-		__free_page(sg_page(sg));
-
-	kfree(data->sg);
-
-	data->sg = data->orig_sg;
-	data->size = data->orig_size;
-	data->orig_sg = NULL;
-	data->orig_size = 0;
-}
+static struct iser_reg_ops fmr_ops = {
+	.alloc_reg_res	= iser_alloc_fmr_pool,
+	.free_reg_res	= iser_free_fmr_pool,
+	.reg_mem	= iser_fast_reg_fmr,
+	.unreg_mem	= iser_unreg_mem_fmr,
+	.reg_desc_get	= iser_reg_desc_get_fmr,
+	.reg_desc_put	= iser_reg_desc_put_fmr,
+};
 
-static int
-iser_alloc_bounce_sg(struct iser_data_buf *data)
+int iser_assign_reg_ops(struct iser_device *device)
 {
-	struct scatterlist *sg;
-	struct page *page;
-	unsigned long length = data->data_len;
-	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);
-
-	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
-	if (!sg)
-		goto err;
-
-	sg_init_table(sg, nents);
-	while (length) {
-		u32 page_len = min_t(u32, length, PAGE_SIZE);
-
-		page = alloc_page(GFP_ATOMIC);
-		if (!page)
-			goto err;
-
-		sg_set_page(&sg[i], page, page_len, 0);
-		length -= page_len;
-		i++;
+	struct ib_device_attr *dev_attr = &device->dev_attr;
+
+	/* Assign function handles - based on FMR support */
+	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
+	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
+		iser_info("FMR supported, using FMR for registration\n");
+		device->reg_ops = &fmr_ops;
+	} else
+	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+		iser_info("FastReg supported, using FastReg for registration\n");
+		device->reg_ops = &fastreg_ops;
+	} else {
+		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
+		return -1;
 	}
 
-	data->orig_sg = data->sg;
-	data->orig_size = data->size;
-	data->sg = sg;
-	data->size = nents;
-
 	return 0;
-
-err:
-	for (; i > 0; i--)
-		__free_page(sg_page(&sg[i - 1]));
-	kfree(sg);
-
-	return -ENOMEM;
 }
 
-static void
-iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
-{
-	struct scatterlist *osg, *bsg = data->sg;
-	void *oaddr, *baddr;
-	unsigned int left = data->data_len;
-	unsigned int bsg_off = 0;
-	int i;
-
-	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
-		unsigned int copy_len, osg_off = 0;
-
-		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
-		copy_len = min(left, osg->length);
-		while (copy_len) {
-			unsigned int len = min(copy_len, bsg->length - bsg_off);
-
-			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
-			if (to_buffer)
-				memcpy(baddr + bsg_off, oaddr + osg_off, len);
-			else
-				memcpy(oaddr + osg_off, baddr + bsg_off, len);
-
-			kunmap_atomic(baddr - bsg->offset);
-			osg_off += len;
-			bsg_off += len;
-			copy_len -= len;
-
-			if (bsg_off >= bsg->length) {
-				bsg = sg_next(bsg);
-				bsg_off = 0;
-			}
-		}
-		kunmap_atomic(oaddr - osg->offset);
-		left -= osg_off;
-	}
-}
-
-static inline void
-iser_copy_from_bounce(struct iser_data_buf *data)
-{
-	iser_copy_bounce(data, false);
-}
-
-static inline void
-iser_copy_to_bounce(struct iser_data_buf *data)
-{
-	iser_copy_bounce(data, true);
-}
 
-struct fast_reg_descriptor *
-iser_reg_desc_get(struct ib_conn *ib_conn)
+struct iser_fr_desc *
+iser_reg_desc_get_fr(struct ib_conn *ib_conn)
 {
-	struct fast_reg_descriptor *desc;
+	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+	struct iser_fr_desc *desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ib_conn->lock, flags);
-	desc = list_first_entry(&ib_conn->fastreg.pool,
-				struct fast_reg_descriptor, list);
+	spin_lock_irqsave(&fr_pool->lock, flags);
+	desc = list_first_entry(&fr_pool->list,
+				struct iser_fr_desc, list);
 	list_del(&desc->list);
-	spin_unlock_irqrestore(&ib_conn->lock, flags);
+	spin_unlock_irqrestore(&fr_pool->lock, flags);
 
 	return desc;
 }
 
 void
-iser_reg_desc_put(struct ib_conn *ib_conn,
-		  struct fast_reg_descriptor *desc)
+iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+		     struct iser_fr_desc *desc)
 {
+	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ib_conn->lock, flags);
-	list_add(&desc->list, &ib_conn->fastreg.pool);
-	spin_unlock_irqrestore(&ib_conn->lock, flags);
+	spin_lock_irqsave(&fr_pool->lock, flags);
+	list_add(&desc->list, &fr_pool->list);
+	spin_unlock_irqrestore(&fr_pool->lock, flags);
 }
 
-/**
- * iser_start_rdma_unaligned_sg
- */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
-					struct iser_data_buf *data,
-					enum iser_data_dir cmd_dir)
+struct iser_fr_desc *
+iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
 {
-	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-	int rc;
-
-	rc = iser_alloc_bounce_sg(data);
-	if (rc) {
-		iser_err("Failed to allocate bounce for data len %lu\n",
-			 data->data_len);
-		return rc;
-	}
-
-	if (cmd_dir == ISER_DIR_OUT)
-		iser_copy_to_bounce(data);
-
-	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
-					(cmd_dir == ISER_DIR_OUT) ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (!data->dma_nents) {
-		iser_err("Got dma_nents %d, something went wrong...\n",
-			 data->dma_nents);
-		rc = -ENOMEM;
-		goto err;
-	}
+	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
 
-	return 0;
-err:
-	iser_free_bounce_sg(data);
-	return rc;
+	return list_first_entry(&fr_pool->list,
+				struct iser_fr_desc, list);
 }
 
-/**
- * iser_finalize_rdma_unaligned_sg
- */
-
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
-				     struct iser_data_buf *data,
-				     enum iser_data_dir cmd_dir)
+void
+iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
+		      struct iser_fr_desc *desc)
 {
-	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-
-	ib_dma_unmap_sg(dev, data->sg, data->size,
-			(cmd_dir == ISER_DIR_OUT) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	if (cmd_dir == ISER_DIR_IN)
-		iser_copy_from_bounce(data);
-
-	iser_free_bounce_sg(data);
 }
 
 #define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -289,52 +192,6 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	return cur_page;
 }
 
-
-/**
- * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
- * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
- * the number of entries which are aligned correctly. Supports the case where
- * consecutive SG elements are actually fragments of the same physcial page.
- */
-static int iser_data_buf_aligned_len(struct iser_data_buf *data,
-				     struct ib_device *ibdev)
-{
-	struct scatterlist *sg, *sgl, *next_sg = NULL;
-	u64 start_addr, end_addr;
-	int i, ret_len, start_check = 0;
-
-	if (data->dma_nents == 1)
-		return 1;
-
-	sgl = data->sg;
-	start_addr = ib_sg_dma_address(ibdev, sgl);
-
-	for_each_sg(sgl, sg, data->dma_nents, i) {
-		if (start_check && !IS_4K_ALIGNED(start_addr))
-			break;
-
-		next_sg = sg_next(sg);
-		if (!next_sg)
-			break;
-
-		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
-		start_addr = ib_sg_dma_address(ibdev, next_sg);
-
-		if (end_addr == start_addr) {
-			start_check = 0;
-			continue;
-		} else
-			start_check = 1;
-
-		if (!IS_4K_ALIGNED(end_addr))
-			break;
-	}
-	ret_len = (next_sg) ? i : i+1;
-	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
-		 ret_len, data->dma_nents, data);
-	return ret_len;
-}
-
 static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
 {
@@ -393,7 +250,7 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
 {
 	struct scatterlist *sg = mem->sg;
 
-	reg->sge.lkey = device->mr->lkey;
+	reg->sge.lkey = device->pd->local_dma_lkey;
 	reg->rkey = device->mr->rkey;
 	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
 	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
@@ -405,47 +262,21 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
 	return 0;
 }
 
-static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
-			      struct iser_data_buf *mem,
-			      enum iser_data_dir cmd_dir,
-			      int aligned_len)
-{
-	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
-	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
-
-	iscsi_conn->fmr_unalign_cnt++;
-
-	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
-		  aligned_len, mem->size);
-
-	if (iser_debug_level > 0)
-		iser_data_buf_dump(mem, device->ib_device);
-
-	/* unmap the command data before accessing it */
-	iser_dma_unmap_task_data(iser_task, mem,
-				 (cmd_dir == ISER_DIR_OUT) ?
-				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	/* allocate copy buf, if we are writing, copy the */
-	/* unaligned scatterlist, dma map the copy        */
-	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
-		return -ENOMEM;
-
-	return 0;
-}
-
 /**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
 static
-int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
+int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
 		      struct iser_data_buf *mem,
-		      struct iser_page_vec *page_vec,
-		      struct iser_mem_reg *mem_reg)
+		      struct iser_reg_resources *rsc,
+		      struct iser_mem_reg *reg)
 {
 	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
+	struct iser_page_vec *page_vec = rsc->page_vec;
+	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
 	struct ib_pool_fmr *fmr;
 	int ret, plen;
 
@@ -461,7 +292,7 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
 		return -EINVAL;
 	}
 
-	fmr  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
+	fmr  = ib_fmr_pool_map_phys(fmr_pool,
 				    page_vec->pages,
 				    page_vec->length,
 				    page_vec->pages[0]);
@@ -471,11 +302,15 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
 		return ret;
 	}
 
-	mem_reg->sge.lkey = fmr->fmr->lkey;
-	mem_reg->rkey = fmr->fmr->rkey;
-	mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
-	mem_reg->sge.length = page_vec->data_size;
-	mem_reg->mem_h = fmr;
+	reg->sge.lkey = fmr->fmr->lkey;
+	reg->rkey = fmr->fmr->rkey;
+	reg->sge.addr = page_vec->pages[0] + page_vec->offset;
+	reg->sge.length = page_vec->data_size;
+	reg->mem_h = fmr;
+
+	iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
+		 reg->sge.addr, reg->sge.length);
 
 	return 0;
 }
@@ -505,71 +340,17 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
 void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 			    enum iser_data_dir cmd_dir)
 {
+	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
 	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
 
 	if (!reg->mem_h)
 		return;
 
-	iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
-			  reg->mem_h);
+	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
+				      reg->mem_h);
 	reg->mem_h = NULL;
 }
 
-/**
- * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
- *         using FMR (if possible) obtaining rkey and va
- *
- * returns 0 on success, errno code on failure
- */
-int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
-			  enum iser_data_dir cmd_dir)
-{
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_device   *device = ib_conn->device;
-	struct ib_device     *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
-	struct iser_mem_reg *mem_reg;
-	int aligned_len;
-	int err;
-	int i;
-
-	mem_reg = &iser_task->rdma_reg[cmd_dir];
-
-	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, mem,
-					 cmd_dir, aligned_len);
-		if (err) {
-			iser_err("failed to allocate bounce buffer\n");
-			return err;
-		}
-	}
-
-	/* if there a single dma entry, FMR is not needed */
-	if (mem->dma_nents == 1) {
-		return iser_reg_dma(device, mem, mem_reg);
-	} else { /* use FMR for multiple dma entries */
-		err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
-					mem_reg);
-		if (err && err != -EAGAIN) {
-			iser_data_buf_dump(mem, ibdev);
-			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
-				 mem->dma_nents,
-				 ntoh24(iser_task->desc.iscsi_header.dlength));
-			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-				 ib_conn->fmr.page_vec->data_size,
-				 ib_conn->fmr.page_vec->length,
-				 ib_conn->fmr.page_vec->offset);
-			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
-				iser_err("page_vec[%d] = 0x%llx\n", i,
-					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
-		}
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
 static void
 iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
@@ -637,10 +418,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
 {
 	u32 rkey;
 
-	memset(inv_wr, 0, sizeof(*inv_wr));
 	inv_wr->opcode = IB_WR_LOCAL_INV;
 	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
 	inv_wr->ex.invalidate_rkey = mr->rkey;
+	inv_wr->send_flags = 0;
+	inv_wr->num_sge = 0;
 
 	rkey = ib_inc_rkey(mr->rkey);
 	ib_update_fast_reg_key(mr, rkey);
@@ -648,61 +430,49 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
 
 static int
 iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
-		struct fast_reg_descriptor *desc,
+		struct iser_pi_context *pi_ctx,
 		struct iser_mem_reg *data_reg,
 		struct iser_mem_reg *prot_reg,
 		struct iser_mem_reg *sig_reg)
 {
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_pi_context *pi_ctx = desc->pi_ctx;
-	struct ib_send_wr sig_wr, inv_wr;
-	struct ib_send_wr *bad_wr, *wr = NULL;
-	struct ib_sig_attrs sig_attrs;
+	struct iser_tx_desc *tx_desc = &iser_task->desc;
+	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
+	struct ib_sig_handover_wr *wr;
 	int ret;
 
-	memset(&sig_attrs, 0, sizeof(sig_attrs));
-	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
+	memset(sig_attrs, 0, sizeof(*sig_attrs));
+	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
 	if (ret)
 		goto err;
 
-	iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
+	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
 
-	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
-		iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
-		wr = &inv_wr;
-	}
+	if (!pi_ctx->sig_mr_valid)
+		iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr);
 
-	memset(&sig_wr, 0, sizeof(sig_wr));
-	sig_wr.opcode = IB_WR_REG_SIG_MR;
-	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
-	sig_wr.sg_list = &data_reg->sge;
-	sig_wr.num_sge = 1;
-	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
-	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+	wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
+	wr->wr.opcode = IB_WR_REG_SIG_MR;
+	wr->wr.wr_id = ISER_FASTREG_LI_WRID;
+	wr->wr.sg_list = &data_reg->sge;
+	wr->wr.num_sge = 1;
+	wr->wr.send_flags = 0;
+	wr->sig_attrs = sig_attrs;
+	wr->sig_mr = pi_ctx->sig_mr;
 	if (scsi_prot_sg_count(iser_task->sc))
-		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
-	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
-					      IB_ACCESS_REMOTE_READ |
-					      IB_ACCESS_REMOTE_WRITE;
-
-	if (!wr)
-		wr = &sig_wr;
+		wr->prot = &prot_reg->sge;
 	else
-		wr->next = &sig_wr;
-
-	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
-	if (ret) {
-		iser_err("reg_sig_mr failed, ret:%d\n", ret);
-		goto err;
-	}
-	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
+		wr->prot = NULL;
+	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
+			   IB_ACCESS_REMOTE_READ |
+			   IB_ACCESS_REMOTE_WRITE;
+	pi_ctx->sig_mr_valid = 0;
 
 	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
 	sig_reg->rkey = pi_ctx->sig_mr->rkey;
 	sig_reg->sge.addr = 0;
 	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
 
-	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
+	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
 err:
@@ -711,149 +481,139 @@ err:
 static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 			    struct iser_data_buf *mem,
-			    struct fast_reg_descriptor *desc,
-			    enum iser_reg_indicator ind,
+			    struct iser_reg_resources *rsc,
 			    struct iser_mem_reg *reg)
 {
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
-	struct ib_mr *mr;
-	struct ib_fast_reg_page_list *frpl;
-	struct ib_send_wr fastreg_wr, inv_wr;
-	struct ib_send_wr *bad_wr, *wr = NULL;
-	int ret, offset, size, plen;
-
-	/* if there a single dma entry, dma mr suffices */
-	if (mem->dma_nents == 1)
-		return iser_reg_dma(device, mem, reg);
-
-	if (ind == ISER_DATA_KEY_VALID) {
-		mr = desc->data_mr;
-		frpl = desc->data_frpl;
-	} else {
-		mr = desc->pi_ctx->prot_mr;
-		frpl = desc->pi_ctx->prot_frpl;
+	struct iser_tx_desc *tx_desc = &iser_task->desc;
+	struct ib_mr *mr = rsc->mr;
+	struct ib_reg_wr *wr;
+	int n;
+
+	if (!rsc->mr_valid)
+		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr);
+
+	n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
+	if (unlikely(n != mem->size)) {
+		iser_err("failed to map sg (%d/%d)\n",
+			 n, mem->size);
+		return n < 0 ? n : -EINVAL;
 	}
-	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
-				   &offset, &size);
-	if (plen * SIZE_4K < size) {
-		iser_err("fast reg page_list too short to hold this SG\n");
-		return -EINVAL;
-	}
+	wr = reg_wr(iser_tx_next_wr(tx_desc));
+	wr->wr.opcode = IB_WR_REG_MR;
+	wr->wr.wr_id = ISER_FASTREG_LI_WRID;
+	wr->wr.send_flags = 0;
+	wr->wr.num_sge = 0;
+	wr->mr = mr;
+	wr->key = mr->rkey;
+	wr->access = IB_ACCESS_LOCAL_WRITE  |
+		     IB_ACCESS_REMOTE_WRITE |
+		     IB_ACCESS_REMOTE_READ;
 
-	if (!(desc->reg_indicators & ind)) {
-		iser_inv_rkey(&inv_wr, mr);
-		wr = &inv_wr;
-	}
-
-	/* Prepare FASTREG WR */
-	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
-	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
-	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
-	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
-	fastreg_wr.wr.fast_reg.page_list = frpl;
-	fastreg_wr.wr.fast_reg.page_list_len = plen;
-	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
-	fastreg_wr.wr.fast_reg.length = size;
-	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
-	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
-					       IB_ACCESS_REMOTE_WRITE |
-					       IB_ACCESS_REMOTE_READ);
-
-	if (!wr)
-		wr = &fastreg_wr;
-	else
-		wr->next = &fastreg_wr;
-
-	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
-	if (ret) {
-		iser_err("fast registration failed, ret:%d\n", ret);
-		return ret;
-	}
-	desc->reg_indicators &= ~ind;
+	rsc->mr_valid = 0;
 
 	reg->sge.lkey = mr->lkey;
 	reg->rkey = mr->rkey;
-	reg->sge.addr = frpl->page_list[0] + offset;
-	reg->sge.length = size;
+	reg->sge.addr = mr->iova;
+	reg->sge.length = mr->length;
 
-	return ret;
+	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
+		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);
+
+	return 0;
 }
 
-/**
- * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
- *      using Fast Registration WR (if possible) obtaining rkey and va
- *
- * returns 0 on success, errno code on failure
- */
-int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
-			      enum iser_data_dir cmd_dir)
+static int
+iser_reg_prot_sg(struct iscsi_iser_task *task,
+		 struct iser_data_buf *mem,
+		 struct iser_fr_desc *desc,
+		 bool use_dma_key,
+		 struct iser_mem_reg *reg)
 {
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+	struct iser_device *device = task->iser_conn->ib_conn.device;
+
+	if (use_dma_key)
+		return iser_reg_dma(device, mem, reg);
+
+	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
+}
+
+static int
+iser_reg_data_sg(struct iscsi_iser_task *task,
+		 struct iser_data_buf *mem,
+		 struct iser_fr_desc *desc,
+		 bool use_dma_key,
+		 struct iser_mem_reg *reg)
+{
+	struct iser_device *device = task->iser_conn->ib_conn.device;
+
+	if (use_dma_key)
+		return iser_reg_dma(device, mem, reg);
+
+	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+}
+
+int iser_reg_rdma_mem(struct iscsi_iser_task *task,
+		      enum iser_data_dir dir)
+{
+	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
-	struct ib_device *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
-	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
-	struct fast_reg_descriptor *desc = NULL;
-	int err, aligned_len;
-
-	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, mem,
-					 cmd_dir, aligned_len);
-		if (err) {
-			iser_err("failed to allocate bounce buffer\n");
-			return err;
-		}
-	}
+	struct iser_data_buf *mem = &task->data[dir];
+	struct iser_mem_reg *reg = &task->rdma_reg[dir];
+	struct iser_mem_reg *data_reg;
+	struct iser_fr_desc *desc = NULL;
+	bool use_dma_key;
+	int err;
 
-	if (mem->dma_nents != 1 ||
-	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
-		desc = iser_reg_desc_get(ib_conn);
-		mem_reg->mem_h = desc;
+	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+	if (!use_dma_key) {
+		desc = device->reg_ops->reg_desc_get(ib_conn);
+		reg->mem_h = desc;
 	}
 
-	err = iser_fast_reg_mr(iser_task, mem, desc,
-			       ISER_DATA_KEY_VALID, mem_reg);
-	if (err)
+	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
+		data_reg = reg;
+	else
+		data_reg = &task->desc.data_reg;
+
+	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
+	if (unlikely(err))
 		goto err_reg;
 
-	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
-		struct iser_mem_reg prot_reg;
-
-		memset(&prot_reg, 0, sizeof(prot_reg));
-		if (scsi_prot_sg_count(iser_task->sc)) {
-			mem = &iser_task->prot[cmd_dir];
-			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-			if (aligned_len != mem->dma_nents) {
-				err = fall_to_bounce_buf(iser_task, mem,
-							 cmd_dir, aligned_len);
-				if (err) {
-					iser_err("failed to allocate bounce buffer\n");
-					return err;
-				}
-			}
-
-			err = iser_fast_reg_mr(iser_task, mem, desc,
-					       ISER_PROT_KEY_VALID, &prot_reg);
-			if (err)
+	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
+
+		if (scsi_prot_sg_count(task->sc)) {
+			mem = &task->prot[dir];
+			err = iser_reg_prot_sg(task, mem, desc,
+					       use_dma_key, prot_reg);
+			if (unlikely(err))
 				goto err_reg;
 		}
 
-		err = iser_reg_sig_mr(iser_task, desc, mem_reg,
-				      &prot_reg, mem_reg);
-		if (err) {
-			iser_err("Failed to register signature mr\n");
-			return err;
-		}
-		desc->reg_indicators |= ISER_FASTREG_PROTECTED;
+		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
				      prot_reg, reg);
+		if (unlikely(err))
+			goto err_reg;
+
+		desc->pi_ctx->sig_protected = 1;
 	}
 
 	return 0;
+
 err_reg:
 	if (desc)
-		iser_reg_desc_put(ib_conn, desc);
+		device->reg_ops->reg_desc_put(ib_conn, desc);
 
 	return err;
 }
+
+void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
+			 enum iser_data_dir dir)
+{
+	struct iser_device *device = task->iser_conn->ib_conn.device;
+
+	device->reg_ops->unreg_mem(task, dir);
+}
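In iser_reg_rdma_mem() above, use_dma_key short-circuits registration when the command maps to a single DMA entry, always_register is off, and no T10-PI is involved. A compact sketch of that predicate (illustrative only; the protection op is reduced to a boolean):

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the use_dma_key decision above (names are illustrative,
 * not the kernel API). */
static bool use_dma_key(int dma_nents, bool always_reg, bool has_pi)
{
	/* A single contiguous DMA entry can use the PD's local DMA lkey
	 * directly -- unless the user forced registration (always_register)
	 * or the command carries T10-PI, which needs a signature MR. */
	return dma_nents == 1 && !always_reg && !has_pi;
}

int main(void)
{
	printf("1 entry, default params : %d\n", use_dma_key(1, true,  false));
	printf("1 entry, always_reg off : %d\n", use_dma_key(1, false, false));
	printf("4 entries               : %d\n", use_dma_key(4, false, false));
	return 0;
}
```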
b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c index cc2dd35ff..42f4da620 100644 --- a/kernel/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c @@ -51,19 +51,22 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context); static void iser_cq_event_callback(struct ib_event *cause, void *context) { - iser_err("got cq event %d \n", cause->event); + iser_err("cq event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_qp_event_callback(struct ib_event *cause, void *context) { - iser_err("got qp event %d\n",cause->event); + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_event_handler(struct ib_event_handler *handler, struct ib_event *event) { - iser_err("async event %d on device %s port %d\n", event->event, - event->device->name, event->element.port_num); + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); } /** @@ -84,25 +87,9 @@ static int iser_create_device_ib_res(struct iser_device *device) return ret; } - /* Assign function handles - based on FMR support */ - if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr && - device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) { - iser_info("FMR supported, using FMR for registration\n"); - device->iser_alloc_rdma_reg_res = iser_create_fmr_pool; - device->iser_free_rdma_reg_res = iser_free_fmr_pool; - device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr; - device->iser_unreg_rdma_mem = iser_unreg_mem_fmr; - } else - if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { - iser_info("FastReg supported, using FastReg for registration\n"); - device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool; - device->iser_free_rdma_reg_res = iser_free_fastreg_pool; - device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg; - device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg; - } else { - iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); - return -1; - } + ret = iser_assign_reg_ops(device); + if (ret) + return ret; device->comps_used = min_t(int, num_online_cpus(), device->ib_device->num_comp_vectors); @@ -123,14 +110,17 @@ static int iser_create_device_ib_res(struct iser_device *device) goto pd_err; for (i = 0; i < device->comps_used; i++) { + struct ib_cq_init_attr cq_attr = {}; struct iser_comp *comp = &device->comps[i]; comp->device = device; + cq_attr.cqe = max_cqe; + cq_attr.comp_vector = i; comp->cq = ib_create_cq(device->ib_device, iser_cq_callback, iser_cq_event_callback, (void *)comp, - max_cqe, i); + &cq_attr); if (IS_ERR(comp->cq)) { comp->cq = NULL; goto cq_err; @@ -143,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -157,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: for (i = 0; i < device->comps_used; i++) 
tasklet_kill(&device->comps[i].tasklet); @@ -183,7 +178,6 @@ comps_err: static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); for (i = 0; i < device->comps_used; i++) { struct iser_comp *comp = &device->comps[i]; @@ -194,8 +188,9 @@ static void iser_free_device_ib_res(struct iser_device *device) } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); - (void)ib_dealloc_pd(device->pd); + if (device->mr) + (void)ib_dereg_mr(device->mr); + ib_dealloc_pd(device->pd); kfree(device->comps); device->comps = NULL; @@ -205,28 +200,40 @@ static void iser_free_device_ib_res(struct iser_device *device) } /** - * iser_create_fmr_pool - Creates FMR pool and page_vector + * iser_alloc_fmr_pool - Creates FMR pool and page_vector * * returns 0 on success, or errno code on failure */ -int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) { struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_page_vec *page_vec; + struct iser_fr_desc *desc; + struct ib_fmr_pool *fmr_pool; struct ib_fmr_pool_param params; - int ret = -ENOMEM; + int ret; - ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + - (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), - GFP_KERNEL); - if (!ib_conn->fmr.page_vec) - return ret; + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size), + GFP_KERNEL); + if (!page_vec) { + ret = -ENOMEM; + goto err_frpl; + } - ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); + page_vec->pages = (u64 *)(page_vec + 1); params.page_shift = SHIFT_4K; - /* when the first/last SG element are not start/end * - * page aligned, the map whould be of N+1 pages */ - params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; + params.max_pages_per_fmr = size; /* make the pool size twice the max number of SCSI commands * * the ML is expected to queue, watermark for unmap at 50% */ params.pool_size = cmds_max * 2; @@ -237,23 +244,25 @@ int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ); - ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, ¶ms); - if (!IS_ERR(ib_conn->fmr.pool)) - return 0; - - /* no FMR => no need for page_vec */ - kfree(ib_conn->fmr.page_vec); - ib_conn->fmr.page_vec = NULL; - - ret = PTR_ERR(ib_conn->fmr.pool); - ib_conn->fmr.pool = NULL; - if (ret != -ENOSYS) { + fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); + if (IS_ERR(fmr_pool)) { + ret = PTR_ERR(fmr_pool); iser_err("FMR allocation failed, err %d\n", ret); - return ret; - } else { - iser_warn("FMRs are not supported, using unaligned mode\n"); - return 0; + goto err_fmr; } + + desc->rsc.page_vec = page_vec; + desc->rsc.fmr_pool = fmr_pool; + list_add(&desc->list, &fr_pool->list); + + return 0; + +err_fmr: + kfree(page_vec); +err_frpl: + kfree(desc); + + return ret; } /** @@ -261,26 +270,54 @@ int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) */ void iser_free_fmr_pool(struct ib_conn *ib_conn) { + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + iser_info("freeing conn %p fmr pool %p\n", - ib_conn, ib_conn->fmr.pool); 
+ ib_conn, desc->rsc.fmr_pool); + + ib_destroy_fmr_pool(desc->rsc.fmr_pool); + kfree(desc->rsc.page_vec); + kfree(desc); +} + +static int +iser_alloc_reg_res(struct ib_device *ib_device, + struct ib_pd *pd, + struct iser_reg_resources *res, + unsigned int size) +{ + int ret; - if (ib_conn->fmr.pool != NULL) - ib_destroy_fmr_pool(ib_conn->fmr.pool); + res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size); + if (IS_ERR(res->mr)) { + ret = PTR_ERR(res->mr); + iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); + return ret; + } + res->mr_valid = 1; - ib_conn->fmr.pool = NULL; + return 0; +} - kfree(ib_conn->fmr.page_vec); - ib_conn->fmr.page_vec = NULL; +static void +iser_free_reg_res(struct iser_reg_resources *rsc) +{ + ib_dereg_mr(rsc->mr); } static int -iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd, - struct fast_reg_descriptor *desc) +iser_alloc_pi_ctx(struct ib_device *ib_device, + struct ib_pd *pd, + struct iser_fr_desc *desc, + unsigned int size) { struct iser_pi_context *pi_ctx = NULL; - struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2, - .flags = IB_MR_SIGNATURE_EN}; - int ret = 0; + int ret; desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); if (!desc->pi_ctx) @@ -288,36 +325,25 @@ iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd, pi_ctx = desc->pi_ctx; - pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(pi_ctx->prot_frpl)) { - ret = PTR_ERR(pi_ctx->prot_frpl); - goto prot_frpl_failure; - } - - pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, - ISCSI_ISER_SG_TABLESIZE + 1); - if (IS_ERR(pi_ctx->prot_mr)) { - ret = PTR_ERR(pi_ctx->prot_mr); - goto prot_mr_failure; + ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size); + if (ret) { + iser_err("failed to allocate reg_resources\n"); + goto alloc_reg_res_err; } - desc->reg_indicators |= ISER_PROT_KEY_VALID; - pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); + pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); if (IS_ERR(pi_ctx->sig_mr)) { ret = PTR_ERR(pi_ctx->sig_mr); goto sig_mr_failure; } - desc->reg_indicators |= ISER_SIG_KEY_VALID; - desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; + pi_ctx->sig_mr_valid = 1; + desc->pi_ctx->sig_protected = 0; return 0; sig_mr_failure: - ib_dereg_mr(desc->pi_ctx->prot_mr); -prot_mr_failure: - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); -prot_frpl_failure: + iser_free_reg_res(&pi_ctx->rsc); +alloc_reg_res_err: kfree(desc->pi_ctx); return ret; @@ -326,82 +352,71 @@ prot_frpl_failure: static void iser_free_pi_ctx(struct iser_pi_context *pi_ctx) { - ib_free_fast_reg_page_list(pi_ctx->prot_frpl); - ib_dereg_mr(pi_ctx->prot_mr); - ib_destroy_mr(pi_ctx->sig_mr); + iser_free_reg_res(&pi_ctx->rsc); + ib_dereg_mr(pi_ctx->sig_mr); kfree(pi_ctx); } -static int -iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, - bool pi_enable, struct fast_reg_descriptor *desc) +static struct iser_fr_desc * +iser_create_fastreg_desc(struct ib_device *ib_device, + struct ib_pd *pd, + bool pi_enable, + unsigned int size) { + struct iser_fr_desc *desc; int ret; - desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE + 1); - if (IS_ERR(desc->data_frpl)) { - ret = PTR_ERR(desc->data_frpl); - iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", - ret); - return PTR_ERR(desc->data_frpl); - } + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); - desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 
@@ -326,82 +352,71 @@ prot_frpl_failure:
 static void
 iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
 {
-	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
-	ib_dereg_mr(pi_ctx->prot_mr);
-	ib_destroy_mr(pi_ctx->sig_mr);
+	iser_free_reg_res(&pi_ctx->rsc);
+	ib_dereg_mr(pi_ctx->sig_mr);
 	kfree(pi_ctx);
 }
 
-static int
-iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
-			 bool pi_enable, struct fast_reg_descriptor *desc)
+static struct iser_fr_desc *
+iser_create_fastreg_desc(struct ib_device *ib_device,
+			 struct ib_pd *pd,
+			 bool pi_enable,
+			 unsigned int size)
 {
+	struct iser_fr_desc *desc;
 	int ret;
 
-	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
-						      ISCSI_ISER_SG_TABLESIZE + 1);
-	if (IS_ERR(desc->data_frpl)) {
-		ret = PTR_ERR(desc->data_frpl);
-		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
-			 ret);
-		return PTR_ERR(desc->data_frpl);
-	}
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
 
-	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
-	if (IS_ERR(desc->data_mr)) {
-		ret = PTR_ERR(desc->data_mr);
-		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
-		goto fast_reg_mr_failure;
-	}
-	desc->reg_indicators |= ISER_DATA_KEY_VALID;
+	ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
+	if (ret)
+		goto reg_res_alloc_failure;
 
 	if (pi_enable) {
-		ret = iser_alloc_pi_ctx(ib_device, pd, desc);
+		ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
 		if (ret)
 			goto pi_ctx_alloc_failure;
 	}
 
-	return 0;
+	return desc;
+
pi_ctx_alloc_failure:
-	ib_dereg_mr(desc->data_mr);
-fast_reg_mr_failure:
-	ib_free_fast_reg_page_list(desc->data_frpl);
+	iser_free_reg_res(&desc->rsc);
+reg_res_alloc_failure:
+	kfree(desc);
 
-	return ret;
+	return ERR_PTR(ret);
 }
 
 /**
- * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
+ * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
  * for fast registration work requests.
  * returns 0 on success, or errno code on failure
  */
-int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
+int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+			    unsigned cmds_max,
+			    unsigned int size)
 {
 	struct iser_device *device = ib_conn->device;
-	struct fast_reg_descriptor *desc;
+	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+	struct iser_fr_desc *desc;
 	int i, ret;
 
-	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
-	ib_conn->fastreg.pool_size = 0;
+	INIT_LIST_HEAD(&fr_pool->list);
+	spin_lock_init(&fr_pool->lock);
+	fr_pool->size = 0;
 	for (i = 0; i < cmds_max; i++) {
-		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-		if (!desc) {
-			iser_err("Failed to allocate a new fast_reg descriptor\n");
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
-					       ib_conn->pi_support, desc);
-		if (ret) {
-			iser_err("Failed to create fastreg descriptor err=%d\n",
-				 ret);
-			kfree(desc);
+		desc = iser_create_fastreg_desc(device->ib_device, device->pd,
+						ib_conn->pi_support, size);
+		if (IS_ERR(desc)) {
+			ret = PTR_ERR(desc);
 			goto err;
 		}
 
-		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
-		ib_conn->fastreg.pool_size++;
+		list_add_tail(&desc->list, &fr_pool->list);
+		fr_pool->size++;
 	}
 
 	return 0;
@@ -416,27 +431,27 @@ err:
  */
 void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 {
-	struct fast_reg_descriptor *desc, *tmp;
+	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+	struct iser_fr_desc *desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&ib_conn->fastreg.pool))
+	if (list_empty(&fr_pool->list))
 		return;
 
 	iser_info("freeing conn %p fr pool\n", ib_conn);
 
-	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
+	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
 		list_del(&desc->list);
-		ib_free_fast_reg_page_list(desc->data_frpl);
-		ib_dereg_mr(desc->data_mr);
+		iser_free_reg_res(&desc->rsc);
 		if (desc->pi_ctx)
 			iser_free_pi_ctx(desc->pi_ctx);
 		kfree(desc);
 		++i;
 	}
 
-	if (i < ib_conn->fastreg.pool_size)
+	if (i < fr_pool->size)
 		iser_warn("pool still has %d regions registered\n",
-			  ib_conn->fastreg.pool_size - i);
+			  fr_pool->size - i);
 }
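
[Editorial aside, not part of the patch: iser_alloc_fastreg_pool() and iser_free_fastreg_pool() above manage a fixed set of descriptors on a list that is filled once at connection setup and torn down entry by entry, warning if fewer descriptors come back than went in. A standalone sketch of that pool shape in plain C; it is single-threaded, so the fr_pool spinlock is omitted, and all names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct fr_desc { struct fr_desc *next; };
struct fr_pool { struct fr_desc *head; int size; };

static int pool_alloc(struct fr_pool *p, int cmds_max)
{
	for (int i = 0; i < cmds_max; i++) {
		struct fr_desc *d = calloc(1, sizeof(*d));
		if (!d)
			return -1;	/* caller tears down whatever was built */
		d->next = p->head;	/* push onto the free list */
		p->head = d;
		p->size++;
	}
	return 0;
}

static void pool_free(struct fr_pool *p)
{
	int i = 0;
	while (p->head) {
		struct fr_desc *d = p->head;
		p->head = d->next;
		free(d);
		++i;
	}
	if (i < p->size)	/* a descriptor is still out with an in-flight command */
		fprintf(stderr, "pool still has %d regions registered\n", p->size - i);
}

int main(void)
{
	struct fr_pool pool = { 0 };
	if (pool_alloc(&pool, 8) == 0)
		printf("pool size %d\n", pool.size);
	pool_free(&pool);
	return 0;
}

Preallocating one descriptor per outstanding command keeps registration off the fast path: a command only ever pops a ready descriptor, it never allocates.]
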
@@ -732,6 +747,31 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
 	iser_conn->state = ISER_CONN_TERMINATING;
 }
 
+static void
+iser_calc_scsi_params(struct iser_conn *iser_conn,
+		      unsigned int max_sectors)
+{
+	struct iser_device *device = iser_conn->ib_conn.device;
+	unsigned short sg_tablesize, sup_sg_tablesize;
+
+	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
+	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
+				 device->dev_attr.max_fast_reg_page_list_len);
+
+	if (sg_tablesize > sup_sg_tablesize) {
+		sg_tablesize = sup_sg_tablesize;
+		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
+	} else {
+		iser_conn->scsi_max_sectors = max_sectors;
+	}
+
+	iser_conn->scsi_sg_tablesize = sg_tablesize;
+
+	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+		 iser_conn, iser_conn->scsi_sg_tablesize,
+		 iser_conn->scsi_max_sectors);
+}
+
 /**
  * Called with state mutex held
  **/
@@ -770,6 +810,8 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 		}
 	}
 
+	iser_calc_scsi_params(iser_conn, iser_max_sectors);
+
 	ret = rdma_resolve_route(cma_id, 1000);
 	if (ret) {
 		iser_err("resolve route failed: %d\n", ret);
@@ -873,8 +915,9 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 	int ret = 0;
 
 	iser_conn = (struct iser_conn *)cma_id->context;
-	iser_info("event %d status %d conn %p id %p\n",
-		  event->event, event->status, cma_id->context, cma_id);
+	iser_info("%s (%d): status %d conn %p id %p\n",
+		  rdma_event_msg(event->event), event->event,
+		  event->status, cma_id->context, cma_id);
 
 	mutex_lock(&iser_conn->state_mutex);
 	switch (event->event) {
@@ -913,7 +956,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 		}
 		break;
 	default:
-		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
+		iser_err("Unexpected RDMA CM event: %s (%d)\n",
+			 rdma_event_msg(event->event), event->event);
 		break;
 	}
 	mutex_unlock(&iser_conn->state_mutex);
@@ -930,7 +974,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
 	init_completion(&iser_conn->ib_completion);
 	init_completion(&iser_conn->up_completion);
 	INIT_LIST_HEAD(&iser_conn->conn_list);
-	spin_lock_init(&iser_conn->ib_conn.lock);
 	mutex_init(&iser_conn->state_mutex);
 }
 
@@ -960,7 +1003,7 @@ int iser_connect(struct iser_conn *iser_conn,
 	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
 	ib_conn->beacon.opcode = IB_WR_SEND;
 
-	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
+	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
 					 (void *)iser_conn, RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(ib_conn->cma_id)) {
@@ -1009,7 +1052,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
 
 	sge.addr = iser_conn->login_resp_dma;
 	sge.length = ISER_RX_LOGIN_SIZE;
-	sge.lkey = ib_conn->device->mr->lkey;
+	sge.lkey = ib_conn->device->pd->local_dma_lkey;
 
 	rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
 	rx_wr.sg_list = &sge;
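
[Editorial aside, not part of the patch: the new iser_calc_scsi_params() caps the scatter-gather table at what the device can map in one fast registration, and scales max_sectors back down whenever that cap bites. The arithmetic in isolation, as standalone C; the 64-entry device limit is a made-up example, ISCSI_ISER_MAX_SG_TABLESIZE is stood in for by max_sg:

#include <stdio.h>

#define SIZE_4K 4096u

/* mirrors the kernel's DIV_ROUND_UP() */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int max_sectors = 1024;	/* module default: 512 KB per command */
	unsigned int max_sg = 512;		/* driver-side table-size ceiling */
	unsigned int dev_limit = 64;		/* hypothetical max_fast_reg_page_list_len */
	unsigned int sup = max_sg < dev_limit ? max_sg : dev_limit;
	unsigned int sg_tablesize = div_round_up(max_sectors * 512, SIZE_4K);
	unsigned int scsi_max_sectors = max_sectors;

	if (sg_tablesize > sup) {	/* device can't map that many pages: shrink both */
		sg_tablesize = sup;
		scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
	}

	/* 1024 sectors need 128 4K pages; a 64-page device cap drops us to 512 sectors */
	printf("sg_tablesize=%u scsi_max_sectors=%u\n", sg_tablesize, scsi_max_sectors);
	return 0;
}

Running this prints sg_tablesize=64 scsi_max_sectors=512: the advertised I/O size always stays consistent with what one registration can actually cover.]
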
@@ -1064,23 +1107,24 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
 int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 		   bool signal)
 {
-	int ib_ret;
-	struct ib_send_wr send_wr, *send_wr_failed;
+	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
+	int ib_ret;
 
 	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
 				      tx_desc->dma_addr, ISER_HEADERS_LEN,
 				      DMA_TO_DEVICE);
 
-	send_wr.next = NULL;
-	send_wr.wr_id = (uintptr_t)tx_desc;
-	send_wr.sg_list = tx_desc->tx_sg;
-	send_wr.num_sge = tx_desc->num_sge;
-	send_wr.opcode = IB_WR_SEND;
-	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;
+	wr->next = NULL;
+	wr->wr_id = (uintptr_t)tx_desc;
+	wr->sg_list = tx_desc->tx_sg;
+	wr->num_sge = tx_desc->num_sge;
+	wr->opcode = IB_WR_SEND;
+	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
 
-	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
+	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
 	if (ib_ret)
-		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
+		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
+			 ib_ret, bad_wr->opcode);
 
 	return ib_ret;
 }
@@ -1173,10 +1217,13 @@ static void iser_handle_wc(struct ib_wc *wc)
 		}
 	} else {
 		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			iser_err("wr id %llx status %d vend_err %x\n",
-				 wc->wr_id, wc->status, wc->vendor_err);
+			iser_err("%s (%d): wr id %llx vend_err %x\n",
+				 ib_wc_status_msg(wc->status), wc->status,
+				 wc->wr_id, wc->vendor_err);
 		else
-			iser_dbg("flush error: wr id %llx\n", wc->wr_id);
+			iser_dbg("%s (%d): wr id %llx\n",
+				 ib_wc_status_msg(wc->status), wc->status,
+				 wc->wr_id);
 
 		if (wc->wr_id == ISER_BEACON_WRID)
 			/* all flush errors were consumed */
@@ -1229,13 +1276,13 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 			     enum iser_data_dir cmd_dir, sector_t *sector)
 {
 	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-	struct fast_reg_descriptor *desc = reg->mem_h;
+	struct iser_fr_desc *desc = reg->mem_h;
 	unsigned long sector_size = iser_task->sc->device->sector_size;
 	struct ib_mr_status mr_status;
 	int ret;
 
-	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
-		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+	if (desc && desc->pi_ctx->sig_protected) {
+		desc->pi_ctx->sig_protected = 0;
 		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
 					 IB_MR_CHECK_SIG_STATUS, &mr_status);
 		if (ret) {
@@ -1246,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
 			sector_t sector_off = mr_status.sig_err.sig_err_offset;
 
-			do_div(sector_off, sector_size + 8);
+			sector_div(sector_off, sector_size + 8);
 			*sector = scsi_get_lba(iser_task->sc) + sector_off;
 
 			pr_err("PI error found type %d at sector %llx "
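
[Editorial aside, not part of the patch: the do_div() to sector_div() switch in the last hunk uses the sector_t-safe form of the 64-bit division, and the divisor is easy to misread. With T10-PI, each 512-byte sector travels with an 8-byte protection tuple, so a byte offset into the protected stream converts to a sector count by dividing by sector_size + 8. The arithmetic in isolation, as standalone C with illustrative numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical byte offset reported by the signature MR status check */
	unsigned long long sig_err_offset = 133120;
	unsigned long sector_size = 512;

	/* each sector carries sector_size data bytes plus an 8-byte PI tuple */
	unsigned long long sector_off = sig_err_offset / (sector_size + 8);

	/* 133120 / 520 = 256: the error sits 256 sectors past the command's LBA */
	printf("sector offset %llu\n", sector_off);
	return 0;
}

Dividing by 512 instead of 520 would drift further off the true sector the deeper the error sits in the transfer.]
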