From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001 From: José Pekkarinen Date: Mon, 11 Apr 2016 10:41:07 +0300 Subject: These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The colliding patch was dropped, since its logic was already present in the source. Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769 Signed-off-by: José Pekkarinen --- kernel/include/rdma/ib_addr.h | 22 +- kernel/include/rdma/ib_cache.h | 36 +- kernel/include/rdma/ib_cm.h | 25 +- kernel/include/rdma/ib_mad.h | 125 +++++- kernel/include/rdma/ib_pack.h | 2 + kernel/include/rdma/ib_sa.h | 12 +- kernel/include/rdma/ib_smi.h | 47 +++ kernel/include/rdma/ib_verbs.h | 820 ++++++++++++++++++++++++++---------- kernel/include/rdma/iw_cm.h | 1 + kernel/include/rdma/opa_port_info.h | 433 +++++++++++++++++++ kernel/include/rdma/opa_smi.h | 153 +++++++ kernel/include/rdma/rdma_cm.h | 8 +- kernel/include/rdma/rdma_netlink.h | 7 + 13 files changed, 1444 insertions(+), 247 deletions(-) create mode 100644 kernel/include/rdma/opa_port_info.h create mode 100644 kernel/include/rdma/opa_smi.h diff --git a/kernel/include/rdma/ib_addr.h b/kernel/include/rdma/ib_addr.h index ac54c27a2..11528591d 100644 --- a/kernel/include/rdma/ib_addr.h +++ b/kernel/include/rdma/ib_addr.h @@ -47,6 +47,7 @@ #include #include #include +#include <net/net_namespace.h> struct rdma_addr_client { atomic_t refcount; @@ -64,6 +65,16 @@ void rdma_addr_register_client(struct rdma_addr_client *client); */ void rdma_addr_unregister_client(struct rdma_addr_client *client); +/** + * struct rdma_dev_addr - Contains resolved RDMA hardware addresses + * @src_dev_addr: Source MAC address. + * @dst_dev_addr: Destination MAC address. + * @broadcast: Broadcast address of the device. + * @dev_type: The interface hardware type of the device. + * @bound_dev_if: An optional device interface index. + * @transport: The transport type used. + * @net: Network namespace containing the bound_dev_if net_dev. + */ struct rdma_dev_addr { unsigned char src_dev_addr[MAX_ADDR_LEN]; unsigned char dst_dev_addr[MAX_ADDR_LEN]; @@ -71,11 +82,14 @@ struct rdma_dev_addr { unsigned short dev_type; int bound_dev_if; enum rdma_transport_type transport; + struct net *net; }; /** * rdma_translate_ip - Translate a local IP address to an RDMA hardware * address. + * + * The dev_addr->net field must be initialized. */ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr, u16 *vlan_id); @@ -90,7 +104,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr, * @dst_addr: The destination address to resolve. * @addr: A reference to a data location that will receive the resolved * addresses. The data location must remain valid until the callback has - * been invoked. + * been invoked. The net field of the addr struct must be valid. * @timeout_ms: Amount of time to wait for the address resolution to complete. * @callback: Call invoked once address resolution has completed, timed out, * or been canceled. A status of 0 indicates success. 
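[Editor's note: a minimal sketch of what the new net field means for callers of rdma_translate_ip(). The function name, the IPv4 caller, and the use of init_net are illustrative assumptions, not part of the patch.]

    #include <linux/in.h>
    #include <linux/string.h>
    #include <net/net_namespace.h>
    #include <rdma/ib_addr.h>

    /* Hypothetical caller: translate a local IPv4 address. */
    static int example_translate(struct sockaddr_in *sin)
    {
            struct rdma_dev_addr dev_addr;
            u16 vlan_id;

            memset(&dev_addr, 0, sizeof(dev_addr));
            /* New in this update: the namespace must be set before the call. */
            dev_addr.net = &init_net;

            return rdma_translate_ip((struct sockaddr *)sin, &dev_addr, &vlan_id);
    }

A caller running in a namespaced context would pass its own struct net rather than init_net.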
@@ -111,8 +125,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, int rdma_addr_size(struct sockaddr *addr); int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id); -int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac, - u16 *vlan_id); +int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, + u8 *smac, u16 *vlan_id, int if_index); static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) { @@ -160,7 +174,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) } /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ -static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) +static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid) { if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { struct sockaddr_in *out_in = (struct sockaddr_in *)out; diff --git a/kernel/include/rdma/ib_cache.h b/kernel/include/rdma/ib_cache.h index ad9a3c280..269a27cf0 100644 --- a/kernel/include/rdma/ib_cache.h +++ b/kernel/include/rdma/ib_cache.h @@ -43,6 +43,8 @@ * @port_num: The port number of the device to query. * @index: The index into the cached GID table to query. * @gid: The GID value found at the specified index. + * @attr: The GID attribute found at the specified index (only in RoCE). + * NULL means ignore (output parameter). * * ib_get_cached_gid() fetches the specified GID table entry stored in * the local software cache. @@ -50,13 +52,15 @@ int ib_get_cached_gid(struct ib_device *device, u8 port_num, int index, - union ib_gid *gid); + union ib_gid *gid, + struct ib_gid_attr *attr); /** * ib_find_cached_gid - Returns the port number and GID table index where * a specified GID value occurs. * @device: The device to query. * @gid: The GID value to search for. + * @ndev: In RoCE, the net device of the device. NULL means ignore. * @port_num: The port number of the device where the GID value was found. * @index: The index into the cached GID table where the GID was found. This * parameter may be NULL. @@ -65,10 +69,38 @@ int ib_get_cached_gid(struct ib_device *device, * the local software cache. */ int ib_find_cached_gid(struct ib_device *device, - union ib_gid *gid, + const union ib_gid *gid, + struct net_device *ndev, u8 *port_num, u16 *index); +/** + * ib_find_cached_gid_by_port - Returns the GID table index where a specified + * GID value occurs + * @device: The device to query. + * @gid: The GID value to search for. + * @port_num: The port number of the device where the GID value should be + * searched. + * @ndev: In RoCE, the net device of the device. NULL means ignore. + * @index: The index into the cached GID table where the GID was found. This + * parameter may be NULL. + * + * ib_find_cached_gid_by_port() searches for the specified GID value in + * the local software cache. + */ +int ib_find_cached_gid_by_port(struct ib_device *device, + const union ib_gid *gid, + u8 port_num, + struct net_device *ndev, + u16 *index); + +int ib_find_gid_by_filter(struct ib_device *device, + const union ib_gid *gid, + u8 port_num, + bool (*filter)(const union ib_gid *gid, + const struct ib_gid_attr *, + void *), + void *context, u16 *index); /** * ib_get_cached_pkey - Returns a cached PKey table entry * @device: The device to query. 
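[Editor's note: to illustrate the filter-based lookup added above, a hedged sketch of a callback that accepts only GID entries bound to a given net_device. The function names and the hard-coded port are invented for the example.]

    static bool example_gid_filter(const union ib_gid *gid,
                                   const struct ib_gid_attr *attr,
                                   void *context)
    {
            /* Accept only entries whose recorded net_device matches. */
            return attr->ndev == context;
    }

    static int example_lookup(struct ib_device *device, const union ib_gid *gid,
                              struct net_device *ndev, u16 *index)
    {
            return ib_find_gid_by_filter(device, gid, 1 /* port */,
                                         example_gid_filter, ndev, index);
    }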
diff --git a/kernel/include/rdma/ib_cm.h b/kernel/include/rdma/ib_cm.h index 39ed2d2fb..92a7d8591 100644 --- a/kernel/include/rdma/ib_cm.h +++ b/kernel/include/rdma/ib_cm.h @@ -105,14 +105,16 @@ enum ib_cm_data_size { IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, IB_CM_SIDR_REP_INFO_LENGTH = 72, - /* compare done u32 at a time */ - IB_CM_COMPARE_SIZE = (64 / sizeof(u32)) }; struct ib_cm_id; struct ib_cm_req_event_param { struct ib_cm_id *listen_id; + + /* P_Key that was used by the GMP's BTH header */ + u16 bth_pkey; + u8 port; struct ib_sa_path_rec *primary_path; @@ -223,6 +225,9 @@ struct ib_cm_apr_event_param { struct ib_cm_sidr_req_event_param { struct ib_cm_id *listen_id; + __be64 service_id; + /* P_Key that was used by the GMP's BTH header */ + u16 bth_pkey; u8 port; u16 pkey; }; @@ -337,11 +342,6 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id); #define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL) #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) -struct ib_cm_compare_data { - u32 data[IB_CM_COMPARE_SIZE]; - u32 mask[IB_CM_COMPARE_SIZE]; -}; - /** * ib_cm_listen - Initiates listening on the specified service ID for * connection and service ID resolution requests. @@ -354,12 +354,13 @@ struct ib_cm_compare_data { * range of service IDs. If set to 0, the service ID is matched * exactly. This parameter is ignored if %service_id is set to * IB_CM_ASSIGN_SERVICE_ID. - * @compare_data: This parameter is optional. It specifies data that must - * appear in the private data of a connection request for the specified - * listen request. */ -int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, - struct ib_cm_compare_data *compare_data); +int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, + __be64 service_mask); + +struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, + ib_cm_handler cm_handler, + __be64 service_id); struct ib_cm_req_param { struct ib_sa_path_rec *primary_path; diff --git a/kernel/include/rdma/ib_mad.h b/kernel/include/rdma/ib_mad.h index 9bb99e983..ec9b44dd3 100644 --- a/kernel/include/rdma/ib_mad.h +++ b/kernel/include/rdma/ib_mad.h @@ -42,8 +42,11 @@ #include #include -/* Management base version */ +/* Management base versions */ #define IB_MGMT_BASE_VERSION 1 +#define OPA_MGMT_BASE_VERSION 0x80 + +#define OPA_SMP_CLASS_VERSION 0x80 /* Management classes */ #define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 @@ -124,6 +127,23 @@ #define IB_DEFAULT_PKEY_PARTIAL 0x7FFF #define IB_DEFAULT_PKEY_FULL 0xFFFF +/* + * Generic trap/notice types + */ +#define IB_NOTICE_TYPE_FATAL 0x80 +#define IB_NOTICE_TYPE_URGENT 0x81 +#define IB_NOTICE_TYPE_SECURITY 0x82 +#define IB_NOTICE_TYPE_SM 0x83 +#define IB_NOTICE_TYPE_INFO 0x84 + +/* + * Generic trap/notice producers + */ +#define IB_NOTICE_PROD_CA cpu_to_be16(1) +#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2) +#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3) +#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4) + enum { IB_MGMT_MAD_HDR = 24, IB_MGMT_MAD_DATA = 232, @@ -135,6 +155,10 @@ enum { IB_MGMT_SA_DATA = 200, IB_MGMT_DEVICE_HDR = 64, IB_MGMT_DEVICE_DATA = 192, + IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA, + OPA_MGMT_MAD_DATA = 2024, + OPA_MGMT_RMPP_DATA = 2012, + OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA, }; struct ib_mad_hdr { @@ -181,12 +205,23 @@ struct ib_mad { u8 data[IB_MGMT_MAD_DATA]; }; +struct opa_mad { + struct ib_mad_hdr mad_hdr; + u8 data[OPA_MGMT_MAD_DATA]; +}; + struct ib_rmpp_mad { struct ib_mad_hdr mad_hdr; 
struct ib_rmpp_hdr rmpp_hdr; u8 data[IB_MGMT_RMPP_DATA]; }; +struct opa_rmpp_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 data[OPA_MGMT_RMPP_DATA]; +}; + struct ib_sa_mad { struct ib_mad_hdr mad_hdr; struct ib_rmpp_hdr rmpp_hdr; @@ -202,6 +237,8 @@ struct ib_vendor_mad { u8 data[IB_MGMT_VENDOR_DATA]; }; +#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001) + struct ib_class_port_info { u8 base_version; u8 class_version; @@ -222,6 +259,70 @@ struct ib_class_port_info { __be32 trap_qkey; }; +struct ib_mad_notice_attr { + u8 generic_type; + u8 prod_type_msb; + __be16 prod_type_lsb; + __be16 trap_num; + __be16 issuer_lid; + __be16 toggle_count; + + union { + struct { + u8 details[54]; + } raw_data; + + struct { + __be16 reserved; + __be16 lid; /* where violation happened */ + u8 port_num; /* where violation happened */ + } __packed ntc_129_131; + + struct { + __be16 reserved; + __be16 lid; /* LID where change occurred */ + u8 reserved2; + u8 local_changes; /* low bit - local changes */ + __be32 new_cap_mask; /* new capability mask */ + u8 reserved3; + u8 change_flags; /* low 3 bits only */ + } __packed ntc_144; + + struct { + __be16 reserved; + __be16 lid; /* lid where sys guid changed */ + __be16 reserved2; + __be64 new_sys_guid; + } __packed ntc_145; + + struct { + __be16 reserved; + __be16 lid; + __be16 dr_slid; + u8 method; + u8 reserved2; + __be16 attr_id; + __be32 attr_mod; + __be64 mkey; + u8 reserved3; + u8 dr_trunc_hop; + u8 dr_rtn_path[30]; + } __packed ntc_256; + + struct { + __be16 reserved; + __be16 lid1; + __be16 lid2; + __be32 key; + __be32 sl_qp1; /* SL: high 4 bits */ + __be32 qp2; /* high 8 bits reserved */ + union ib_gid gid1; + union ib_gid gid2; + } __packed ntc_257_258; + + } details; +}; + /** * ib_mad_send_buf - MAD data buffer and work request for sends. * @next: A pointer used to chain together MADs for posting. @@ -235,7 +336,10 @@ struct ib_class_port_info { * includes the common MAD, RMPP, and class specific headers. * @data_len: Indicates the total size of user-transferred data. * @seg_count: The number of RMPP segments allocated for this send. - * @seg_size: Size of each RMPP segment. + * @seg_size: Size of the data in each RMPP segment. This does not include + * class specific headers. + * @seg_rmpp_size: Size of each RMPP segment including the class specific + * headers. * @timeout_ms: Time to wait for a response. * @retries: Number of times to retry a request for a response. For MADs * using RMPP, this applies per window. On completion, returns the number @@ -255,6 +359,7 @@ struct ib_mad_send_buf { int data_len; int seg_count; int seg_size; + int seg_rmpp_size; int timeout_ms; int retries; }; @@ -263,7 +368,7 @@ struct ib_mad_send_buf { * ib_response_mad - Returns if the specified MAD has been generated in * response to a sent request or trap. */ -int ib_response_mad(struct ib_mad *mad); +int ib_response_mad(const struct ib_mad_hdr *hdr); /** * ib_get_rmpp_resptime - Returns the RMPP response time. @@ -366,7 +471,6 @@ enum { struct ib_mad_agent { struct ib_device *device; struct ib_qp *qp; - struct ib_mr *mr; ib_mad_recv_handler recv_handler; ib_mad_send_handler send_handler; ib_mad_snoop_handler snoop_handler; @@ -401,7 +505,10 @@ struct ib_mad_send_wc { struct ib_mad_recv_buf { struct list_head list; struct ib_grh *grh; - struct ib_mad *mad; + union { + struct ib_mad *mad; + struct opa_mad *opa_mad; + }; }; /** @@ -410,6 +517,7 @@ struct ib_mad_recv_buf { * @recv_buf: Specifies the location of the received data buffer(s). 
* @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers. * @mad_len: The length of the received MAD, without duplicated headers. + * @mad_seg_size: The size of individual MAD segments * * For received response, the wr_id contains a pointer to the ib_mad_send_buf * for the corresponding send request. @@ -419,6 +527,7 @@ struct ib_mad_recv_wc { struct ib_mad_recv_buf recv_buf; struct list_head rmpp_list; int mad_len; + size_t mad_seg_size; }; /** @@ -618,6 +727,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent, * automatically adjust the allocated buffer size to account for any * additional padding that may be necessary. * @gfp_mask: GFP mask used for the memory allocation. + * @base_version: Base Version of this MAD * * This routine allocates a MAD for sending. The returned MAD send buffer * will reference a data buffer usable for sending a MAD, along @@ -633,7 +743,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, int rmpp_active, int hdr_len, int data_len, - gfp_t gfp_mask); + gfp_t gfp_mask, + u8 base_version); /** * ib_is_mad_class_rmpp - returns whether given management class @@ -675,6 +786,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf); * @agent: the agent in question * @return: true if agent is performing rmpp, false otherwise. */ -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent); +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent); #endif /* IB_MAD_H */ diff --git a/kernel/include/rdma/ib_pack.h b/kernel/include/rdma/ib_pack.h index b1f7592e0..e99d8f9a4 100644 --- a/kernel/include/rdma/ib_pack.h +++ b/kernel/include/rdma/ib_pack.h @@ -76,6 +76,8 @@ enum { IB_OPCODE_UC = 0x20, IB_OPCODE_RD = 0x40, IB_OPCODE_UD = 0x60, + /* per IBTA 1.3 vol 1 Table 38, A10.3.2 */ + IB_OPCODE_CNP = 0x80, /* operations -- just used to define real constants */ IB_OPCODE_SEND_FIRST = 0x00, diff --git a/kernel/include/rdma/ib_sa.h b/kernel/include/rdma/ib_sa.h index 7e071a6ab..301969552 100644 --- a/kernel/include/rdma/ib_sa.h +++ b/kernel/include/rdma/ib_sa.h @@ -39,6 +39,7 @@ #include #include +#include #include #include @@ -154,11 +155,18 @@ struct ib_sa_path_rec { u8 packet_life_time_selector; u8 packet_life_time; u8 preference; - u8 smac[ETH_ALEN]; u8 dmac[ETH_ALEN]; - u16 vlan_id; + /* ignored in IB */ + int ifindex; + /* ignored in IB */ + struct net *net; }; +static inline struct net_device *ib_get_ndev_from_path(struct ib_sa_path_rec *rec) +{ + return rec->net ? 
dev_get_by_index(rec->net, rec->ifindex) : NULL; +} + #define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0) #define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1) #define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2) diff --git a/kernel/include/rdma/ib_smi.h b/kernel/include/rdma/ib_smi.h index 98b9086d7..b439e9884 100644 --- a/kernel/include/rdma/ib_smi.h +++ b/kernel/include/rdma/ib_smi.h @@ -119,10 +119,57 @@ struct ib_port_info { u8 link_roundtrip_latency[3]; }; +struct ib_node_info { + u8 base_version; + u8 class_version; + u8 node_type; + u8 num_ports; + __be64 sys_guid; + __be64 node_guid; + __be64 port_guid; + __be16 partition_cap; + __be16 device_id; + __be32 revision; + u8 local_port_num; + u8 vendor_id[3]; +} __packed; + +struct ib_vl_weight_elem { + u8 vl; /* IB: VL is low 4 bits, upper 4 bits reserved */ + /* OPA: VL is low 5 bits, upper 3 bits reserved */ + u8 weight; +}; + static inline u8 ib_get_smp_direction(struct ib_smp *smp) { return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION); } +/* + * SM Trap/Notice numbers + */ +#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129) +#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130) +#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131) +#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144) +#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145) +#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256) +#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257) +#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258) + +/* + * Other local changes flags (trap 144). + */ +#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */ +#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */ +#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01 + +/* + * M_Key volation flags in dr_trunc_hop (trap 256). + */ +#define IB_NOTICE_TRAP_DR_NOTICE 0x80 +#define IB_NOTICE_TRAP_DR_TRUNC 0x40 + + #endif /* IB_SMI_H */ diff --git a/kernel/include/rdma/ib_verbs.h b/kernel/include/rdma/ib_verbs.h index 65994a19e..120da1d7f 100644 --- a/kernel/include/rdma/ib_verbs.h +++ b/kernel/include/rdma/ib_verbs.h @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -64,6 +65,12 @@ union ib_gid { } global; }; +extern union ib_gid zgid; + +struct ib_gid_attr { + struct net_device *ndev; +}; + enum rdma_node_type { /* IB values map to NodeInfo:NodeType. 
*/ RDMA_NODE_IB_CA = 1, @@ -81,6 +88,13 @@ enum rdma_transport_type { RDMA_TRANSPORT_USNIC_UDP }; +enum rdma_protocol_type { + RDMA_PROTOCOL_IB, + RDMA_PROTOCOL_IBOE, + RDMA_PROTOCOL_IWARP, + RDMA_PROTOCOL_USNIC_UDP +}; + __attribute_const__ enum rdma_transport_type rdma_node_get_transport(enum rdma_node_type node_type); @@ -123,6 +137,8 @@ enum ib_device_cap_flags { IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), + IB_DEVICE_RC_IP_CSUM = (1<<25), + IB_DEVICE_RAW_IP_CSUM = (1<<26), IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), IB_DEVICE_SIGNATURE_HANDOVER = (1<<30), IB_DEVICE_ON_DEMAND_PAGING = (1<<31), @@ -166,6 +182,16 @@ struct ib_odp_caps { } per_transport_caps; }; +enum ib_cq_creation_flags { + IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, +}; + +struct ib_cq_init_attr { + unsigned int cqe; + int comp_vector; + u32 flags; +}; + struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; @@ -210,6 +236,8 @@ struct ib_device_attr { int sig_prot_cap; int sig_guard_cap; struct ib_odp_caps odp_caps; + uint64_t timestamp_mask; + uint64_t hca_core_clock; /* in KHZ */ }; enum ib_mtu { @@ -265,7 +293,7 @@ enum ib_port_cap_flags { IB_PORT_BOOT_MGMT_SUP = 1 << 23, IB_PORT_LINK_LATENCY_SUP = 1 << 24, IB_PORT_CLIENT_REG_SUP = 1 << 25, - IB_PORT_IP_BASED_GIDS = 1 << 26 + IB_PORT_IP_BASED_GIDS = 1 << 26, }; enum ib_port_width { @@ -346,6 +374,42 @@ union rdma_protocol_stats { struct iw_protocol_stats iw; }; +/* Define bits for the various functionality this port needs to be supported by + * the core. + */ +/* Management 0x00000FFF */ +#define RDMA_CORE_CAP_IB_MAD 0x00000001 +#define RDMA_CORE_CAP_IB_SMI 0x00000002 +#define RDMA_CORE_CAP_IB_CM 0x00000004 +#define RDMA_CORE_CAP_IW_CM 0x00000008 +#define RDMA_CORE_CAP_IB_SA 0x00000010 +#define RDMA_CORE_CAP_OPA_MAD 0x00000020 + +/* Address format 0x000FF000 */ +#define RDMA_CORE_CAP_AF_IB 0x00001000 +#define RDMA_CORE_CAP_ETH_AH 0x00002000 + +/* Protocol 0xFFF00000 */ +#define RDMA_CORE_CAP_PROT_IB 0x00100000 +#define RDMA_CORE_CAP_PROT_ROCE 0x00200000 +#define RDMA_CORE_CAP_PROT_IWARP 0x00400000 + +#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ + | RDMA_CORE_CAP_IB_MAD \ + | RDMA_CORE_CAP_IB_SMI \ + | RDMA_CORE_CAP_IB_CM \ + | RDMA_CORE_CAP_IB_SA \ + | RDMA_CORE_CAP_AF_IB) +#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ + | RDMA_CORE_CAP_IB_MAD \ + | RDMA_CORE_CAP_IB_CM \ + | RDMA_CORE_CAP_AF_IB \ + | RDMA_CORE_CAP_ETH_AH) +#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ + | RDMA_CORE_CAP_IW_CM) +#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ + | RDMA_CORE_CAP_OPA_MAD) + struct ib_port_attr { enum ib_port_state state; enum ib_mtu max_mtu; @@ -412,6 +476,8 @@ enum ib_event_type { IB_EVENT_GID_CHANGE, }; +const char *__attribute_const__ ib_event_msg(enum ib_event_type event); + struct ib_event { struct ib_device *device; union { @@ -499,20 +565,18 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); */ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); -enum ib_mr_create_flags { - IB_MR_SIGNATURE_EN = 1, -}; /** - * ib_mr_init_attr - Memory region init attributes passed to routine - * ib_create_mr. - * @max_reg_descriptors: max number of registration descriptors that - * may be used with registration work requests. - * @flags: MR creation flags bit mask. 
+ * enum ib_mr_type - memory region type + * @IB_MR_TYPE_MEM_REG: memory region that is used for + * normal registration + * @IB_MR_TYPE_SIGNATURE: memory region that is used for + * signature operations (data-integrity + * capable regions) */ -struct ib_mr_init_attr { - int max_reg_descriptors; - u32 flags; +enum ib_mr_type { + IB_MR_TYPE_MEM_REG, + IB_MR_TYPE_SIGNATURE, }; /** @@ -635,7 +699,6 @@ struct ib_ah_attr { u8 ah_flags; u8 port_num; u8 dmac[ETH_ALEN]; - u16 vlan_id; }; enum ib_wc_status { @@ -663,6 +726,8 @@ enum ib_wc_status { IB_WC_GENERAL_ERR }; +const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); + enum ib_wc_opcode { IB_WC_SEND, IB_WC_RDMA_WRITE, @@ -672,7 +737,7 @@ enum ib_wc_opcode { IB_WC_BIND_MW, IB_WC_LSO, IB_WC_LOCAL_INV, - IB_WC_FAST_REG_MR, + IB_WC_REG_MR, IB_WC_MASKED_COMP_SWAP, IB_WC_MASKED_FETCH_ADD, /* @@ -809,7 +874,6 @@ enum ib_qp_create_flags { IB_QP_CREATE_RESERVED_END = 1 << 31, }; - /* * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler * callback to destroy the passed in QP. @@ -893,10 +957,10 @@ enum ib_qp_attr_mask { IB_QP_PATH_MIG_STATE = (1<<18), IB_QP_CAP = (1<<19), IB_QP_DEST_QPN = (1<<20), - IB_QP_SMAC = (1<<21), - IB_QP_ALT_SMAC = (1<<22), - IB_QP_VID = (1<<23), - IB_QP_ALT_VID = (1<<24), + IB_QP_RESERVED1 = (1<<21), + IB_QP_RESERVED2 = (1<<22), + IB_QP_RESERVED3 = (1<<23), + IB_QP_RESERVED4 = (1<<24), }; enum ib_qp_state { @@ -946,10 +1010,6 @@ struct ib_qp_attr { u8 rnr_retry; u8 alt_port_num; u8 alt_timeout; - u8 smac[ETH_ALEN]; - u8 alt_smac[ETH_ALEN]; - u16 vlan_id; - u16 alt_vlan_id; }; enum ib_wr_opcode { @@ -964,7 +1024,7 @@ enum ib_wr_opcode { IB_WR_SEND_WITH_INV, IB_WR_RDMA_READ_WITH_INV, IB_WR_LOCAL_INV, - IB_WR_FAST_REG_MR, + IB_WR_REG_MR, IB_WR_MASKED_ATOMIC_CMP_AND_SWP, IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, IB_WR_BIND_MW, @@ -1002,12 +1062,6 @@ struct ib_sge { u32 lkey; }; -struct ib_fast_reg_page_list { - struct ib_device *device; - u64 *page_list; - unsigned int max_page_list_len; -}; - /** * struct ib_mw_bind_info - Parameters for a memory window bind operation. * @mr: A memory region to bind the memory window to. @@ -1036,54 +1090,89 @@ struct ib_send_wr { __be32 imm_data; u32 invalidate_rkey; } ex; - union { - struct { - u64 remote_addr; - u32 rkey; - } rdma; - struct { - u64 remote_addr; - u64 compare_add; - u64 swap; - u64 compare_add_mask; - u64 swap_mask; - u32 rkey; - } atomic; - struct { - struct ib_ah *ah; - void *header; - int hlen; - int mss; - u32 remote_qpn; - u32 remote_qkey; - u16 pkey_index; /* valid for GSI only */ - u8 port_num; /* valid for DR SMPs on switch only */ - } ud; - struct { - u64 iova_start; - struct ib_fast_reg_page_list *page_list; - unsigned int page_shift; - unsigned int page_list_len; - u32 length; - int access_flags; - u32 rkey; - } fast_reg; - struct { - struct ib_mw *mw; - /* The new rkey for the memory window. 
*/ - u32 rkey; - struct ib_mw_bind_info bind_info; - } bind_mw; - struct { - struct ib_sig_attrs *sig_attrs; - struct ib_mr *sig_mr; - int access_flags; - struct ib_sge *prot; - } sig_handover; - } wr; - u32 xrc_remote_srq_num; /* XRC TGT QPs only */ }; +struct ib_rdma_wr { + struct ib_send_wr wr; + u64 remote_addr; + u32 rkey; +}; + +static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) +{ + return container_of(wr, struct ib_rdma_wr, wr); +} + +struct ib_atomic_wr { + struct ib_send_wr wr; + u64 remote_addr; + u64 compare_add; + u64 swap; + u64 compare_add_mask; + u64 swap_mask; + u32 rkey; +}; + +static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) +{ + return container_of(wr, struct ib_atomic_wr, wr); +} + +struct ib_ud_wr { + struct ib_send_wr wr; + struct ib_ah *ah; + void *header; + int hlen; + int mss; + u32 remote_qpn; + u32 remote_qkey; + u16 pkey_index; /* valid for GSI only */ + u8 port_num; /* valid for DR SMPs on switch only */ +}; + +static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) +{ + return container_of(wr, struct ib_ud_wr, wr); +} + +struct ib_reg_wr { + struct ib_send_wr wr; + struct ib_mr *mr; + u32 key; + int access; +}; + +static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) +{ + return container_of(wr, struct ib_reg_wr, wr); +} + +struct ib_bind_mw_wr { + struct ib_send_wr wr; + struct ib_mw *mw; + /* The new rkey for the memory window. */ + u32 rkey; + struct ib_mw_bind_info bind_info; +}; + +static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr) +{ + return container_of(wr, struct ib_bind_mw_wr, wr); +} + +struct ib_sig_handover_wr { + struct ib_send_wr wr; + struct ib_sig_attrs *sig_attrs; + struct ib_mr *sig_mr; + int access_flags; + struct ib_sge *prot; +}; + +static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) +{ + return container_of(wr, struct ib_sig_handover_wr, wr); +} + struct ib_recv_wr { struct ib_recv_wr *next; u64 wr_id; @@ -1182,6 +1271,7 @@ struct ib_uobject { int id; /* index into kernel idr */ struct kref ref; struct rw_semaphore mutex; /* protects .live */ + struct rcu_head rcu; /* kfree_rcu() overhead */ int live; }; @@ -1193,9 +1283,11 @@ struct ib_udata { }; struct ib_pd { + u32 local_dma_lkey; struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; /* count all resources */ + struct ib_mr *local_mr; }; struct ib_xrcd { @@ -1268,6 +1360,9 @@ struct ib_mr { struct ib_uobject *uobject; u32 lkey; u32 rkey; + u64 iova; + u32 length; + unsigned int page_size; atomic_t usecnt; /* count number of MWs */ }; @@ -1407,7 +1502,7 @@ struct ib_flow { struct ib_uobject *uobject; }; -struct ib_mad; +struct ib_mad_hdr; struct ib_grh; enum ib_process_mad_flags { @@ -1429,7 +1524,7 @@ struct ib_cache { rwlock_t lock; struct ib_event_handler event_handler; struct ib_pkey_cache **pkey_cache; - struct ib_gid_cache **gid_cache; + struct ib_gid_table **gid_cache; u8 *lmc_cache; }; @@ -1474,6 +1569,13 @@ struct ib_dma_mapping_ops { struct iw_cm_verbs; +struct ib_port_immutable { + int pkey_tbl_len; + int gid_tbl_len; + u32 core_cap_flags; + u32 max_mad_size; +}; + struct ib_device { struct device *dma_device; @@ -1484,11 +1586,15 @@ struct ib_device { spinlock_t client_data_lock; struct list_head core_list; + /* Access to the client_data_list is protected by the client_data_lock + * spinlock and the lists_rwsem read-write semaphore */ struct list_head client_data_list; struct ib_cache cache; - int *pkey_tbl_len; - int *gid_tbl_len; + /** + * port_immutable is indexed 
by port number + */ + struct ib_port_immutable *port_immutable; int num_comp_vectors; @@ -1497,15 +1603,54 @@ struct ib_device { int (*get_protocol_stats)(struct ib_device *device, union rdma_protocol_stats *stats); int (*query_device)(struct ib_device *device, - struct ib_device_attr *device_attr); + struct ib_device_attr *device_attr, + struct ib_udata *udata); int (*query_port)(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr); enum rdma_link_layer (*get_link_layer)(struct ib_device *device, u8 port_num); + /* When calling get_netdev, the HW vendor's driver should return the + * net device of device @device at port @port_num or NULL if such + * a net device doesn't exist. The vendor driver should call dev_hold + * on this net device. The HW vendor's device driver must guarantee + * that this function returns NULL before the net device reaches + * NETDEV_UNREGISTER_FINAL state. + */ + struct net_device *(*get_netdev)(struct ib_device *device, + u8 port_num); int (*query_gid)(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); + /* When calling add_gid, the HW vendor's driver should + * add the gid of device @device at gid index @index of + * port @port_num to be @gid. Meta-info of that gid (for example, + * the network device related to this gid) is available + * at @attr. @context allows the HW vendor driver to store extra + * information together with a GID entry. The HW vendor may allocate + * memory to contain this information and store it in @context when a + * new GID entry is written to. Params are consistent until the next + * call of add_gid or delete_gid. The function should return 0 on + * success or error otherwise. The function could be called + * concurrently for different ports. This function is only called + * when roce_gid_table is used. 
+ */ + int (*del_gid)(struct ib_device *device, + u8 port_num, + unsigned int index, + void **context); int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, u16 *pkey); int (*modify_device)(struct ib_device *device, @@ -1561,8 +1706,8 @@ struct ib_device { int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr, struct ib_recv_wr **bad_recv_wr); - struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, - int comp_vector, + struct ib_cq * (*create_cq)(struct ib_device *device, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, @@ -1599,14 +1744,12 @@ struct ib_device { int (*query_mr)(struct ib_mr *mr, struct ib_mr_attr *mr_attr); int (*dereg_mr)(struct ib_mr *mr); - int (*destroy_mr)(struct ib_mr *mr); - struct ib_mr * (*create_mr)(struct ib_pd *pd, - struct ib_mr_init_attr *mr_init_attr); - struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, - int max_page_list_len); - struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, - int page_list_len); - void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list); + struct ib_mr * (*alloc_mr)(struct ib_pd *pd, + enum ib_mr_type mr_type, + u32 max_num_sg); + int (*map_mr_sg)(struct ib_mr *mr, + struct scatterlist *sg, + int sg_nents); int (*rereg_phys_mr)(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, @@ -1637,10 +1780,13 @@ struct ib_device { int (*process_mad)(struct ib_device *device, int process_mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, - struct ib_mad *out_mad); + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, + size_t in_mad_size, + struct ib_mad_hdr *out_mad, + size_t *out_mad_size, + u16 *out_mad_pkey_index); struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, struct ib_ucontext *ucontext, struct ib_udata *udata); @@ -1652,6 +1798,7 @@ struct ib_device { int (*destroy_flow)(struct ib_flow *flow_id); int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status); + void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); struct ib_dma_mapping_ops *dma_ops; @@ -1673,15 +1820,46 @@ struct ib_device { char node_desc[64]; __be64 node_guid; u32 local_dma_lkey; + u16 is_switch:1; u8 node_type; u8 phys_port_cnt; + + /** + * The following mandatory functions are used only at device + * registration. Keep functions such as these at the end of this + * structure to avoid cache line misses when accessing struct ib_device + * in fast paths. + */ + int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); }; struct ib_client { char *name; void (*add) (struct ib_device *); - void (*remove)(struct ib_device *); - + void (*remove)(struct ib_device *, void *client_data); + + /* Returns the net_dev belonging to this ib_client and matching the + * given parameters. + * @dev: An RDMA device that the net_dev uses for communication. + * @port: A physical port number on the RDMA device. + * @pkey: P_Key that the net_dev uses if applicable. + * @gid: A GID that the net_dev uses to communicate. + * @addr: An IP address the net_dev is configured with. + * @client_data: The device's client data set by ib_set_client_data(). + * + * An ib_client that implements a net_dev on top of RDMA devices + * (such as IP over IB) should implement this callback, allowing the + * rdma_cm module to find the right net_dev for a given request. 
+ * + * The caller is responsible for calling dev_put on the returned + * netdev. */ + struct net_device *(*get_net_dev_by_params)( + struct ib_device *dev, + u8 port, + u16 pkey, + const union ib_gid *gid, + const struct sockaddr *addr, + void *client_data); struct list_head list; }; @@ -1743,8 +1921,292 @@ int ib_query_port(struct ib_device *device, enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num); +/** + * rdma_cap_ib_switch - Check if the device is an IB switch + * @device: Device to check + * + * The device driver is responsible for setting the is_switch bit + * in the ib_device structure at init time. + * + * Return: true if the device is an IB switch. + */ +static inline bool rdma_cap_ib_switch(const struct ib_device *device) +{ + return device->is_switch; +} + +/** + * rdma_start_port - Return the first valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return start port number + */ +static inline u8 rdma_start_port(const struct ib_device *device) +{ + return rdma_cap_ib_switch(device) ? 0 : 1; +} + +/** + * rdma_end_port - Return the last valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return last port number + */ +static inline u8 rdma_end_port(const struct ib_device *device) +{ + return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; +} + +static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; +} + +static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; +} + +static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; +} + +static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & + (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); +} + +/** + * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand + * Management Datagrams. + * @device: Device to check + * @port_num: Port number to check + * + * Management Datagrams (MAD) are a required part of the InfiniBand + * specification and are supported on all InfiniBand devices. A slightly + * extended version is also supported on OPA interfaces. + * + * Return: true if the port supports sending/receiving of MAD packets. + */ +static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; +} + +/** + * rdma_cap_opa_mad - Check if the port of a device provides support for OPA + * Management Datagrams. + * @device: Device to check + * @port_num: Port number to check + * + * Intel Omni-Path devices extend and/or replace the InfiniBand Management + * datagrams with their own versions. These OPA MADs share many but not all of + * the characteristics of InfiniBand MADs. + * + * OPA MADs differ in the following ways: + * + * 1) MADs are variable size up to 2K + * IBTA defined MADs remain fixed at 256 bytes + * 2) OPA SMPs must carry valid PKeys + * 3) OPA SMP packets are a different format + * + * Return: true if the port supports OPA MAD packet formats. 
+ */ +static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) +{ + return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) + == RDMA_CORE_CAP_OPA_MAD; +} + +/** + * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand + * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). + * @device: Device to check + * @port_num: Port number to check + * + * Each InfiniBand node is required to provide a Subnet Management Agent + * that the subnet manager can access. Prior to the fabric being fully + * configured by the subnet manager, the SMA is accessed via a well known + * interface called the Subnet Management Interface (SMI). This interface + * uses directed route packets to communicate with the SM to get around the + * chicken and egg problem of the SM needing to know what's on the fabric + * in order to configure the fabric, and needing to configure the fabric in + * order to send packets to the devices on the fabric. These directed + * route packets do not need the fabric fully configured in order to reach + * their destination. The SMI is the only method allowed to send + * directed route packets on an InfiniBand fabric. + * + * Return: true if the port provides an SMI. + */ +static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; +} + +/** + * rdma_cap_ib_cm - Check if the port of a device has the InfiniBand + * Communication Manager capability. + * @device: Device to check + * @port_num: Port number to check + * + * The InfiniBand Communication Manager is one of many pre-defined General + * Service Agents (GSA) that are accessed via the General Service + * Interface (GSI). Its role is to facilitate establishment of connections + * between nodes as well as other management related tasks for established + * connections. + * + * Return: true if the port supports an IB CM (this does not guarantee that + * a CM is actually running however). + */ +static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; +} + +/** + * rdma_cap_iw_cm - Check if the port of a device has the iWARP + * Communication Manager capability. + * @device: Device to check + * @port_num: Port number to check + * + * Similar to above, but specific to iWARP connections which have a different + * management protocol than InfiniBand. + * + * Return: true if the port supports an iWARP CM (this does not guarantee that + * a CM is actually running however). + */ +static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; +} + +/** + * rdma_cap_ib_sa - Check if the port of a device has the InfiniBand + * Subnet Administration capability. + * @device: Device to check + * @port_num: Port number to check + * + * An InfiniBand Subnet Administration (SA) service is a pre-defined General + * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand + * fabrics, devices should resolve routes to other hosts by contacting the + * SA to query the proper route. + * + * Return: true if the port should act as a client to the fabric Subnet + * Administration interface. This does not imply that the SA service is + * running locally. 
+ */ +static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; +} + +/** + * rdma_cap_ib_mcast - Check if the port of a device has the InfiniBand + * Multicast capability. + * @device: Device to check + * @port_num: Port number to check + * + * InfiniBand multicast registration is more complex than normal IPv4 or + * IPv6 multicast registration. Each Host Channel Adapter must register + * with the Subnet Manager when it wishes to join a multicast group. It + * should do so only once regardless of how many queue pairs it subscribes + * to this group. And it should leave the group only after all queue pairs + * attached to the group have been detached. + * + * Return: true if the port must undertake the additional administrative + * overhead of registering/unregistering with the SM and tracking of the + * total number of queue pairs attached to the multicast group. + */ +static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) +{ + return rdma_cap_ib_sa(device, port_num); +} + +/** + * rdma_cap_af_ib - Check if the port of a device has the Native + * InfiniBand Address capability. + * @device: Device to check + * @port_num: Port number to check + * + * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default + * GID. RoCE uses a different mechanism, but still generates a GID via + * a prescribed mechanism and port specific data. + * + * Return: true if the port uses a GID address to identify devices on the + * network. + */ +static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; +} + +/** + * rdma_cap_eth_ah - Check if the port of a device has the Ethernet + * Address Handle capability. + * @device: Device to check + * @port_num: Port number to check + * + * RoCE is InfiniBand over Ethernet, and it uses a well defined technique + * to fabricate GIDs over Ethernet/IP specific addresses native to the + * port. Normally, packet headers are generated by the sending host + * adapter, but when sending connectionless datagrams, we must manually + * inject the proper headers for the fabric we are communicating over. + * + * Return: true if we are running as a RoCE port and must force the + * addition of a Global Route Header built from our Ethernet Address + * Handle into our header list for connectionless packets. + */ +static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; +} + +/** + * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. + * + * @device: Device + * @port_num: Port number + * + * This MAD size includes the MAD headers and MAD payload. No other headers + * are included. + * + * Return the max MAD size required by the Port. Will return 0 if the port + * does not support MADs. + */ +static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].max_mad_size; +} + +/** + * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table + * @device: Device to check + * @port_num: Port number to check + * + * RoCE GID table mechanism manages the various GIDs for a device. + * + * NOTE: if allocating the port's GID table has failed, this call will still + * return true, but any RoCE GID table API will fail. 
+ * + * Return: true if the port uses RoCE GID table mechanism in order to manage + * its GIDs. + */ +static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, + u8 port_num) +{ + return rdma_protocol_roce(device, port_num) && + device->add_gid && device->del_gid; +} + int ib_query_gid(struct ib_device *device, - u8 port_num, int index, union ib_gid *gid); + u8 port_num, int index, union ib_gid *gid, + struct ib_gid_attr *attr); int ib_query_pkey(struct ib_device *device, u8 port_num, u16 index, u16 *pkey); @@ -1758,25 +2220,14 @@ int ib_modify_port(struct ib_device *device, struct ib_port_modify *port_modify); int ib_find_gid(struct ib_device *device, union ib_gid *gid, - u8 *port_num, u16 *index); + struct net_device *ndev, u8 *port_num, u16 *index); int ib_find_pkey(struct ib_device *device, u8 port_num, u16 pkey, u16 *index); -/** - * ib_alloc_pd - Allocates an unused protection domain. - * @device: The device on which to allocate the protection domain. - * - * A protection domain object provides an association between QPs, shared - * receive queues, address handles, memory regions, and memory windows. - */ struct ib_pd *ib_alloc_pd(struct ib_device *device); -/** - * ib_dealloc_pd - Deallocates a protection domain. - * @pd: The protection domain to deallocate. - */ -int ib_dealloc_pd(struct ib_pd *pd); +void ib_dealloc_pd(struct ib_pd *pd); /** * ib_create_ah - Creates an address handle for the given address vector. @@ -1799,8 +2250,9 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); * @ah_attr: Returned attributes that can be used when creating an address * handle for replying to the message. */ -int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, - struct ib_grh *grh, struct ib_ah_attr *ah_attr); +int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, + const struct ib_wc *wc, const struct ib_grh *grh, + struct ib_ah_attr *ah_attr); /** * ib_create_ah_from_wc - Creates an address handle associated with the @@ -1814,8 +2266,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, * The address handle is used to reference a local or global destination * in all UD QP post sends. */ -struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, - struct ib_grh *grh, u8 port_num); +struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, + const struct ib_grh *grh, u8 port_num); /** * ib_modify_ah - Modifies the address vector associated with an address @@ -2011,16 +2463,15 @@ static inline int ib_post_recv(struct ib_qp *qp, * asynchronous event not associated with a completion occurs on the CQ. * @cq_context: Context associated with the CQ returned to the user via * the associated completion and event handlers. - * @cqe: The minimum size of the CQ. - * @comp_vector - Completion vector used to signal completion events. - * Must be >= 0 and < context->num_comp_vectors. + * @cq_attr: The attributes the CQ should be created upon. * * Users can examine the cq structure to determine the actual CQ size. */ struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), - void *cq_context, int cqe, int comp_vector); + void *cq_context, + const struct ib_cq_init_attr *cq_attr); /** * ib_resize_cq - Modifies the capacity of the CQ. 
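[Editor's note: to make the ib_create_cq() signature change above concrete, a hedged sketch of the new calling convention. The wrapper name and the sizes are arbitrary example values, not part of the patch.]

    static struct ib_cq *example_create_cq(struct ib_device *device,
                                           ib_comp_handler comp, void *ctx)
    {
            struct ib_cq_init_attr cq_attr = {
                    .cqe = 256,         /* minimum CQ size, example value */
                    .comp_vector = 0,
                    .flags = 0,         /* or IB_CQ_FLAGS_TIMESTAMP_COMPLETION */
            };

            /* cqe and comp_vector are no longer separate arguments. */
            return ib_create_cq(device, comp, NULL, ctx, &cq_attr);
    }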
@@ -2387,52 +2838,6 @@ static inline void ib_dma_free_coherent(struct ib_device *dev, dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); } -/** - * ib_reg_phys_mr - Prepares a virtually addressed memory region for use - * by an HCA. - * @pd: The protection domain associated assigned to the registered region. - * @phys_buf_array: Specifies a list of physical buffers to use in the - * memory region. - * @num_phys_buf: Specifies the size of the phys_buf_array. - * @mr_access_flags: Specifies the memory access rights. - * @iova_start: The offset of the region's starting I/O virtual address. - */ -struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - int mr_access_flags, - u64 *iova_start); - -/** - * ib_rereg_phys_mr - Modifies the attributes of an existing memory region. - * Conceptually, this call performs the functions deregister memory region - * followed by register physical memory region. Where possible, - * resources are reused instead of deallocated and reallocated. - * @mr: The memory region to modify. - * @mr_rereg_mask: A bit-mask used to indicate which of the following - * properties of the memory region are being modified. - * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies - * the new protection domain to associated with the memory region, - * otherwise, this parameter is ignored. - * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this - * field specifies a list of physical buffers to use in the new - * translation, otherwise, this parameter is ignored. - * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this - * field specifies the size of the phys_buf_array, otherwise, this - * parameter is ignored. - * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this - * field specifies the new memory access rights, otherwise, this - * parameter is ignored. - * @iova_start: The offset of the region's starting I/O virtual address. - */ -int ib_rereg_phys_mr(struct ib_mr *mr, - int mr_rereg_mask, - struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - int mr_access_flags, - u64 *iova_start); - /** * ib_query_mr - Retrieves information about a specific memory region. * @mr: The memory region to retrieve information about. @@ -2449,60 +2854,9 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); */ int ib_dereg_mr(struct ib_mr *mr); - -/** - * ib_create_mr - Allocates a memory region that may be used for - * signature handover operations. - * @pd: The protection domain associated with the region. - * @mr_init_attr: memory region init attributes. - */ -struct ib_mr *ib_create_mr(struct ib_pd *pd, - struct ib_mr_init_attr *mr_init_attr); - -/** - * ib_destroy_mr - Destroys a memory region that was created using - * ib_create_mr and removes it from HW translation tables. - * @mr: The memory region to destroy. - * - * This function can fail, if the memory region has memory windows bound to it. - */ -int ib_destroy_mr(struct ib_mr *mr); - -/** - * ib_alloc_fast_reg_mr - Allocates memory region usable with the - * IB_WR_FAST_REG_MR send work request. - * @pd: The protection domain associated with the region. - * @max_page_list_len: requested max physical buffer list length to be - * used with fast register work requests for this MR. - */ -struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len); - -/** - * ib_alloc_fast_reg_page_list - Allocates a page list array - * @device - ib device pointer. 
- * @page_list_len - size of the page list array to be allocated. - * - * This allocates and returns a struct ib_fast_reg_page_list * and a - * page_list array that is at least page_list_len in size. The actual - * size is returned in max_page_list_len. The caller is responsible - * for initializing the contents of the page_list array before posting - * a send work request with the IB_WC_FAST_REG_MR opcode. - * - * The page_list array entries must be translated using one of the - * ib_dma_*() functions just like the addresses passed to - * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct - * ib_fast_reg_page_list must not be modified by the caller until the - * IB_WC_FAST_REG_MR work request completes. - */ -struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list( - struct ib_device *device, int page_list_len); - -/** - * ib_free_fast_reg_page_list - Deallocates a previously allocated - * page list array. - * @page_list - struct ib_fast_reg_page_list pointer to be deallocated. - */ -void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); +struct ib_mr *ib_alloc_mr(struct ib_pd *pd, + enum ib_mr_type mr_type, + u32 max_num_sg); /** * ib_update_fast_reg_key - updates the key portion of the fast_reg MR @@ -2668,4 +3022,32 @@ static inline int ib_check_mr_access(int flags) int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status); +struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, + u16 pkey, const union ib_gid *gid, + const struct sockaddr *addr); + +int ib_map_mr_sg(struct ib_mr *mr, + struct scatterlist *sg, + int sg_nents, + unsigned int page_size); + +static inline int +ib_map_mr_sg_zbva(struct ib_mr *mr, + struct scatterlist *sg, + int sg_nents, + unsigned int page_size) +{ + int n; + + n = ib_map_mr_sg(mr, sg, sg_nents, page_size); + mr->iova = 0; + + return n; +} + +int ib_sg_to_pages(struct ib_mr *mr, + struct scatterlist *sgl, + int sg_nents, + int (*set_page)(struct ib_mr *, u64)); + #endif /* IB_VERBS_H */ diff --git a/kernel/include/rdma/iw_cm.h b/kernel/include/rdma/iw_cm.h index 1017e0bdf..036bd2772 100644 --- a/kernel/include/rdma/iw_cm.h +++ b/kernel/include/rdma/iw_cm.h @@ -91,6 +91,7 @@ struct iw_cm_id { /* Used by provider to add and remove refs on IW cm_id */ void (*add_ref)(struct iw_cm_id *); void (*rem_ref)(struct iw_cm_id *); + u8 tos; }; struct iw_cm_conn_param { diff --git a/kernel/include/rdma/opa_port_info.h b/kernel/include/rdma/opa_port_info.h new file mode 100644 index 000000000..a0fa975cd --- /dev/null +++ b/kernel/include/rdma/opa_port_info.h @@ -0,0 +1,433 @@ +/* + * Copyright (c) 2014 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if !defined(OPA_PORT_INFO_H) +#define OPA_PORT_INFO_H + +/* Temporary until HFI driver is updated */ +#ifndef USE_PI_LED_ENABLE +#define USE_PI_LED_ENABLE 0 +#endif + +#define OPA_PORT_LINK_MODE_NOP 0 /* No change */ +#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ + +#define OPA_PORT_PACKET_FORMAT_NOP 0 /* No change */ +#define OPA_PORT_PACKET_FORMAT_8B 1 /* Format 8B */ +#define OPA_PORT_PACKET_FORMAT_9B 2 /* Format 9B */ +#define OPA_PORT_PACKET_FORMAT_10B 4 /* Format 10B */ +#define OPA_PORT_PACKET_FORMAT_16B 8 /* Format 16B */ + +#define OPA_PORT_LTP_CRC_MODE_NONE 0 /* No change */ +#define OPA_PORT_LTP_CRC_MODE_14 1 /* 14-bit LTP CRC mode (optional) */ +#define OPA_PORT_LTP_CRC_MODE_16 2 /* 16-bit LTP CRC mode */ +#define OPA_PORT_LTP_CRC_MODE_48 4 /* 48-bit LTP CRC mode (optional) */ +#define OPA_PORT_LTP_CRC_MODE_PER_LANE 8 /* 12/16-bit per lane LTP CRC mode */ + +/* Link Down / Neighbor Link Down Reason; indicated as follows: */ +#define OPA_LINKDOWN_REASON_NONE 0 /* No specified reason */ +#define OPA_LINKDOWN_REASON_RCV_ERROR_0 1 +#define OPA_LINKDOWN_REASON_BAD_PKT_LEN 2 +#define OPA_LINKDOWN_REASON_PKT_TOO_LONG 3 +#define OPA_LINKDOWN_REASON_PKT_TOO_SHORT 4 +#define OPA_LINKDOWN_REASON_BAD_SLID 5 +#define OPA_LINKDOWN_REASON_BAD_DLID 6 +#define OPA_LINKDOWN_REASON_BAD_L2 7 +#define OPA_LINKDOWN_REASON_BAD_SC 8 +#define OPA_LINKDOWN_REASON_RCV_ERROR_8 9 +#define OPA_LINKDOWN_REASON_BAD_MID_TAIL 10 +#define OPA_LINKDOWN_REASON_RCV_ERROR_10 11 +#define OPA_LINKDOWN_REASON_PREEMPT_ERROR 12 +#define OPA_LINKDOWN_REASON_PREEMPT_VL15 13 +#define OPA_LINKDOWN_REASON_BAD_VL_MARKER 14 +#define OPA_LINKDOWN_REASON_RCV_ERROR_14 15 +#define OPA_LINKDOWN_REASON_RCV_ERROR_15 16 +#define OPA_LINKDOWN_REASON_BAD_HEAD_DIST 17 +#define OPA_LINKDOWN_REASON_BAD_TAIL_DIST 18 +#define OPA_LINKDOWN_REASON_BAD_CTRL_DIST 19 +#define OPA_LINKDOWN_REASON_BAD_CREDIT_ACK 20 +#define OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER 21 +#define OPA_LINKDOWN_REASON_BAD_PREEMPT 22 +#define OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT 23 +#define OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT 24 +#define OPA_LINKDOWN_REASON_RCV_ERROR_24 25 +#define OPA_LINKDOWN_REASON_RCV_ERROR_25 26 +#define OPA_LINKDOWN_REASON_RCV_ERROR_26 27 +#define OPA_LINKDOWN_REASON_RCV_ERROR_27 28 +#define OPA_LINKDOWN_REASON_RCV_ERROR_28 29 +#define OPA_LINKDOWN_REASON_RCV_ERROR_29 30 +#define OPA_LINKDOWN_REASON_RCV_ERROR_30 31 +#define OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN 32 +#define OPA_LINKDOWN_REASON_UNKNOWN 33 +/* 34 -reserved */ +#define OPA_LINKDOWN_REASON_REBOOT 35 +#define OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN 36 +/* 37-38 reserved */ +#define OPA_LINKDOWN_REASON_FM_BOUNCE 39 +#define OPA_LINKDOWN_REASON_SPEED_POLICY 40 +#define OPA_LINKDOWN_REASON_WIDTH_POLICY 41 +/* 42-48 reserved */ +#define OPA_LINKDOWN_REASON_DISCONNECTED 49 +#define OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED 50 +#define OPA_LINKDOWN_REASON_NOT_INSTALLED 51 +#define OPA_LINKDOWN_REASON_CHASSIS_CONFIG 52 +/* 53 reserved */ +#define 
OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED 54 +/* 55 reserved */ +#define OPA_LINKDOWN_REASON_POWER_POLICY 56 +#define OPA_LINKDOWN_REASON_LINKSPEED_POLICY 57 +#define OPA_LINKDOWN_REASON_LINKWIDTH_POLICY 58 +/* 59 reserved */ +#define OPA_LINKDOWN_REASON_SWITCH_MGMT 60 +#define OPA_LINKDOWN_REASON_SMA_DISABLED 61 +/* 62 reserved */ +#define OPA_LINKDOWN_REASON_TRANSIENT 63 +/* 64-255 reserved */ + +/* OPA Link Init reason; indicated as follows: */ +/* 3-7; 11-15 reserved; 8-15 cleared on Polling->LinkUp */ +#define OPA_LINKINIT_REASON_NOP 0 +#define OPA_LINKINIT_REASON_LINKUP (1 << 4) +#define OPA_LINKINIT_REASON_FLAPPING (2 << 4) +#define OPA_LINKINIT_REASON_CLEAR (8 << 4) +#define OPA_LINKINIT_OUTSIDE_POLICY (8 << 4) +#define OPA_LINKINIT_QUARANTINED (9 << 4) +#define OPA_LINKINIT_INSUFIC_CAPABILITY (10 << 4) + +#define OPA_LINK_SPEED_NOP 0x0000 /* Reserved (1-5 Gbps) */ +#define OPA_LINK_SPEED_12_5G 0x0001 /* 12.5 Gbps */ +#define OPA_LINK_SPEED_25G 0x0002 /* 25.78125? Gbps (EDR) */ + +#define OPA_LINK_WIDTH_1X 0x0001 +#define OPA_LINK_WIDTH_2X 0x0002 +#define OPA_LINK_WIDTH_3X 0x0004 +#define OPA_LINK_WIDTH_4X 0x0008 + +#define OPA_CAP_MASK3_IsSnoopSupported (1 << 7) +#define OPA_CAP_MASK3_IsAsyncSC2VLSupported (1 << 6) +#define OPA_CAP_MASK3_IsAddrRangeConfigSupported (1 << 5) +#define OPA_CAP_MASK3_IsPassThroughSupported (1 << 4) +#define OPA_CAP_MASK3_IsSharedSpaceSupported (1 << 3) +/* reserved (1 << 2) */ +#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1) +#define OPA_CAP_MASK3_IsVLrSupported (1 << 0) + +/** + * new MTU values + */ +enum { + OPA_MTU_8192 = 6, + OPA_MTU_10240 = 7, +}; + +enum { + OPA_PORT_PHYS_CONF_DISCONNECTED = 0, + OPA_PORT_PHYS_CONF_STANDARD = 1, + OPA_PORT_PHYS_CONF_FIXED = 2, + OPA_PORT_PHYS_CONF_VARIABLE = 3, + OPA_PORT_PHYS_CONF_SI_PHOTO = 4 +}; + +enum port_info_field_masks { + /* vl.cap */ + OPA_PI_MASK_VL_CAP = 0x1F, + /* port_states.ledenable_offlinereason */ + OPA_PI_MASK_OFFLINE_REASON = 0x0F, + OPA_PI_MASK_LED_ENABLE = 0x40, + /* port_states.unsleepstate_downdefstate */ + OPA_PI_MASK_UNSLEEP_STATE = 0xF0, + OPA_PI_MASK_DOWNDEF_STATE = 0x0F, + /* port_states.portphysstate_portstate */ + OPA_PI_MASK_PORT_PHYSICAL_STATE = 0xF0, + OPA_PI_MASK_PORT_STATE = 0x0F, + /* port_phys_conf */ + OPA_PI_MASK_PORT_PHYSICAL_CONF = 0x0F, + /* collectivemask_multicastmask */ + OPA_PI_MASK_COLLECT_MASK = 0x38, + OPA_PI_MASK_MULTICAST_MASK = 0x07, + /* mkeyprotect_lmc */ + OPA_PI_MASK_MKEY_PROT_BIT = 0xC0, + OPA_PI_MASK_LMC = 0x0F, + /* smsl */ + OPA_PI_MASK_SMSL = 0x1F, + /* partenforce_filterraw */ + /* Filter Raw In/Out bits 1 and 2 were removed */ + OPA_PI_MASK_LINKINIT_REASON = 0xF0, + OPA_PI_MASK_PARTITION_ENFORCE_IN = 0x08, + OPA_PI_MASK_PARTITION_ENFORCE_OUT = 0x04, + /* operational_vls */ + OPA_PI_MASK_OPERATIONAL_VL = 0x1F, + /* sa_qp */ + OPA_PI_MASK_SA_QP = 0x00FFFFFF, + /* sm_trap_qp */ + OPA_PI_MASK_SM_TRAP_QP = 0x00FFFFFF, + /* localphy_overrun_errors */ + OPA_PI_MASK_LOCAL_PHY_ERRORS = 0xF0, + OPA_PI_MASK_OVERRUN_ERRORS = 0x0F, + /* clientrereg_subnettimeout */ + OPA_PI_MASK_CLIENT_REREGISTER = 0x80, + OPA_PI_MASK_SUBNET_TIMEOUT = 0x1F, + /* port_link_mode */ + OPA_PI_MASK_PORT_LINK_SUPPORTED = (0x001F << 10), + OPA_PI_MASK_PORT_LINK_ENABLED = (0x001F << 5), + OPA_PI_MASK_PORT_LINK_ACTIVE = (0x001F << 0), + /* port_link_crc_mode */ + OPA_PI_MASK_PORT_LINK_CRC_SUPPORTED = 0x0F00, + OPA_PI_MASK_PORT_LINK_CRC_ENABLED = 0x00F0, + OPA_PI_MASK_PORT_LINK_CRC_ACTIVE = 0x000F, + /* port_mode */ + OPA_PI_MASK_PORT_MODE_SECURITY_CHECK = 0x0001, + 
OPA_PI_MASK_PORT_MODE_16B_TRAP_QUERY = 0x0002, + OPA_PI_MASK_PORT_MODE_PKEY_CONVERT = 0x0004, + OPA_PI_MASK_PORT_MODE_SC2SC_MAPPING = 0x0008, + OPA_PI_MASK_PORT_MODE_VL_MARKER = 0x0010, + OPA_PI_MASK_PORT_PASS_THROUGH = 0x0020, + OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE = 0x0040, + /* flit_control.interleave */ + OPA_PI_MASK_INTERLEAVE_DIST_SUP = (0x0003 << 12), + OPA_PI_MASK_INTERLEAVE_DIST_ENABLE = (0x0003 << 10), + OPA_PI_MASK_INTERLEAVE_MAX_NEST_TX = (0x001F << 5), + OPA_PI_MASK_INTERLEAVE_MAX_NEST_RX = (0x001F << 0), + + /* port_error_action */ + OPA_PI_MASK_EX_BUFFER_OVERRUN = 0x80000000, + /* 7 bits reserved */ + OPA_PI_MASK_FM_CFG_ERR_EXCEED_MULTICAST_LIMIT = 0x00800000, + OPA_PI_MASK_FM_CFG_BAD_CONTROL_FLIT = 0x00400000, + OPA_PI_MASK_FM_CFG_BAD_PREEMPT = 0x00200000, + OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER = 0x00100000, + OPA_PI_MASK_FM_CFG_BAD_CRDT_ACK = 0x00080000, + OPA_PI_MASK_FM_CFG_BAD_CTRL_DIST = 0x00040000, + OPA_PI_MASK_FM_CFG_BAD_TAIL_DIST = 0x00020000, + OPA_PI_MASK_FM_CFG_BAD_HEAD_DIST = 0x00010000, + /* 2 bits reserved */ + OPA_PI_MASK_PORT_RCV_BAD_VL_MARKER = 0x00002000, + OPA_PI_MASK_PORT_RCV_PREEMPT_VL15 = 0x00001000, + OPA_PI_MASK_PORT_RCV_PREEMPT_ERROR = 0x00000800, + /* 1 bit reserved */ + OPA_PI_MASK_PORT_RCV_BAD_MidTail = 0x00000200, + /* 1 bit reserved */ + OPA_PI_MASK_PORT_RCV_BAD_SC = 0x00000080, + OPA_PI_MASK_PORT_RCV_BAD_L2 = 0x00000040, + OPA_PI_MASK_PORT_RCV_BAD_DLID = 0x00000020, + OPA_PI_MASK_PORT_RCV_BAD_SLID = 0x00000010, + OPA_PI_MASK_PORT_RCV_PKTLEN_TOOSHORT = 0x00000008, + OPA_PI_MASK_PORT_RCV_PKTLEN_TOOLONG = 0x00000004, + OPA_PI_MASK_PORT_RCV_BAD_PKTLEN = 0x00000002, + OPA_PI_MASK_PORT_RCV_BAD_LT = 0x00000001, + + /* pass_through.res_drctl */ + OPA_PI_MASK_PASS_THROUGH_DR_CONTROL = 0x01, + + /* buffer_units */ + OPA_PI_MASK_BUF_UNIT_VL15_INIT = (0x00000FFF << 11), + OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE = (0x0000001F << 6), + OPA_PI_MASK_BUF_UNIT_CREDIT_ACK = (0x00000003 << 3), + OPA_PI_MASK_BUF_UNIT_BUF_ALLOC = (0x00000003 << 0), + + /* neigh_mtu.pvlx_to_mtu */ + OPA_PI_MASK_NEIGH_MTU_PVL0 = 0xF0, + OPA_PI_MASK_NEIGH_MTU_PVL1 = 0x0F, + + /* neigh_mtu.vlstall_hoq_life */ + OPA_PI_MASK_VL_STALL = (0x03 << 5), + OPA_PI_MASK_HOQ_LIFE = (0x1F << 0), + + /* port_neigh_mode */ + OPA_PI_MASK_NEIGH_MGMT_ALLOWED = (0x01 << 3), + OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS = (0x01 << 2), + OPA_PI_MASK_NEIGH_NODE_TYPE = (0x03 << 0), + + /* resptime_value */ + OPA_PI_MASK_RESPONSE_TIME_VALUE = 0x1F, + + /* mtucap */ + OPA_PI_MASK_MTU_CAP = 0x0F, +}; + +#if USE_PI_LED_ENABLE +struct opa_port_states { + u8 reserved; + u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */ + u8 reserved2; + u8 portphysstate_portstate; /* 4 bits, 4 bits */ +}; +#define PI_LED_ENABLE_SUP 1 +#else +struct opa_port_states { + u8 reserved; + u8 offline_reason; /* 2 res, 6 bits */ + u8 reserved2; + u8 portphysstate_portstate; /* 4 bits, 4 bits */ +}; +#define PI_LED_ENABLE_SUP 0 +#endif + +struct opa_port_state_info { + struct opa_port_states port_states; + __be16 link_width_downgrade_tx_active; + __be16 link_width_downgrade_rx_active; +}; + +struct opa_port_info { + __be32 lid; + __be32 flow_control_mask; + + struct { + u8 res; /* was inittype */ + u8 cap; /* 3 res, 5 bits */ + __be16 high_limit; + __be16 preempt_limit; + u8 arb_high_cap; + u8 arb_low_cap; + } vl; + + struct opa_port_states port_states; + u8 port_phys_conf; /* 4 res, 4 bits */ + u8 collectivemask_multicastmask; /* 2 res, 3, 3 */ + u8 mkeyprotect_lmc; /* 2 bits, 2 res, 4 bits */ + u8 smsl; /* 3 res, 5 bits */ + + u8 
partenforce_filterraw; /* bit fields */ + u8 operational_vls; /* 3 res, 5 bits */ + __be16 pkey_8b; + __be16 pkey_10b; + __be16 mkey_violations; + + __be16 pkey_violations; + __be16 qkey_violations; + __be32 sm_trap_qp; /* 8 bits, 24 bits */ + + __be32 sa_qp; /* 8 bits, 24 bits */ + u8 neigh_port_num; + u8 link_down_reason; + u8 neigh_link_down_reason; + u8 clientrereg_subnettimeout; /* 1 bit, 2 bits, 5 */ + + struct { + __be16 supported; + __be16 enabled; + __be16 active; + } link_speed; + struct { + __be16 supported; + __be16 enabled; + __be16 active; + } link_width; + struct { + __be16 supported; + __be16 enabled; + __be16 tx_active; + __be16 rx_active; + } link_width_downgrade; + __be16 port_link_mode; /* 1 res, 5 bits, 5 bits, 5 bits */ + __be16 port_ltp_crc_mode; /* 4 res, 4 bits, 4 bits, 4 bits */ + + __be16 port_mode; /* 9 res, bit fields */ + struct { + __be16 supported; + __be16 enabled; + } port_packet_format; + struct { + __be16 interleave; /* 2 res, 2,2,5,5 */ + struct { + __be16 min_initial; + __be16 min_tail; + u8 large_pkt_limit; + u8 small_pkt_limit; + u8 max_small_pkt_limit; + u8 preemption_limit; + } preemption; + } flit_control; + + __be32 reserved4; + __be32 port_error_action; /* bit field */ + + struct { + u8 egress_port; + u8 res_drctl; /* 7 res, 1 */ + } pass_through; + __be16 mkey_lease_period; + __be32 buffer_units; /* 9 res, 12, 5, 3, 3 */ + + __be32 reserved5; + __be32 sm_lid; + + __be64 mkey; + + __be64 subnet_prefix; + + struct { + u8 pvlx_to_mtu[OPA_MAX_VLS/2]; /* 4 bits, 4 bits */ + } neigh_mtu; + + struct { + u8 vlstall_hoqlife; /* 3 bits, 5 bits */ + } xmit_q[OPA_MAX_VLS]; + + struct { + u8 addr[16]; + } ipaddr_ipv6; + + struct { + u8 addr[4]; + } ipaddr_ipv4; + + u32 reserved6; + u32 reserved7; + u32 reserved8; + + __be64 neigh_node_guid; + + __be32 ib_cap_mask; + __be16 reserved9; /* was ib_cap_mask2 */ + __be16 opa_cap_mask; + + __be32 reserved10; /* was link_roundtrip_latency */ + __be16 overall_buffer_space; + __be16 reserved11; /* was max_credit_hint */ + + __be16 diag_code; + struct { + u8 buffer; + u8 wire; + } replay_depth; + u8 port_neigh_mode; + u8 mtucap; /* 4 res, 4 bits */ + + u8 resptimevalue; /* 3 res, 5 bits */ + u8 local_port_num; + u8 reserved12; + u8 reserved13; /* was guid_cap */ +} __attribute__ ((packed)); + +#endif /* OPA_PORT_INFO_H */ diff --git a/kernel/include/rdma/opa_smi.h b/kernel/include/rdma/opa_smi.h new file mode 100644 index 000000000..4a529ef47 --- /dev/null +++ b/kernel/include/rdma/opa_smi.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2014 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(OPA_SMI_H)
+#define OPA_SMI_H
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+
+#define OPA_SMP_LID_DATA_SIZE 2016
+#define OPA_SMP_DR_DATA_SIZE 1872
+#define OPA_SMP_MAX_PATH_HOPS 64
+
+#define OPA_MAX_VLS 32
+#define OPA_MAX_SLS 32
+#define OPA_MAX_SCS 32
+
+#define OPA_SMI_CLASS_VERSION 0x80
+
+#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
+
+struct opa_smp {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ u8 hop_ptr;
+ u8 hop_cnt;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ union {
+ struct {
+ uint8_t data[OPA_SMP_LID_DATA_SIZE];
+ } lid;
+ struct {
+ __be32 dr_slid;
+ __be32 dr_dlid;
+ u8 initial_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 return_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 reserved[8];
+ u8 data[OPA_SMP_DR_DATA_SIZE];
+ } dr;
+ } route;
+} __packed;
+
+
+/* Subnet management attributes */
+/* ... */
+#define OPA_ATTRIB_ID_NODE_DESCRIPTION cpu_to_be16(0x0010)
+#define OPA_ATTRIB_ID_NODE_INFO cpu_to_be16(0x0011)
+#define OPA_ATTRIB_ID_PORT_INFO cpu_to_be16(0x0015)
+#define OPA_ATTRIB_ID_PARTITION_TABLE cpu_to_be16(0x0016)
+#define OPA_ATTRIB_ID_SL_TO_SC_MAP cpu_to_be16(0x0017)
+#define OPA_ATTRIB_ID_VL_ARBITRATION cpu_to_be16(0x0018)
+#define OPA_ATTRIB_ID_SM_INFO cpu_to_be16(0x0020)
+#define OPA_ATTRIB_ID_CABLE_INFO cpu_to_be16(0x0032)
+#define OPA_ATTRIB_ID_AGGREGATE cpu_to_be16(0x0080)
+#define OPA_ATTRIB_ID_SC_TO_SL_MAP cpu_to_be16(0x0082)
+#define OPA_ATTRIB_ID_SC_TO_VLR_MAP cpu_to_be16(0x0083)
+#define OPA_ATTRIB_ID_SC_TO_VLT_MAP cpu_to_be16(0x0084)
+#define OPA_ATTRIB_ID_SC_TO_VLNT_MAP cpu_to_be16(0x0085)
+/* ... */
+#define OPA_ATTRIB_ID_PORT_STATE_INFO cpu_to_be16(0x0087)
+/* ... */
+#define OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE cpu_to_be16(0x008A)
+/* ... */
+
+struct opa_node_description {
+ u8 data[64];
+} __attribute__ ((packed));
+
+struct opa_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be32 reserved;
+ __be64 system_image_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3]; /* network byte order */
+} __attribute__ ((packed));
+
+#define OPA_PARTITION_TABLE_BLK_SIZE 32
+
+static inline u8
+opa_get_smp_direction(struct opa_smp *smp)
+{
+ return ib_get_smp_direction((struct ib_smp *)smp);
+}
+
+static inline u8 *opa_get_smp_data(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return smp->route.dr.data;
+
+ return smp->route.lid.data;
+}
+
+static inline size_t opa_get_smp_data_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(smp->route.dr.data);
+
+ return sizeof(smp->route.lid.data);
+}
+
+static inline size_t opa_get_smp_header_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(*smp) - sizeof(smp->route.dr.data);
+
+ return sizeof(*smp) - sizeof(smp->route.lid.data);
+}
+
+#endif /* OPA_SMI_H */
diff --git a/kernel/include/rdma/rdma_cm.h b/kernel/include/rdma/rdma_cm.h
index 1ed2088dc..afe44fde7 100644
--- a/kernel/include/rdma/rdma_cm.h
+++ b/kernel/include/rdma/rdma_cm.h
@@ -62,6 +62,8 @@ enum rdma_cm_event_type {
 RDMA_CM_EVENT_TIMEWAIT_EXIT
 };
 
+const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
+
 enum rdma_port_space {
 RDMA_PS_SDP = 0x0001,
 RDMA_PS_IPOIB = 0x0002,
@@ -158,13 +160,17 @@ struct rdma_cm_id {
 /**
 * rdma_create_id - Create an RDMA identifier.
 *
+ * @net: The network namespace in which to create the new id.
 * @event_handler: User callback invoked to report events associated with the
 * returned rdma_id.
 * @context: User specified context associated with the id.
 * @ps: RDMA port space.
 * @qp_type: type of queue pair associated with the id.
+ *
+ * The id holds a reference on the network namespace until it is destroyed.
 */
-struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
+struct rdma_cm_id *rdma_create_id(struct net *net,
+ rdma_cm_event_handler event_handler,
 void *context, enum rdma_port_space ps,
 enum ib_qp_type qp_type);
diff --git a/kernel/include/rdma/rdma_netlink.h b/kernel/include/rdma/rdma_netlink.h
index 0790882e0..585266144 100644
--- a/kernel/include/rdma/rdma_netlink.h
+++ b/kernel/include/rdma/rdma_netlink.h
@@ -77,4 +77,11 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 unsigned int group, gfp_t flags);
 
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative for no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 #endif /* _RDMA_NETLINK_H */
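
A few usage notes on the interfaces this patch adds follow; the sketches are illustrative, not code from the tree.

The ib_verbs.h hunk retires ib_alloc_fast_reg_page_list()/ib_free_fast_reg_page_list() in favour of ib_alloc_mr() plus ib_map_mr_sg(). A minimal sketch of the new calling pattern, assuming a ULP that already holds a PD and a DMA-mapped scatterlist (the function and parameter names here are hypothetical):

    /* Assumes <rdma/ib_verbs.h> and <linux/err.h>. */
    static struct ib_mr *example_reg_mr(struct ib_pd *pd,
                                        struct scatterlist *sg, int sg_cnt)
    {
            struct ib_mr *mr;
            int n;

            /* One MR sized to cover the whole scatterlist. */
            mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_cnt);
            if (IS_ERR(mr))
                    return mr;

            /* Collapse the SG list into the MR's page list. */
            n = ib_map_mr_sg(mr, sg, sg_cnt, PAGE_SIZE);
            if (n != sg_cnt) {
                    ib_dereg_mr(mr);
                    return ERR_PTR(n < 0 ? n : -EINVAL);
            }
            return mr;
    }

ib_map_mr_sg_zbva() in the same hunk performs the identical mapping and then zeroes mr->iova for consumers that want zero-based addressing; both paths feed the driver's set_page callback through ib_sg_to_pages().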
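
opa_port_info.h extends the IB MTU encoding, which ends at IB_MTU_4096 (5), with OPA_MTU_8192 (6) and OPA_MTU_10240 (7). A hedged sketch of a byte-count helper built on the existing ib_mtu_enum_to_int(); the helper name is invented:

    static int example_opa_mtu_to_bytes(int mtu)
    {
            switch (mtu) {
            case OPA_MTU_8192:
                    return 8192;
            case OPA_MTU_10240:
                    return 10240;
            default:
                    /* Fall back to the IB values; -1 if invalid. */
                    return ib_mtu_enum_to_int(mtu);
            }
    }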
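
Most opa_port_info bytes carry two or more packed fields, with the masks collected in enum port_info_field_masks. For instance, the logical and physical port states share one byte. A small sketch of decoding them, assuming a populated struct opa_port_info (the helper names are invented):

    static u8 example_port_state(const struct opa_port_info *pi)
    {
            return pi->port_states.portphysstate_portstate &
                   OPA_PI_MASK_PORT_STATE;
    }

    static u8 example_port_phys_state(const struct opa_port_info *pi)
    {
            return (pi->port_states.portphysstate_portstate &
                    OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
    }

The USE_PI_LED_ENABLE switch only changes how the offline-reason byte is split (whether a LED-enable bit is carved out of it), which is what PI_LED_ENABLE_SUP reports to consumers.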
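
opa_smi.h mirrors the ib_smp helpers: opa_get_smp_data() and opa_get_smp_data_size() select the LID-routed or directed-route payload based on mgmt_class, so callers never branch on the routing variant themselves. An illustrative (invented) use that clears an SMP's payload either way:

    /* Assumes <linux/string.h> for memset(). */
    static void example_clear_smp_payload(struct opa_smp *smp)
    {
            memset(opa_get_smp_data(smp), 0, opa_get_smp_data_size(smp));
    }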
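
rdma_create_id() now takes the network namespace as its first argument and, per the new kerneldoc, holds a reference on it until the id is destroyed. Callers that are not namespace-aware pass &init_net; the handler and context below are placeholders:

    /* Assumes <rdma/rdma_cm.h>; my_handler and my_ctx are hypothetical. */
    struct rdma_cm_id *id;

    id = rdma_create_id(&init_net, my_handler, my_ctx,
                        RDMA_PS_TCP, IB_QPT_RC);
    if (IS_ERR(id))
            return PTR_ERR(id);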
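
Finally, ibnl_chk_listeners() returns 0 when the netlink group has subscribers and a negative value otherwise, letting senders skip message construction entirely. A sketch of a send path, with RDMA_NL_GROUP_LS as an assumed example group; real callers would check before even allocating the skb:

    static int example_ls_multicast(struct sk_buff *skb, struct nlmsghdr *nlh)
    {
            /* Non-zero means nobody is listening; don't bother sending. */
            if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
                    return -ESRCH;

            return ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
    }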