Diffstat (limited to 'kernel/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c')
-rw-r--r--  kernel/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c  422
1 file changed, 210 insertions, 212 deletions
diff --git a/kernel/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/kernel/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index fa7ad883b..477b385f1 100644
--- a/kernel/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/kernel/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -75,13 +75,13 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 return NULL;
 }
 
- tx->tx_conn = NULL;
- tx->tx_lnetmsg = NULL;
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1;
- tx->tx_nonblk = nonblk;
+ tx->tx_conn = NULL;
+ tx->tx_lnetmsg = NULL;
+ tx->tx_kiov = NULL;
+ tx->tx_nkiov = 0;
+ tx->tx_iov = tx->tx_frags.virt.iov;
+ tx->tx_niov = 1;
+ tx->tx_nonblk = nonblk;
 
 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
 tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -89,7 +89,6 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 return tx;
 }
 
-
 void
 ksocknal_free_tx (ksock_tx_t *tx)
 {
@@ -110,11 +109,11 @@ ksocknal_free_tx (ksock_tx_t *tx)
 static int
 ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 {
- struct kvec *iov = tx->tx_iov;
- int nob;
- int rc;
+ struct kvec *iov = tx->tx_iov;
+ int nob;
+ int rc;
 
- LASSERT (tx->tx_niov > 0);
+ LASSERT(tx->tx_niov > 0);
 
 /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
 rc = ksocknal_lib_send_iov(conn, tx);
@@ -128,7 +127,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 
 /* "consume" iov */
 do {
- LASSERT (tx->tx_niov > 0);
+ LASSERT(tx->tx_niov > 0);
 
 if (nob < (int) iov->iov_len) {
 iov->iov_base = (void *)((char *)iov->iov_base + nob);
@@ -147,12 +146,12 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 static int
 ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 {
- lnet_kiov_t *kiov = tx->tx_kiov;
- int nob;
- int rc;
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int nob;
+ int rc;
 
- LASSERT (tx->tx_niov == 0);
- LASSERT (tx->tx_nkiov > 0);
+ LASSERT(tx->tx_niov == 0);
+ LASSERT(tx->tx_nkiov > 0);
 
 /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
 rc = ksocknal_lib_send_kiov(conn, tx);
@@ -185,15 +184,15 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 static int
 ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 {
- int rc;
- int bufnob;
+ int rc;
+ int bufnob;
 
 if (ksocknal_data.ksnd_stall_tx != 0) {
 set_current_state(TASK_UNINTERRUPTIBLE);
 schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 }
 
- LASSERT (tx->tx_resid != 0);
+ LASSERT(tx->tx_resid != 0);
 
 rc = ksocknal_connsock_addref(conn);
 if (rc != 0) {
@@ -252,10 +251,10 @@ static int
 ksocknal_recv_iov (ksock_conn_t *conn)
 {
 struct kvec *iov = conn->ksnc_rx_iov;
- int nob;
- int rc;
+ int nob;
+ int rc;
 
- LASSERT (conn->ksnc_rx_niov > 0);
+ LASSERT(conn->ksnc_rx_niov > 0);
 
 /* Never touch conn->ksnc_rx_iov or change connection
  * status inside ksocknal_lib_recv_iov */
@@ -277,7 +276,7 @@ ksocknal_recv_iov (ksock_conn_t *conn)
 conn->ksnc_rx_nob_left -= nob;
 
 do {
- LASSERT (conn->ksnc_rx_niov > 0);
+ LASSERT(conn->ksnc_rx_niov > 0);
 
 if (nob < (int)iov->iov_len) {
 iov->iov_len -= nob;
@@ -296,10 +295,11 @@ ksocknal_recv_iov (ksock_conn_t *conn)
 static int
 ksocknal_recv_kiov (ksock_conn_t *conn)
 {
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
- int nob;
- int rc;
- LASSERT (conn->ksnc_rx_nkiov > 0);
+ lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ int nob;
+ int rc;
+
+ LASSERT(conn->ksnc_rx_nkiov > 0);
 
 /* Never touch conn->ksnc_rx_kiov or change connection
  * status inside ksocknal_lib_recv_iov */
@@ -321,7 +321,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
 conn->ksnc_rx_nob_left -= nob;
 
 do {
- LASSERT (conn->ksnc_rx_nkiov > 0);
+ LASSERT(conn->ksnc_rx_nkiov > 0);
 
 if (nob < (int) kiov->kiov_len) {
 kiov->kiov_offset += nob;
@@ -343,7 +343,7 @@ ksocknal_receive (ksock_conn_t *conn)
 /* Return 1 on success, 0 on EOF, < 0 on error.
  * Caller checks ksnc_rx_nob_wanted to determine
  * progress/completion. */
- int rc;
+ int rc;
 
 if (ksocknal_data.ksnd_stall_rx != 0) {
 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -388,8 +388,8 @@ ksocknal_receive (ksock_conn_t *conn)
 void
 ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
 {
- lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
- int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+ lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
+ int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
 
 LASSERT(ni != NULL || tx->tx_conn != NULL);
 
@@ -410,7 +410,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
 ksock_tx_t *tx;
 
 while (!list_empty (txlist)) {
- tx = list_entry (txlist->next, ksock_tx_t, tx_list);
+ tx = list_entry(txlist->next, ksock_tx_t, tx_list);
 
 if (error && tx->tx_lnetmsg != NULL) {
 CNETERR("Deleting packet type %d len %d %s->%s\n",
@@ -422,18 +422,18 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
 CNETERR("Deleting noop packet\n");
 }
 
- list_del (&tx->tx_list);
+ list_del(&tx->tx_list);
 
- LASSERT (atomic_read(&tx->tx_refcount) == 1);
- ksocknal_tx_done (ni, tx);
+ LASSERT(atomic_read(&tx->tx_refcount) == 1);
+ ksocknal_tx_done(ni, tx);
 }
 }
 
 static void
 ksocknal_check_zc_req(ksock_tx_t *tx)
 {
- ksock_conn_t *conn = tx->tx_conn;
- ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_conn_t *conn = tx->tx_conn;
+ ksock_peer_t *peer = conn->ksnc_peer;
 
 /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
  * to ksnp_zc_req_list if some fragment of this message should be sent
@@ -441,8 +441,8 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
  * she has received this message to tell us we can signal completion.
  * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
  * ksnp_zc_req_list. */
- LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT (tx->tx_zc_capable);
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT(tx->tx_zc_capable);
 
 tx->tx_zc_checked = 1;
 
@@ -461,7 +461,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 tx->tx_deadline =
 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
- LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
+ LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
 
 tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
 
@@ -476,7 +476,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 static void
 ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 {
- ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+ ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
 
 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
 LASSERT(tx->tx_zc_capable);
@@ -502,14 +502,14 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 static int
 ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 {
- int rc;
+ int rc;
 
 if (tx->tx_zc_capable && !tx->tx_zc_checked)
 ksocknal_check_zc_req(tx);
 
 rc = ksocknal_transmit (conn, tx);
 
- CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
+ CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
 if (tx->tx_resid == 0) {
 /* Sent everything OK */
@@ -526,8 +526,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 counter++;   /* exponential backoff warnings */
 if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ CWARN("%u ENOMEM tx %p\n", counter, conn);
 
 /* Queue on ksnd_enomem_conns for retry after a timeout */
 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -546,7 +545,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 /* Actual error */
- LASSERT (rc < 0);
+ LASSERT(rc < 0);
 
 if (!conn->ksnc_closing) {
 switch (rc) {
@@ -582,9 +581,9 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 
 /* called holding write lock on ksnd_global_lock */
 
- LASSERT (!route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
- LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+ LASSERT(!route->ksnr_scheduled);
+ LASSERT(!route->ksnr_connecting);
+ LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
 
 route->ksnr_scheduled = 1; /* scheduling conn for connd */
 ksocknal_route_addref(route); /* extra ref for connd */
@@ -617,22 +616,22 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
 ksock_conn_t *
 ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 {
- struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_conn_t *typed = NULL;
- ksock_conn_t *fallback = NULL;
- int tnob = 0;
- int fnob = 0;
+ struct list_head *tmp;
+ ksock_conn_t *conn;
+ ksock_conn_t *typed = NULL;
+ ksock_conn_t *fallback = NULL;
+ int tnob = 0;
+ int fnob = 0;
 
 list_for_each (tmp, &peer->ksnp_conns) {
 ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
+ int nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
+ int rc;
 
- LASSERT (!c->ksnc_closing);
- LASSERT (c->ksnc_proto != NULL &&
- c->ksnc_proto->pro_match_tx != NULL);
+ LASSERT(!c->ksnc_closing);
+ LASSERT(c->ksnc_proto != NULL &&
+ c->ksnc_proto->pro_match_tx != NULL);
 
 rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
 
@@ -656,7 +655,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
 cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
 fallback = c;
- fnob = nob;
+ fnob = nob;
 }
 break;
 }
@@ -685,9 +684,9 @@ void
 ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 {
 ksock_sched_t *sched = conn->ksnc_scheduler;
- ksock_msg_t *msg = &tx->tx_msg;
- ksock_tx_t *ztx = NULL;
- int bufnob = 0;
+ ksock_msg_t *msg = &tx->tx_msg;
+ ksock_tx_t *ztx = NULL;
+ int bufnob = 0;
 
 /* called holding global lock (read or irq-write) and caller may
  * not have dropped this lock between finding conn and calling me,
@@ -708,14 +707,14 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
  *
  * We always expect at least 1 mapped fragment containing the
  * complete ksocknal message header. */
- LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
- lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
- (unsigned int)tx->tx_nob);
- LASSERT (tx->tx_niov >= 1);
- LASSERT (tx->tx_resid == tx->tx_nob);
+ LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+ lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
+ (unsigned int)tx->tx_nob);
+ LASSERT(tx->tx_niov >= 1);
+ LASSERT(tx->tx_resid == tx->tx_nob);
 
 CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
- tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
+ tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
 KSOCK_MSG_NOOP,
 tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
@@ -739,8 +738,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 if (msg->ksm_type == KSOCK_MSG_NOOP) {
 /* The packet is noop ZC ACK, try to piggyback the ack_cookie
  * on a normal packet so I don't need to send it */
- LASSERT (msg->ksm_zc_cookies[1] != 0);
- LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+ LASSERT(msg->ksm_zc_cookies[1] != 0);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
 
 if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
 ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
@@ -748,8 +747,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 } else {
 /* It's a normal packet - can it piggback a noop zc-ack that
  * has been queued already? */
- LASSERT (msg->ksm_zc_cookies[1] == 0);
- LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL);
+ LASSERT(msg->ksm_zc_cookies[1] == 0);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
 
 ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
 /* ztx will be released later */
@@ -773,18 +772,17 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 spin_unlock_bh(&sched->kss_lock);
 }
 
-
 ksock_route_t *
 ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 {
- unsigned long now = cfs_time_current();
- struct list_head *tmp;
+ unsigned long now = cfs_time_current();
+ struct list_head *tmp;
 ksock_route_t *route;
 
 list_for_each (tmp, &peer->ksnp_routes) {
 route = list_entry (tmp, ksock_route_t, ksnr_list);
 
- LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+ LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
 if (route->ksnr_scheduled) /* connections being established */
 continue;
@@ -813,13 +811,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 ksock_route_t *
 ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
 {
- struct list_head *tmp;
- ksock_route_t *route;
+ struct list_head *tmp;
+ ksock_route_t *route;
 
 list_for_each (tmp, &peer->ksnp_routes) {
 route = list_entry (tmp, ksock_route_t, ksnr_list);
 
- LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+ LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
 if (route->ksnr_scheduled)
 return route;
@@ -831,13 +829,13 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
 int
 ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
- ksock_peer_t *peer;
- ksock_conn_t *conn;
- rwlock_t *g_lock;
- int retry;
- int rc;
+ ksock_peer_t *peer;
+ ksock_conn_t *conn;
+ rwlock_t *g_lock;
+ int retry;
+ int rc;
 
- LASSERT (tx->tx_conn == NULL);
+ LASSERT(tx->tx_conn == NULL);
 
 g_lock = &ksocknal_data.ksnd_global_lock;
 
@@ -922,17 +920,17 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 int
 ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 {
- int mpflag = 1;
- int type = lntmsg->msg_type;
+ int mpflag = 1;
+ int type = lntmsg->msg_type;
 lnet_process_id_t target = lntmsg->msg_target;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- ksock_tx_t *tx;
- int desc_size;
- int rc;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct kvec *payload_iov = lntmsg->msg_iov;
+ lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ ksock_tx_t *tx;
+ int desc_size;
+ int rc;
 
 /* NB 'private' is different depending on what we're sending.
  * Just ignore it... */
@@ -940,8 +938,8 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
 payload_nob, payload_niov, libcfs_id2str(target));
 
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT(payload_nob == 0 || payload_niov > 0);
+ LASSERT(payload_niov <= LNET_MAX_IOV);
 /* payload is either all vaddrs or all pages */
 LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
 LASSERT (!in_interrupt ());
@@ -1028,9 +1026,9 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 {
 static char ksocknal_slop_buffer[4096];
 
- int nob;
- unsigned int niov;
- int skipped;
+ int nob;
+ unsigned int niov;
+ int skipped;
 
 LASSERT(conn->ksnc_proto != NULL);
 
@@ -1063,7 +1061,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 
 conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
 conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+ conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
 break;
 
 default:
@@ -1093,7 +1091,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 conn->ksnc_rx_iov[niov].iov_len = nob;
 niov++;
 skipped += nob;
- nob_to_skip -=nob;
+ nob_to_skip -= nob;
 } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
 
@@ -1108,18 +1106,18 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 static int
 ksocknal_process_receive (ksock_conn_t *conn)
 {
- lnet_hdr_t *lhdr;
+ lnet_hdr_t *lhdr;
 lnet_process_id_t *id;
- int rc;
+ int rc;
 
 LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
 
 /* NB: sched lock NOT held */
 /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+ LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
 if (conn->ksnc_rx_nob_wanted != 0) {
 rc = ksocknal_receive(conn);
@@ -1229,7 +1227,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
 /* Userspace peer */
 lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
- id = &conn->ksnc_peer->ksnp_id;
+ id = &conn->ksnc_peer->ksnp_id;
 
 /* Substitute process ID assigned at connection time */
 lhdr->src_pid = cpu_to_le32(id->pid);
@@ -1277,7 +1275,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
 lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
- id = &conn->ksnc_peer->ksnp_id;
+ id = &conn->ksnc_peer->ksnp_id;
 
 rc = conn->ksnc_proto->pro_handle_zcreq(conn,
 conn->ksnc_msg.ksm_zc_cookies[0],
@@ -1305,7 +1303,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 }
 
 /* Not Reached */
- LBUG ();
+ LBUG();
 return -EINVAL; /* keep gcc happy */
 }
 
@@ -1314,15 +1312,15 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
 unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
- ksock_conn_t *conn = (ksock_conn_t *)private;
+ ksock_conn_t *conn = private;
 ksock_sched_t *sched = conn->ksnc_scheduler;
 
- LASSERT (mlen <= rlen);
- LASSERT (niov <= LNET_MAX_IOV);
+ LASSERT(mlen <= rlen);
+ LASSERT(niov <= LNET_MAX_IOV);
 
 conn->ksnc_cookie = msg;
 conn->ksnc_rx_nob_wanted = mlen;
- conn->ksnc_rx_nob_left = rlen;
+ conn->ksnc_rx_nob_left = rlen;
 
 if (mlen == 0 || iov != NULL) {
 conn->ksnc_rx_nkiov = 0;
@@ -1333,18 +1331,18 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 niov, iov, offset, mlen);
 } else {
 conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
+ conn->ksnc_rx_iov = NULL;
 conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
 conn->ksnc_rx_nkiov =
 lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
 niov, kiov, offset, mlen);
 }
 
- LASSERT (mlen ==
- lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
- lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
+ LASSERT(mlen ==
+ lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
+ lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
 
- LASSERT (conn->ksnc_rx_scheduled);
+ LASSERT(conn->ksnc_rx_scheduled);
 
 spin_lock_bh(&sched->kss_lock);
 
@@ -1370,7 +1368,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 static inline int
 ksocknal_sched_cansleep(ksock_sched_t *sched)
 {
- int rc;
+ int rc;
 
 spin_lock_bh(&sched->kss_lock);
 
@@ -1384,13 +1382,13 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
 
 int ksocknal_scheduler(void *arg)
 {
- struct ksock_sched_info *info;
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
- int nloops = 0;
- long id = (long)arg;
+ struct ksock_sched_info *info;
+ ksock_sched_t *sched;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+ int rc;
+ int nloops = 0;
+ long id = (long)arg;
 
 info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
@@ -1455,7 +1453,7 @@ int ksocknal_scheduler(void *arg)
 }
 
 if (!list_empty (&sched->kss_tx_conns)) {
- LIST_HEAD (zlist);
+ LIST_HEAD(zlist);
 
 if (!list_empty(&sched->kss_zombie_noop_txs)) {
 list_add(&zlist,
@@ -1513,9 +1511,9 @@ int ksocknal_scheduler(void *arg)
 /* Do nothing; after a short timeout, this
  * conn will be reposted on kss_tx_conns. */
 } else if (conn->ksnc_tx_ready &&
- !list_empty (&conn->ksnc_tx_queue)) {
+ !list_empty(&conn->ksnc_tx_queue)) {
 /* reschedule for tx */
- list_add_tail (&conn->ksnc_tx_list,
+ list_add_tail(&conn->ksnc_tx_list,
 &sched->kss_tx_conns);
 } else {
 conn->ksnc_tx_scheduled = 0;
@@ -1606,7 +1604,7 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 static ksock_proto_t *
 ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
 {
- __u32 version = 0;
+ __u32 version = 0;
 
 if (hello->kshm_magic == LNET_PROTO_MAGIC)
 version = hello->kshm_version;
@@ -1634,8 +1632,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
 if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
 
- CLASSERT (sizeof (lnet_magicversion_t) ==
- offsetof (ksock_hello_msg_t, kshm_src_nid));
+ CLASSERT(sizeof (lnet_magicversion_t) ==
+ offsetof (ksock_hello_msg_t, kshm_src_nid));
 
 if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
 hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
@@ -1650,19 +1648,19 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
 /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+ ksock_net_t *net = (ksock_net_t *)ni->ni_data;
 
- LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);
+ LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
 
 /* rely on caller to hold a ref on socket so it wouldn't disappear */
- LASSERT (conn->ksnc_proto != NULL);
+ LASSERT(conn->ksnc_proto != NULL);
 
- hello->kshm_src_nid = ni->ni_nid;
- hello->kshm_dst_nid = peer_nid;
- hello->kshm_src_pid = the_lnet.ln_pid;
+ hello->kshm_src_nid = ni->ni_nid;
+ hello->kshm_dst_nid = peer_nid;
+ hello->kshm_src_pid = the_lnet.ln_pid;
 hello->kshm_src_incarnation = net->ksnn_incarnation;
- hello->kshm_ctype = conn->ksnc_type;
+ hello->kshm_ctype = conn->ksnc_type;
 
 return conn->ksnc_proto->pro_send_hello(conn, hello);
 }
@@ -1693,21 +1691,21 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
  * EALREADY lost connection race
  * EPROTO protocol version mismatch */
- struct socket *sock = conn->ksnc_sock;
- int active = (conn->ksnc_proto != NULL);
- int timeout;
- int proto_match;
- int rc;
- ksock_proto_t *proto;
- lnet_process_id_t recv_id;
+ struct socket *sock = conn->ksnc_sock;
+ int active = (conn->ksnc_proto != NULL);
+ int timeout;
+ int proto_match;
+ int rc;
+ ksock_proto_t *proto;
+ lnet_process_id_t recv_id;
 
 /* socket type set on active connections - not set on passive */
- LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
+ LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
 
 timeout = active ? *ksocknal_tunables.ksnd_timeout :
 lnet_acceptor_timeout();
 
- rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
 if (rc != 0) {
 CERROR("Error %d reading HELLO from %pI4h\n",
 rc, &conn->ksnc_ipaddr);
@@ -1726,12 +1724,12 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 return -EPROTO;
 }
 
- rc = libcfs_sock_read(sock, &hello->kshm_version,
- sizeof(hello->kshm_version), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_version,
+ sizeof(hello->kshm_version), timeout);
 if (rc != 0) {
 CERROR("Error %d reading HELLO from %pI4h\n",
 rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
+ LASSERT(rc < 0);
 return rc;
 }
 
@@ -1765,7 +1763,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 if (rc != 0) {
 CERROR("Error %d reading or checking hello from from %pI4h\n",
 rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
+ LASSERT(rc < 0);
 return rc;
 }
 
@@ -1830,22 +1828,22 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 static int
 ksocknal_connect (ksock_route_t *route)
 {
- LIST_HEAD (zombies);
- ksock_peer_t *peer = route->ksnr_peer;
- int type;
- int wanted;
- struct socket *sock;
- unsigned long deadline;
- int retry_later = 0;
- int rc = 0;
+ LIST_HEAD(zombies);
+ ksock_peer_t *peer = route->ksnr_peer;
+ int type;
+ int wanted;
+ struct socket *sock;
+ unsigned long deadline;
+ int retry_later = 0;
+ int rc = 0;
 
 deadline = cfs_time_add(cfs_time_current(),
 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
 
 write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
- LASSERT (route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
+ LASSERT(route->ksnr_scheduled);
+ LASSERT(!route->ksnr_connecting);
 
 route->ksnr_connecting = 1;
 
@@ -1999,7 +1997,7 @@ ksocknal_connect (ksock_route_t *route)
  * running out of resource. */
 static int
-ksocknal_connd_check_start(long sec, long *timeout)
+ksocknal_connd_check_start(time64_t sec, long *timeout)
 {
 char name[16];
 int rc;
@@ -2049,7 +2047,7 @@ ksocknal_connd_check_start(long sec, long *timeout)
 /* we tried ... */
 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
 ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_failed_stamp = get_seconds();
+ ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
 
 return 1;
 }
@@ -2061,7 +2059,7 @@ ksocknal_connd_check_start(long sec, long *timeout)
  * again to recheck these conditions. */
 static int
-ksocknal_connd_check_stop(long sec, long *timeout)
+ksocknal_connd_check_stop(time64_t sec, long *timeout)
 {
 int val;
 
@@ -2101,7 +2099,7 @@ static ksock_route_t *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
 ksock_route_t *route;
- unsigned long now;
+ unsigned long now;
 
 now = cfs_time_current();
 
@@ -2124,13 +2122,13 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 
 int ksocknal_connd (void *arg)
 {
- spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
- wait_queue_t wait;
- int nloops = 0;
- int cons_retry = 0;
+ spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
+ ksock_connreq_t *cr;
+ wait_queue_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
 
- cfs_block_allsigs ();
+ cfs_block_allsigs();
 
 init_waitqueue_entry(&wait, current);
 
@@ -2142,9 +2140,9 @@ ksocknal_connd (void *arg)
 while (!ksocknal_data.ksnd_shuttingdown) {
 ksock_route_t *route = NULL;
- long sec = get_seconds();
+ time64_t sec = ktime_get_real_seconds();
 long timeout = MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
+ int dropped_lock = 0;
 
 if (ksocknal_connd_check_stop(sec, &timeout)) {
 /* wakeup another one to check stop */
@@ -2236,15 +2234,16 @@ static ksock_conn_t *
 ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 {
 /* We're called with a shared lock on ksnd_global_lock */
- ksock_conn_t *conn;
- struct list_head *ctmp;
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
 
 list_for_each (ctmp, &peer->ksnp_conns) {
- int error;
+ int error;
+
 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
 
 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
- LASSERT (!conn->ksnc_closing);
+ LASSERT(!conn->ksnc_closing);
 
 /* SOCK_ERROR will reset error code of socket in
  * some platform (like Darwin8.x) */
@@ -2313,8 +2312,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 static inline void
 ksocknal_flush_stale_txs(ksock_peer_t *peer)
 {
- ksock_tx_t *tx;
- LIST_HEAD (stale_txs);
+ ksock_tx_t *tx;
+ LIST_HEAD(stale_txs);
 
 write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -2338,9 +2337,9 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
 static int
 ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 {
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ ksock_sched_t *sched;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
 
 if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
 return 0;
@@ -2395,14 +2394,13 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 return -EIO;
 }
 
-
 static void
 ksocknal_check_peer_timeouts (int idx)
 {
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- ksock_peer_t *peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ ksock_peer_t *peer;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
 
 again:
 /* NB. We expect to have a look at all the peers and not find any
@@ -2411,9 +2409,9 @@ ksocknal_check_peer_timeouts (int idx)
 read_lock(&ksocknal_data.ksnd_global_lock);
 
 list_for_each_entry(peer, peers, ksnp_list) {
- unsigned long deadline = 0;
- int resid = 0;
- int n = 0;
+ unsigned long deadline = 0;
+ int resid = 0;
+ int n = 0;
 
 if (ksocknal_send_keepalive_locked(peer) != 0) {
 read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2476,8 +2474,8 @@ ksocknal_check_peer_timeouts (int idx)
 tx = list_entry(peer->ksnp_zc_req_list.next,
 ksock_tx_t, tx_zc_list);
 deadline = tx->tx_deadline;
- resid = tx->tx_resid;
- conn = tx->tx_conn;
+ resid = tx->tx_resid;
+ conn = tx->tx_conn;
 ksocknal_conn_addref(conn);
 
 spin_unlock(&peer->ksnp_lock);
@@ -2499,17 +2497,17 @@ int
 ksocknal_reaper (void *arg)
 {
- wait_queue_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- struct list_head enomem_conns;
- int nenomem_conns;
- long timeout;
- int i;
- int peer_index = 0;
- unsigned long deadline = cfs_time_current();
-
- cfs_block_allsigs ();
+ wait_queue_t wait;
+ ksock_conn_t *conn;
+ ksock_sched_t *sched;
+ struct list_head enomem_conns;
+ int nenomem_conns;
+ long timeout;
+ int i;
+ int peer_index = 0;
+ unsigned long deadline = cfs_time_current();
+
+ cfs_block_allsigs();
 
 INIT_LIST_HEAD(&enomem_conns);
 init_waitqueue_entry(&wait, current);
@@ -2580,7 +2578,7 @@ ksocknal_reaper (void *arg)
 cfs_time_current())) <= 0) {
 const int n = 4;
 const int p = 1;
- int chunk = ksocknal_data.ksnd_peer_hash_size;
+ int chunk = ksocknal_data.ksnd_peer_hash_size;
 
 /* Time to check for timeouts on a few more peers: I do
  * checks every 'p' seconds on a proportion of the peer