author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/include/net/request_sock.h
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The colliding patch was dropped, since its logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/include/net/request_sock.h')
-rw-r--r--  kernel/include/net/request_sock.h | 190
1 file changed, 63 insertions(+), 127 deletions(-)
diff --git a/kernel/include/net/request_sock.h b/kernel/include/net/request_sock.h
index 9f4265ce8..a0dde04eb 100644
--- a/kernel/include/net/request_sock.h
+++ b/kernel/include/net/request_sock.h
@@ -32,17 +32,17 @@ struct request_sock_ops {
int obj_size;
struct kmem_cache *slab;
char *slab_name;
- int (*rtx_syn_ack)(struct sock *sk,
+ int (*rtx_syn_ack)(const struct sock *sk,
struct request_sock *req);
- void (*send_ack)(struct sock *sk, struct sk_buff *skb,
+ void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
- void (*send_reset)(struct sock *sk,
+ void (*send_reset)(const struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
void (*syn_ack_timeout)(const struct request_sock *req);
};
-int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
/* struct request_sock - mini sock to represent a connection request
*/
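
To make the const-qualification above concrete, a hypothetical ops table written against the new signatures could look like the following sketch (the demo_* names are placeholders, not part of this diff; the real in-tree users are tables such as tcp_request_sock_ops):

static int demo_rtx_syn_ack(const struct sock *sk, struct request_sock *req)
{
	/* would retransmit the SYN-ACK described by req */
	return 0;
}

static void demo_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	/* would answer skb with a RST */
}

static struct request_sock_ops demo_req_ops = {
	.obj_size    = sizeof(struct request_sock),
	.slab_name   = "demo_request_sock",
	.rtx_syn_ack = demo_rtx_syn_ack,
	.send_reset  = demo_send_reset,
};

The const qualifier documents that these callbacks may read, but must not mutate, the listener socket.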
@@ -50,34 +50,54 @@ struct request_sock {
struct sock_common __req_common;
#define rsk_refcnt __req_common.skc_refcnt
#define rsk_hash __req_common.skc_hash
+#define rsk_listener __req_common.skc_listener
+#define rsk_window_clamp __req_common.skc_window_clamp
+#define rsk_rcv_wnd __req_common.skc_rcv_wnd
struct request_sock *dl_next;
- struct sock *rsk_listener;
u16 mss;
u8 num_retrans; /* number of retransmits */
u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
u8 num_timeout:7; /* number of timeouts */
- /* The following two fields can be easily recomputed I think -AK */
- u32 window_clamp; /* window clamp at creation time */
- u32 rcv_wnd; /* rcv_wnd offered first time */
u32 ts_recent;
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
+ u32 *saved_syn;
u32 secid;
u32 peer_secid;
};
+static inline struct request_sock *inet_reqsk(struct sock *sk)
+{
+ return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+ return (struct sock *)req;
+}
+
static inline struct request_sock *
-reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+ bool attach_listener)
{
- struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
+ struct request_sock *req;
+
+ req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
if (req) {
req->rsk_ops = ops;
- sock_hold(sk_listener);
- req->rsk_listener = sk_listener;
-
+ if (attach_listener) {
+ sock_hold(sk_listener);
+ req->rsk_listener = sk_listener;
+ } else {
+ req->rsk_listener = NULL;
+ }
+ req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+ sk_node_init(&req_to_sk(req)->sk_node);
+ sk_tx_queue_clear(req_to_sk(req));
+ req->saved_syn = NULL;
/* Following is temporary. It is coupled with debugging
* helpers in reqsk_put() & reqsk_free()
*/
@@ -86,16 +106,6 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
return req;
}
-static inline struct request_sock *inet_reqsk(struct sock *sk)
-{
- return (struct request_sock *)sk;
-}
-
-static inline struct sock *req_to_sk(struct request_sock *req)
-{
- return (struct sock *)req;
-}
-
static inline void reqsk_free(struct request_sock *req)
{
/* temporary debugging */
@@ -104,6 +114,7 @@ static inline void reqsk_free(struct request_sock *req)
req->rsk_ops->destructor(req);
if (req->rsk_listener)
sock_put(req->rsk_listener);
+ kfree(req->saved_syn);
kmem_cache_free(req->rsk_ops->slab, req);
}
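
The new attach_listener argument lets callers that cannot safely take a listener reference skip the sock_hold(); in that case rsk_listener stays NULL and reqsk_free() will not drop a reference that was never taken. A minimal caller sketch against the new API (demo_make_req is hypothetical, not from this diff, and reuses the demo_req_ops table sketched earlier):

static struct request_sock *demo_make_req(struct sock *listener,
					  bool from_syncookie)
{
	struct request_sock *req;

	/* cookie-style callers pass attach_listener == false, so no
	 * listener reference is taken and req->rsk_listener is NULL
	 */
	req = reqsk_alloc(&demo_req_ops, listener, !from_syncookie);
	if (!req)
		return NULL;

	/* ... fill in mss, ts_recent, etc. ... */
	return req;	/* released via reqsk_free(), which now also
			 * kfree()s req->saved_syn */
}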
@@ -115,26 +126,6 @@ static inline void reqsk_put(struct request_sock *req)
extern int sysctl_max_syn_backlog;
-/** struct listen_sock - listen state
- *
- * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
- */
-struct listen_sock {
- int qlen_inc; /* protected by listener lock */
- int young_inc;/* protected by listener lock */
-
- /* following fields can be updated by timer */
- atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
- atomic_t young_dec;
-
- u8 max_qlen_log ____cacheline_aligned_in_smp;
- u8 synflood_warned;
- /* 2 bytes hole, try to use */
- u32 hash_rnd;
- u32 nr_table_entries;
- struct request_sock *syn_table[0];
-};
-
/*
* For a TCP Fast Open listener -
* lock - protects the access to all the reqsk, which is co-owned by
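
The struct listen_sock removal above retires the split-counter scheme in which each queue length was a lock-protected increment minus a timer-driven atomic decrement (qlen = qlen_inc - qlen_dec); the replacement, visible in the next hunk, is a single atomic per counter. A standalone C11 model of the new accounting (plain user-space code, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int qlen;   /* models queue->qlen  */
static atomic_int young;  /* models queue->young */

static void model_reqsk_queue_added(void)
{
	atomic_fetch_add(&young, 1);
	atomic_fetch_add(&qlen, 1);
}

static void model_reqsk_queue_removed(int num_timeout)
{
	/* a request is "young" until its first SYN-ACK timeout */
	if (num_timeout == 0)
		atomic_fetch_sub(&young, 1);
	atomic_fetch_sub(&qlen, 1);
}

int main(void)
{
	model_reqsk_queue_added();
	model_reqsk_queue_added();
	model_reqsk_queue_removed(0);
	printf("qlen=%d young=%d\n",
	       atomic_load(&qlen), atomic_load(&young));
	return 0;
}

Both directions are now lock-free, so neither the listener lock nor the per-field inc/dec split is needed.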
@@ -168,127 +159,72 @@ struct fastopen_queue {
* @rskq_accept_head - FIFO head of established children
* @rskq_accept_tail - FIFO tail of established children
* @rskq_defer_accept - User waits for some data after accept()
- * @syn_wait_lock - serializer
- *
- * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
- * lock sock while browsing the listening hash (otherwise it's deadlock prone).
*
*/
struct request_sock_queue {
+ spinlock_t rskq_lock;
+ u8 rskq_defer_accept;
+
+ u32 synflood_warned;
+ atomic_t qlen;
+ atomic_t young;
+
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
- u8 rskq_defer_accept;
- struct listen_sock *listen_opt;
- struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
- * enabled on this listener. Check
- * max_qlen != 0 in fastopen_queue
- * to determine if TFO is enabled
- * right at this moment.
+ struct fastopen_queue fastopenq; /* Check max_qlen != 0 to determine
+ * if TFO is enabled.
*/
-
- /* temporary alignment, our goal is to get rid of this lock */
- spinlock_t syn_wait_lock ____cacheline_aligned_in_smp;
};
-int reqsk_queue_alloc(struct request_sock_queue *queue,
- unsigned int nr_table_entries);
+void reqsk_queue_alloc(struct request_sock_queue *queue);
-void __reqsk_queue_destroy(struct request_sock_queue *queue);
-void reqsk_queue_destroy(struct request_sock_queue *queue);
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
bool reset);
-static inline struct request_sock *
- reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
- struct request_sock *req = queue->rskq_accept_head;
-
- queue->rskq_accept_head = NULL;
- return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
return queue->rskq_accept_head == NULL;
}
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
- struct request_sock *req,
- struct sock *parent,
- struct sock *child)
-{
- req->sk = child;
- sk_acceptq_added(parent);
-
- if (queue->rskq_accept_head == NULL)
- queue->rskq_accept_head = req;
- else
- queue->rskq_accept_tail->dl_next = req;
-
- queue->rskq_accept_tail = req;
- req->dl_next = NULL;
-}
-
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+ struct sock *parent)
{
- struct request_sock *req = queue->rskq_accept_head;
-
- WARN_ON(req == NULL);
-
- queue->rskq_accept_head = req->dl_next;
- if (queue->rskq_accept_head == NULL)
- queue->rskq_accept_tail = NULL;
+ struct request_sock *req;
+ spin_lock_bh(&queue->rskq_lock);
+ req = queue->rskq_accept_head;
+ if (req) {
+ sk_acceptq_removed(parent);
+ queue->rskq_accept_head = req->dl_next;
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_tail = NULL;
+ }
+ spin_unlock_bh(&queue->rskq_lock);
return req;
}
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
const struct request_sock *req)
{
- struct listen_sock *lopt = queue->listen_opt;
-
if (req->num_timeout == 0)
- atomic_inc(&lopt->young_dec);
- atomic_inc(&lopt->qlen_dec);
+ atomic_dec(&queue->young);
+ atomic_dec(&queue->qlen);
}
static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
- struct listen_sock *lopt = queue->listen_opt;
-
- lopt->young_inc++;
- lopt->qlen_inc++;
-}
-
-static inline int listen_sock_qlen(const struct listen_sock *lopt)
-{
- return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
-}
-
-static inline int listen_sock_young(const struct listen_sock *lopt)
-{
- return lopt->young_inc - atomic_read(&lopt->young_dec);
+ atomic_inc(&queue->young);
+ atomic_inc(&queue->qlen);
}
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
- const struct listen_sock *lopt = queue->listen_opt;
-
- return lopt ? listen_sock_qlen(lopt) : 0;
+ return atomic_read(&queue->qlen);
}
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
- return listen_sock_young(queue->listen_opt);
+ return atomic_read(&queue->young);
}
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
-{
- return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
-}
-
-void reqsk_queue_hash_req(struct request_sock_queue *queue,
- u32 hash, struct request_sock *req,
- unsigned long timeout);
-
#endif /* _REQUEST_SOCK_H */
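
Taken together, the queue changes mean a consumer dequeues established children under the queue's own rskq_lock rather than the listener's socket lock. An accept-side sketch against the new API (demo_accept_one is hypothetical; the in-tree consumer is inet_csk_accept()):

static struct sock *demo_accept_one(struct sock *listener,
				    struct request_sock_queue *queue)
{
	struct request_sock *req;
	struct sock *child = NULL;

	/* reqsk_queue_remove() now takes rskq_lock internally and
	 * calls sk_acceptq_removed() on the parent itself
	 */
	req = reqsk_queue_remove(queue, listener);
	if (req) {
		child = req->sk;
		reqsk_put(req);
	}
	return child;
}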