Diffstat (limited to 'kernel/net/netlink')
-rw-r--r--  kernel/net/netlink/af_netlink.c  39
-rw-r--r--  kernel/net/netlink/af_netlink.h   2
2 files changed, 31 insertions(+), 10 deletions(-)
diff --git a/kernel/net/netlink/af_netlink.c b/kernel/net/netlink/af_netlink.c
index 59651af8c..360700a2f 100644
--- a/kernel/net/netlink/af_netlink.c
+++ b/kernel/net/netlink/af_netlink.c
@@ -931,7 +931,6 @@ static void netlink_sock_destruct(struct sock *sk)
 	if (nlk->cb_running) {
 		if (nlk->cb.done)
 			nlk->cb.done(&nlk->cb);
-
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -960,6 +959,14 @@ static void netlink_sock_destruct(struct sock *sk)
 	WARN_ON(nlk_sk(sk)->groups);
 }
 
+static void netlink_sock_destruct_work(struct work_struct *work)
+{
+	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+						work);
+
+	sk_free(&nlk->sk);
+}
+
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
  * SMP. Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
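
Note: the new helper exists so that the final sk_free(), and with it
netlink_sock_destruct(), whose cb.done() callback may sleep, runs in process
context instead of from an RCU callback. A minimal sketch of the same
deferral pattern, with hypothetical names (struct foo, foo_*), not the
patched code itself:

    /*
     * Sketch: deferring cleanup that may sleep out of atomic context.
     * container_of() recovers the enclosing object from the embedded
     * work_struct.  All names here are hypothetical.
     */
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct foo {
            struct work_struct work;
            /* ... state whose teardown may sleep ... */
    };

    static void foo_release_work(struct work_struct *work)
    {
            struct foo *f = container_of(work, struct foo, work);

            kfree(f);                       /* runs in process context */
    }

    static void foo_release(struct foo *f) /* callable from atomic context */
    {
            INIT_WORK(&f->work, foo_release_work);
            schedule_work(&f->work);
    }
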
@@ -1265,8 +1272,18 @@ out_module:
 static void deferred_put_nlk_sk(struct rcu_head *head)
 {
 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+	struct sock *sk = &nlk->sk;
+
+	if (!atomic_dec_and_test(&sk->sk_refcnt))
+		return;
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}
 
-	sock_put(&nlk->sk);
+	sk_free(sk);
 }
 
 static int netlink_release(struct socket *sock)
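
Note: sock_put() can no longer be used here, because dropping the last
reference would invoke sk_destruct, and hence the possibly sleeping
cb.done(), from RCU callback context. The final put is open-coded instead:
decrement the refcount, and if it hits zero either free directly or, when a
dump callback is still pending, punt sk_free() to the worker added above.
A sketch of the idiom, again with hypothetical names:

    /*
     * Sketch: the open-coded "final put".  A plain put is "if the
     * refcount hits zero, free"; splitting it apart lets the zero
     * path choose where the free runs.
     */
    #include <linux/atomic.h>

    struct obj {
            atomic_t refcnt;
    };

    static void obj_free(struct obj *o);          /* hypothetical */
    static void obj_free_deferred(struct obj *o); /* hypothetical, worker-based */

    static void obj_put(struct obj *o, bool teardown_may_sleep)
    {
            if (!atomic_dec_and_test(&o->refcnt))
                    return;                 /* others still hold references */

            if (teardown_may_sleep)
                    obj_free_deferred(o);   /* as in the sketch above */
            else
                    obj_free(o);
    }
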
@@ -1305,7 +1322,7 @@ static int netlink_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->portid) {
+	if (nlk->portid && nlk->bound) {
 		struct netlink_notify n = {
 			.net = sock_net(sk),
 			.protocol = sk->sk_protocol,
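
Note: nlk->portid can be non-zero even though the socket was never actually
inserted into the hash table (an unfinished or failed bind), so the extra
nlk->bound test keeps NETLINK_URELEASE notifications limited to sockets that
really completed bind. For illustration, a sketch of the kind of listener
this protects (my_nb and my_notify are hypothetical):

    #include <linux/netlink.h>
    #include <linux/notifier.h>

    static int my_notify(struct notifier_block *nb, unsigned long state,
                         void *ptr)
    {
            struct netlink_notify *n = ptr;

            if (state == NETLINK_URELEASE && n->protocol == NETLINK_GENERIC)
                    pr_info("netlink portid %u released\n", n->portid);
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = { .notifier_call = my_notify };
    /* registered elsewhere with netlink_register_notifier(&my_nb) */
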
@@ -2557,7 +2574,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	/* Record the max length of recvmsg() calls for future allocations */
 	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
 	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-				     16384);
+				     SKB_WITH_OVERHEAD(32768));
 
 	copied = data_skb->len;
 	if (len < copied) {
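
Note: the cap grows from 16 KiB to just under 32 KiB. SKB_WITH_OVERHEAD(X)
subtracts the cache-line-aligned size of struct skb_shared_info from X, so
payload plus skb bookkeeping still fits one 32 KiB block and the allocation
order does not grow. Illustrative only (the helper name is hypothetical, and
the exact value is config-dependent):

    /*
     * skbuff.h defines (paraphrased):
     *   SKB_WITH_OVERHEAD(X)  ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
     */
    #include <linux/skbuff.h>

    static inline size_t nl_recvmsg_cap(void)
    {
            return SKB_WITH_OVERHEAD(32768); /* a little under 32 KiB */
    }
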
@@ -2784,6 +2801,7 @@ static int netlink_dump(struct sock *sk)
 	struct netlink_callback *cb;
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
+	struct module *module;
 	int len, err = -ENOBUFS;
 	int alloc_min_size;
 	int alloc_size;
@@ -2809,14 +2827,13 @@ static int netlink_dump(struct sock *sk)
 	if (alloc_min_size < nlk->max_recvmsg_len) {
 		alloc_size = nlk->max_recvmsg_len;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					GFP_KERNEL |
-					__GFP_NOWARN |
-					__GFP_NORETRY);
+					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+					__GFP_NOWARN | __GFP_NORETRY);
 	}
 	if (!skb) {
 		alloc_size = alloc_min_size;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					GFP_KERNEL);
+					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
 	}
 	if (!skb)
 		goto errout_skb;
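
Note: GFP_KERNEL normally allows the caller to enter direct reclaim and
block. Clearing __GFP_DIRECT_RECLAIM keeps the kswapd wakeup but guarantees
netlink_dump() never stalls in reclaim while holding cb_mutex; on failure
the code falls back to the smaller size or returns -ENOBUFS. A sketch of the
flag arithmetic (helper name hypothetical):

    #include <linux/gfp.h>

    static inline gfp_t nl_dump_gfp(bool oversized_attempt)
    {
            /* may wake kswapd, but never blocks in direct reclaim */
            gfp_t base = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;

            /* the big first try also stays quiet and gives up early */
            return oversized_attempt ? base | __GFP_NOWARN | __GFP_NORETRY
                                     : base;
    }
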
@@ -2863,9 +2880,11 @@ static int netlink_dump(struct sock *sk)
 		cb->done(cb);
 
 	nlk->cb_running = false;
+	module = cb->module;
+	skb = cb->skb;
 	mutex_unlock(nlk->cb_mutex);
-	module_put(cb->module);
-	consume_skb(cb->skb);
+	module_put(module);
+	consume_skb(skb);
 	return 0;
 
 errout_skb:
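
Note: this hunk fixes a use-after-free. Once cb_running is cleared and
cb_mutex dropped, another thread may start a new dump and reuse nlk->cb, so
cb->module and cb->skb must be snapshotted into locals while the mutex is
still held. The general idiom, with hypothetical names (struct dump_state,
finish_dump):

    /*
     * Sketch: snapshot fields of a shared structure under the lock,
     * then use only the snapshots after unlock.
     */
    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/skbuff.h>

    struct dump_state {
            struct mutex lock;
            bool running;
            struct module *owner;
            struct sk_buff *skb;
    };

    static void finish_dump(struct dump_state *s)
    {
            struct module *module;
            struct sk_buff *skb;

            mutex_lock(&s->lock);
            module = s->owner;              /* state is still ours here */
            skb = s->skb;
            s->running = false;             /* may be reused after unlock */
            mutex_unlock(&s->lock);

            module_put(module);             /* locals only, not s->... */
            consume_skb(skb);
    }
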
diff --git a/kernel/net/netlink/af_netlink.h b/kernel/net/netlink/af_netlink.h
index 14437d9b1..df32cb92d 100644
--- a/kernel/net/netlink/af_netlink.h
+++ b/kernel/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@
 
 #include <linux/rhashtable.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <net/sock.h>
 
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -53,6 +54,7 @@ struct netlink_sock {
 
 	struct rhash_head	node;
 	struct rcu_head		rcu;
+	struct work_struct	work;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)