author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/include/net/ip.h
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download
page.

During the rebase, one patch collided: "Force tick interrupt and get
rid of softirq magic" (I70131fb85). The colliding hunks were dropped
because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/include/net/ip.h')
-rw-r--r--  kernel/include/net/ip.h | 91
1 file changed, 59 insertions(+), 32 deletions(-)
diff --git a/kernel/include/net/ip.h b/kernel/include/net/ip.h
index f41fc497b..1a98f1ca1 100644
--- a/kernel/include/net/ip.h
+++ b/kernel/include/net/ip.h
@@ -31,7 +31,7 @@
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
struct sock;
@@ -45,6 +45,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_COMPLETE BIT(3)
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)
+#define IPSKB_FRAG_PMTU BIT(6)
u16 frag_max_size;
};
@@ -99,25 +100,20 @@ int igmp_mc_init(void);
* Functions provided by ip.c
*/
-int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr,
struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
-int ip_output(struct sock *sk, struct sk_buff *skb);
-int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
- int (*output)(struct sock *, struct sk_buff *));
-int ip_do_nat(struct sk_buff *skb);
+int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
-int __ip_local_out(struct sk_buff *skb);
-int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
-static inline int ip_local_out(struct sk_buff *skb)
-{
- return ip_local_out_sk(skb->sk, skb);
-}
+int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
void ip_init(void);
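
[Note on the hunk above] The output-path helpers now take the network
namespace explicitly instead of deriving it from the socket or skb
inside. A minimal caller sketch under the new prototypes (hypothetical
function, kernel context assumed; sock_net() is the stock accessor and
nothing below is part of this patch):

    /* Hypothetical caller: the netns is now passed in rather than
     * looked up from skb->sk inside ip_local_out(). */
    #include <net/ip.h>
    #include <net/sock.h>

    static int example_xmit(struct sock *sk, struct sk_buff *skb)
    {
            struct net *net = sock_net(sk); /* netns owning this socket */

            return ip_local_out(net, sk, skb);
    }
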
@@ -202,10 +198,20 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset)
+{
+ return snmp_get_cpu_field(mib, cpu, offct);
+
+}
+
static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
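
[Note on the hunk above] The new snmp_get_cpu_field()/
snmp_get_cpu_field64() helpers expose the raw per-CPU counters that
snmp_fold_field()/snmp_fold_field64() sum over; on 64-bit builds the
64-bit variant simply forwards, since no syncp protection is needed. A
hedged sketch of a fold built on the per-CPU getter ('mib' and 'offt'
are placeholders, kernel context assumed):

    /* Illustrative fold over all possible CPUs; shown only as a usage
     * example of the new getter, not as the in-tree implementation. */
    #include <linux/cpumask.h>
    #include <net/ip.h>

    static u64 example_fold(void __percpu *mib, int offt)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += snmp_get_cpu_field(mib, cpu, offt);
            return sum;
    }
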
@@ -272,10 +278,12 @@ int ip_decrease_ttl(struct iphdr *iph)
}
static inline
-int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
- return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
- (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc == IP_PMTUDISC_DO ||
+ (pmtudisc == IP_PMTUDISC_WANT &&
!(dst_metric_locked(dst, RTAX_MTU)));
}
@@ -311,12 +319,15 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
{
- if (!skb->sk || ip_sk_use_pmtu(skb->sk)) {
+ struct sock *sk = skb->sk;
+
+ if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
- } else {
- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
}
+
+ return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
}
u32 ip_idents_reserve(u32 hash, int segs);
@@ -356,17 +367,18 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0);
}
-static inline void inet_set_txhash(struct sock *sk)
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v4addrs.src = iph->saddr;
+ * flow->v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+ const struct iphdr *iph)
{
- struct inet_sock *inet = inet_sk(sk);
- struct flow_keys keys;
-
- keys.src = inet->inet_saddr;
- keys.dst = inet->inet_daddr;
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
-
- sk->sk_txhash = flow_hash_from_keys(&keys);
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+ offsetof(typeof(flow->addrs), v4addrs.src) +
+ sizeof(flow->addrs.v4addrs.src));
+ memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
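
[Note on the hunk above] A hedged usage sketch for the new address-copy
helper (hypothetical dissector-style caller; only
iph_to_flow_copy_v4addrs() itself comes from this patch):

    /* Hypothetical caller: copies saddr and daddr in one 8-byte move
     * and tags the flow as IPv4; 'iph' must point at a parsed header. */
    #include <linux/ip.h>
    #include <net/ip.h>

    static void example_dissect_v4(struct flow_keys *keys,
                                   const struct iphdr *iph)
    {
            iph_to_flow_copy_v4addrs(keys, iph);
            /* keys->addrs.v4addrs and keys->control.addr_type now set */
    }
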
static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
@@ -457,6 +469,11 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
+static inline unsigned int ipv4_addr_hash(__be32 ip)
+{
+ return (__force unsigned int) ip;
+}
+
bool ip_call_ra_chain(struct sk_buff *skb);
/*
@@ -479,11 +496,21 @@ enum ip_defrag_users {
IP_DEFRAG_MACVLAN,
};
-int ip_defrag(struct sk_buff *skb, u32 user);
+/* Return true if the value of 'user' is between 'lower_bond'
+ * and 'upper_bond' inclusively.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+ enum ip_defrag_users lower_bond,
+ enum ip_defrag_users upper_bond)
+{
+ return user >= lower_bond && user <= upper_bond;
+}
+
+int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
-struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
-static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
return skb;
}
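
[Note on the hunk above] A hedged sketch of the range helper in use:
the conntrack defrag users occupy a contiguous block of the enum, so a
caller can classify them with one bounds check (the wrapper function is
hypothetical, and the enum bounds are assumed to match the in-tree
ip_defrag_users values):

    /* Hypothetical classifier built on ip_defrag_user_in_between(). */
    #include <net/ip.h>

    static bool example_from_conntrack_in(u32 user)
    {
            return ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
                                             __IP_DEFRAG_CONNTRACK_IN_END);
    }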