From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as the OPNFV kvm base. It is taken from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git,
branch linux-4.1.y-rt, and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior

We lose all the git history this way, which is not good. We should
switch to a proper OPNFV project repository in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
 kernel/net/ieee802154/6lowpan/6lowpan_i.h  |  72 ++++
 kernel/net/ieee802154/6lowpan/Kconfig      |   5 +
 kernel/net/ieee802154/6lowpan/Makefile     |   3 +
 kernel/net/ieee802154/6lowpan/core.c       | 306 +++++++++++++++
 kernel/net/ieee802154/6lowpan/reassembly.c | 585 +++++++++++++++++++++++++++++
 kernel/net/ieee802154/6lowpan/rx.c         | 171 +++++++++
 kernel/net/ieee802154/6lowpan/tx.c         | 271 +++++++++++++
 7 files changed, 1413 insertions(+)
 create mode 100644 kernel/net/ieee802154/6lowpan/6lowpan_i.h
 create mode 100644 kernel/net/ieee802154/6lowpan/Kconfig
 create mode 100644 kernel/net/ieee802154/6lowpan/Makefile
 create mode 100644 kernel/net/ieee802154/6lowpan/core.c
 create mode 100644 kernel/net/ieee802154/6lowpan/reassembly.c
 create mode 100644 kernel/net/ieee802154/6lowpan/rx.c
 create mode 100644 kernel/net/ieee802154/6lowpan/tx.c

(limited to 'kernel/net/ieee802154/6lowpan')

diff --git a/kernel/net/ieee802154/6lowpan/6lowpan_i.h b/kernel/net/ieee802154/6lowpan/6lowpan_i.h
new file mode 100644
index 000000000..e50f69da7
--- /dev/null
+++ b/kernel/net/ieee802154/6lowpan/6lowpan_i.h
@@ -0,0 +1,72 @@
+#ifndef __IEEE802154_6LOWPAN_I_H__
+#define __IEEE802154_6LOWPAN_I_H__
+
+#include <linux/list.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/inet_frag.h>
+
+struct lowpan_create_arg {
+	u16 tag;
+	u16 d_size;
+	const struct ieee802154_addr *src;
+	const struct ieee802154_addr *dst;
+};
+
+/* Equivalent of ipv4 struct ip
+ */
+struct lowpan_frag_queue {
+	struct inet_frag_queue	q;
+
+	u16			tag;
+	u16			d_size;
+	struct ieee802154_addr	saddr;
+	struct ieee802154_addr	daddr;
+};
+
+static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
+{
+	switch (a->mode) {
+	case IEEE802154_ADDR_LONG:
+		return (((__force u64)a->extended_addr) >> 32) ^
+			(((__force u64)a->extended_addr) & 0xffffffff);
+	case IEEE802154_ADDR_SHORT:
+		return (__force u32)(a->short_addr);
+	default:
+		return 0;
+	}
+}
+
+struct lowpan_dev_record {
+	struct net_device *ldev;
+	struct list_head list;
+};
+
+/* private device info */
+struct lowpan_dev_info {
+	struct net_device	*real_dev; /* real WPAN device ptr */
+	struct mutex		dev_list_mtx; /* mutex for list ops */
+	u16			fragment_tag;
+};
+
+static inline struct
+lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+extern struct list_head lowpan_devices;
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+void lowpan_net_frag_exit(void);
+int lowpan_net_frag_init(void);
+
+void lowpan_rx_init(void);
+void lowpan_rx_exit(void);
+
+int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+			 unsigned short type, const void *_daddr,
+			 const void *_saddr, unsigned int len);
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
+
+#endif /* __IEEE802154_6LOWPAN_I_H__ */
diff --git a/kernel/net/ieee802154/6lowpan/Kconfig b/kernel/net/ieee802154/6lowpan/Kconfig
new
file mode 100644 index 000000000..d24f985b0 --- /dev/null +++ b/kernel/net/ieee802154/6lowpan/Kconfig @@ -0,0 +1,5 @@ +config IEEE802154_6LOWPAN + tristate "6lowpan support over IEEE 802.15.4" + depends on 6LOWPAN + ---help--- + IPv6 compression over IEEE 802.15.4. diff --git a/kernel/net/ieee802154/6lowpan/Makefile b/kernel/net/ieee802154/6lowpan/Makefile new file mode 100644 index 000000000..6bfb270a8 --- /dev/null +++ b/kernel/net/ieee802154/6lowpan/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o + +ieee802154_6lowpan-y := core.o rx.o reassembly.o tx.o diff --git a/kernel/net/ieee802154/6lowpan/core.c b/kernel/net/ieee802154/6lowpan/core.c new file mode 100644 index 000000000..0ae5822ef --- /dev/null +++ b/kernel/net/ieee802154/6lowpan/core.c @@ -0,0 +1,306 @@ +/* Copyright 2011, Siemens AG + * written by Alexander Smirnov + */ + +/* Based on patches from Jon Smirl + * Copyright (c) 2011 Jon Smirl + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Jon's code is based on 6lowpan implementation for Contiki which is: + * Copyright (c) 2008, Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#include + +#include "6lowpan_i.h" + +LIST_HEAD(lowpan_devices); +static int lowpan_open_count; + +static __le16 lowpan_get_pan_id(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + + return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); +} + +static __le16 lowpan_get_short_addr(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + + return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); +} + +static u8 lowpan_get_dsn(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + + return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev); +} + +static struct header_ops lowpan_header_ops = { + .create = lowpan_header_create, +}; + +static struct lock_class_key lowpan_tx_busylock; +static struct lock_class_key lowpan_netdev_xmit_lock_key; + +static void lowpan_set_lockdep_class_one(struct net_device *dev, + struct netdev_queue *txq, + void *_unused) +{ + lockdep_set_class(&txq->_xmit_lock, + &lowpan_netdev_xmit_lock_key); +} + +static int lowpan_dev_init(struct net_device *dev) +{ + netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL); + dev->qdisc_tx_busylock = &lowpan_tx_busylock; + return 0; +} + +static const struct net_device_ops lowpan_netdev_ops = { + .ndo_init = lowpan_dev_init, + .ndo_start_xmit = lowpan_xmit, +}; + +static struct ieee802154_mlme_ops lowpan_mlme = { + .get_pan_id = lowpan_get_pan_id, + .get_short_addr = lowpan_get_short_addr, + .get_dsn = lowpan_get_dsn, +}; + +static void lowpan_setup(struct net_device *dev) +{ + dev->addr_len = IEEE802154_ADDR_LEN; + memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); + dev->type = ARPHRD_6LOWPAN; + /* Frame Control + Sequence Number + Address fields + Security Header */ + dev->hard_header_len = 2 + 1 + 20 + 14; + dev->needed_tailroom = 2; /* FCS */ + dev->mtu = IPV6_MIN_MTU; + dev->tx_queue_len = 0; + dev->flags = IFF_BROADCAST | IFF_MULTICAST; + dev->watchdog_timeo = 0; + + dev->netdev_ops = &lowpan_netdev_ops; + dev->header_ops = &lowpan_header_ops; + dev->ml_priv = &lowpan_mlme; + dev->destructor = free_netdev; + dev->features |= NETIF_F_NETNS_LOCAL; +} + +static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) + return -EINVAL; + } + return 0; +} + +static int lowpan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct net_device *real_dev; + struct lowpan_dev_record *entry; + int ret; + + ASSERT_RTNL(); + + pr_debug("adding new link\n"); + + if (!tb[IFLA_LINK] || + !net_eq(dev_net(dev), &init_net)) + return -EINVAL; + /* find and hold real wpan device */ + real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); + if (!real_dev) + return -ENODEV; + if (real_dev->type != ARPHRD_IEEE802154) { + dev_put(real_dev); + return -EINVAL; + } + + lowpan_dev_info(dev)->real_dev = real_dev; + mutex_init(&lowpan_dev_info(dev)->dev_list_mtx); + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + dev_put(real_dev); + lowpan_dev_info(dev)->real_dev = NULL; + return -ENOMEM; + } + + entry->ldev = dev; + + /* Set the lowpan hardware address to the wpan hardware address. 
*/ + memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN); + + mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); + INIT_LIST_HEAD(&entry->list); + list_add_tail(&entry->list, &lowpan_devices); + mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); + + ret = register_netdevice(dev); + if (ret >= 0) { + if (!lowpan_open_count) + lowpan_rx_init(); + lowpan_open_count++; + } + + return ret; +} + +static void lowpan_dellink(struct net_device *dev, struct list_head *head) +{ + struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); + struct net_device *real_dev = lowpan_dev->real_dev; + struct lowpan_dev_record *entry, *tmp; + + ASSERT_RTNL(); + + lowpan_open_count--; + if (!lowpan_open_count) + lowpan_rx_exit(); + + mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); + list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { + if (entry->ldev == dev) { + list_del(&entry->list); + kfree(entry); + } + } + mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); + + mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx); + + unregister_netdevice_queue(dev, head); + + dev_put(real_dev); +} + +static struct rtnl_link_ops lowpan_link_ops __read_mostly = { + .kind = "lowpan", + .priv_size = sizeof(struct lowpan_dev_info), + .setup = lowpan_setup, + .newlink = lowpan_newlink, + .dellink = lowpan_dellink, + .validate = lowpan_validate, +}; + +static inline int __init lowpan_netlink_init(void) +{ + return rtnl_link_register(&lowpan_link_ops); +} + +static inline void lowpan_netlink_fini(void) +{ + rtnl_link_unregister(&lowpan_link_ops); +} + +static int lowpan_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + LIST_HEAD(del_list); + struct lowpan_dev_record *entry, *tmp; + + if (dev->type != ARPHRD_IEEE802154) + goto out; + + if (event == NETDEV_UNREGISTER) { + list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { + if (lowpan_dev_info(entry->ldev)->real_dev == dev) + lowpan_dellink(entry->ldev, &del_list); + } + + unregister_netdevice_many(&del_list); + } + +out: + return NOTIFY_DONE; +} + +static struct notifier_block lowpan_dev_notifier = { + .notifier_call = lowpan_device_event, +}; + +static int __init lowpan_init_module(void) +{ + int err = 0; + + err = lowpan_net_frag_init(); + if (err < 0) + goto out; + + err = lowpan_netlink_init(); + if (err < 0) + goto out_frag; + + err = register_netdevice_notifier(&lowpan_dev_notifier); + if (err < 0) + goto out_pack; + + return 0; + +out_pack: + lowpan_netlink_fini(); +out_frag: + lowpan_net_frag_exit(); +out: + return err; +} + +static void __exit lowpan_cleanup_module(void) +{ + lowpan_netlink_fini(); + + lowpan_net_frag_exit(); + + unregister_netdevice_notifier(&lowpan_dev_notifier); +} + +module_init(lowpan_init_module); +module_exit(lowpan_cleanup_module); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK("lowpan"); diff --git a/kernel/net/ieee802154/6lowpan/reassembly.c b/kernel/net/ieee802154/6lowpan/reassembly.c new file mode 100644 index 000000000..f46e4d130 --- /dev/null +++ b/kernel/net/ieee802154/6lowpan/reassembly.c @@ -0,0 +1,585 @@ +/* 6LoWPAN fragment reassembly + * + * + * Authors: + * Alexander Aring + * + * Based on: net/ipv6/reassembly.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#define pr_fmt(fmt) "6LoWPAN: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "6lowpan_i.h" + +static const char lowpan_frags_cache_name[] = "lowpan-frags"; + +struct lowpan_frag_info { + u16 d_tag; + u16 d_size; + u8 d_offset; +}; + +static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb) +{ + return (struct lowpan_frag_info *)skb->cb; +} + +static struct inet_frags lowpan_frags; + +static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, + struct sk_buff *prev, struct net_device *dev); + +static unsigned int lowpan_hash_frag(u16 tag, u16 d_size, + const struct ieee802154_addr *saddr, + const struct ieee802154_addr *daddr) +{ + net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd)); + return jhash_3words(ieee802154_addr_hash(saddr), + ieee802154_addr_hash(daddr), + (__force u32)(tag + (d_size << 16)), + lowpan_frags.rnd); +} + +static unsigned int lowpan_hashfn(const struct inet_frag_queue *q) +{ + const struct lowpan_frag_queue *fq; + + fq = container_of(q, struct lowpan_frag_queue, q); + return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr); +} + +static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a) +{ + const struct lowpan_frag_queue *fq; + const struct lowpan_create_arg *arg = a; + + fq = container_of(q, struct lowpan_frag_queue, q); + return fq->tag == arg->tag && fq->d_size == arg->d_size && + ieee802154_addr_equal(&fq->saddr, arg->src) && + ieee802154_addr_equal(&fq->daddr, arg->dst); +} + +static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) +{ + const struct lowpan_create_arg *arg = a; + struct lowpan_frag_queue *fq; + + fq = container_of(q, struct lowpan_frag_queue, q); + + fq->tag = arg->tag; + fq->d_size = arg->d_size; + fq->saddr = *arg->src; + fq->daddr = *arg->dst; +} + +static void lowpan_frag_expire(unsigned long data) +{ + struct frag_queue *fq; + struct net *net; + + fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); + net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); + + spin_lock(&fq->q.lock); + + if (fq->q.flags & INET_FRAG_COMPLETE) + goto out; + + inet_frag_kill(&fq->q, &lowpan_frags); +out: + spin_unlock(&fq->q.lock); + inet_frag_put(&fq->q, &lowpan_frags); +} + +static inline struct lowpan_frag_queue * +fq_find(struct net *net, const struct lowpan_frag_info *frag_info, + const struct ieee802154_addr *src, + const struct ieee802154_addr *dst) +{ + struct inet_frag_queue *q; + struct lowpan_create_arg arg; + unsigned int hash; + struct netns_ieee802154_lowpan *ieee802154_lowpan = + net_ieee802154_lowpan(net); + + arg.tag = frag_info->d_tag; + arg.d_size = frag_info->d_size; + arg.src = src; + arg.dst = dst; + + hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst); + + q = inet_frag_find(&ieee802154_lowpan->frags, + &lowpan_frags, &arg, hash); + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); + return NULL; + } + return container_of(q, struct lowpan_frag_queue, q); +} + +static int lowpan_frag_queue(struct lowpan_frag_queue *fq, + struct sk_buff *skb, const u8 frag_type) +{ + struct sk_buff *prev, *next; + struct net_device *dev; + int end, offset; + + if (fq->q.flags & INET_FRAG_COMPLETE) + goto err; + + offset = lowpan_cb(skb)->d_offset << 3; + end = lowpan_cb(skb)->d_size; + + /* Is this the final fragment? 
*/ + if (offset + skb->len == end) { + /* If we already have some bits beyond end + * or have different end, the segment is corrupted. + */ + if (end < fq->q.len || + ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) + goto err; + fq->q.flags |= INET_FRAG_LAST_IN; + fq->q.len = end; + } else { + if (end > fq->q.len) { + /* Some bits beyond end -> corruption. */ + if (fq->q.flags & INET_FRAG_LAST_IN) + goto err; + fq->q.len = end; + } + } + + /* Find out which fragments are in front and at the back of us + * in the chain of fragments so far. We must know where to put + * this fragment, right? + */ + prev = fq->q.fragments_tail; + if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) { + next = NULL; + goto found; + } + prev = NULL; + for (next = fq->q.fragments; next != NULL; next = next->next) { + if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset) + break; /* bingo! */ + prev = next; + } + +found: + /* Insert this fragment in the chain of fragments. */ + skb->next = next; + if (!next) + fq->q.fragments_tail = skb; + if (prev) + prev->next = skb; + else + fq->q.fragments = skb; + + dev = skb->dev; + if (dev) + skb->dev = NULL; + + fq->q.stamp = skb->tstamp; + if (frag_type == LOWPAN_DISPATCH_FRAG1) { + /* Calculate uncomp. 6lowpan header to estimate full size */ + fq->q.meat += lowpan_uncompress_size(skb, NULL); + fq->q.flags |= INET_FRAG_FIRST_IN; + } else { + fq->q.meat += skb->len; + } + add_frag_mem_limit(&fq->q, skb->truesize); + + if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && + fq->q.meat == fq->q.len) { + int res; + unsigned long orefdst = skb->_skb_refdst; + + skb->_skb_refdst = 0UL; + res = lowpan_frag_reasm(fq, prev, dev); + skb->_skb_refdst = orefdst; + return res; + } + + return -1; +err: + kfree_skb(skb); + return -1; +} + +/* Check if this packet is complete. + * Returns NULL on failure by any reason, and pointer + * to current nexthdr field in reassembled frame. + * + * It is called with locked fq, and caller must check that + * queue is eligible for reassembly i.e. it is not COMPLETE, + * the last and the first frames arrived and all the bits are here. + */ +static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, + struct net_device *dev) +{ + struct sk_buff *fp, *head = fq->q.fragments; + int sum_truesize; + + inet_frag_kill(&fq->q, &lowpan_frags); + + /* Make the one we just received the head. */ + if (prev) { + head = prev->next; + fp = skb_clone(head, GFP_ATOMIC); + + if (!fp) + goto out_oom; + + fp->next = head->next; + if (!fp->next) + fq->q.fragments_tail = fp; + prev->next = fp; + + skb_morph(head, fq->q.fragments); + head->next = fq->q.fragments->next; + + consume_skb(fq->q.fragments); + fq->q.fragments = head; + } + + /* Head of list must not be cloned. */ + if (skb_unclone(head, GFP_ATOMIC)) + goto out_oom; + + /* If the first fragment is fragmented itself, we split + * it to two chunks: the first with data and paged part + * and the second, holding only fragments. 
+ */ + if (skb_has_frag_list(head)) { + struct sk_buff *clone; + int i, plen = 0; + + clone = alloc_skb(0, GFP_ATOMIC); + if (!clone) + goto out_oom; + clone->next = head->next; + head->next = clone; + skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; + skb_frag_list_init(head); + for (i = 0; i < skb_shinfo(head)->nr_frags; i++) + plen += skb_frag_size(&skb_shinfo(head)->frags[i]); + clone->len = head->data_len - plen; + clone->data_len = clone->len; + head->data_len -= clone->len; + head->len -= clone->len; + add_frag_mem_limit(&fq->q, clone->truesize); + } + + WARN_ON(head == NULL); + + sum_truesize = head->truesize; + for (fp = head->next; fp;) { + bool headstolen; + int delta; + struct sk_buff *next = fp->next; + + sum_truesize += fp->truesize; + if (skb_try_coalesce(head, fp, &headstolen, &delta)) { + kfree_skb_partial(fp, headstolen); + } else { + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = fp; + head->data_len += fp->len; + head->len += fp->len; + head->truesize += fp->truesize; + } + fp = next; + } + sub_frag_mem_limit(&fq->q, sum_truesize); + + head->next = NULL; + head->dev = dev; + head->tstamp = fq->q.stamp; + + fq->q.fragments = NULL; + fq->q.fragments_tail = NULL; + + return 1; +out_oom: + net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n"); + return -1; +} + +static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type, + struct lowpan_frag_info *frag_info) +{ + bool fail; + u8 pattern = 0, low = 0; + __be16 d_tag = 0; + + fail = lowpan_fetch_skb(skb, &pattern, 1); + fail |= lowpan_fetch_skb(skb, &low, 1); + frag_info->d_size = (pattern & 7) << 8 | low; + fail |= lowpan_fetch_skb(skb, &d_tag, 2); + frag_info->d_tag = ntohs(d_tag); + + if (frag_type == LOWPAN_DISPATCH_FRAGN) { + fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1); + } else { + skb_reset_network_header(skb); + frag_info->d_offset = 0; + } + + if (unlikely(fail)) + return -EIO; + + return 0; +} + +int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type) +{ + struct lowpan_frag_queue *fq; + struct net *net = dev_net(skb->dev); + struct lowpan_frag_info *frag_info = lowpan_cb(skb); + struct ieee802154_addr source, dest; + int err; + + source = mac_cb(skb)->source; + dest = mac_cb(skb)->dest; + + err = lowpan_get_frag_info(skb, frag_type, frag_info); + if (err < 0) + goto err; + + if (frag_info->d_size > IPV6_MIN_MTU) { + net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n"); + goto err; + } + + fq = fq_find(net, frag_info, &source, &dest); + if (fq != NULL) { + int ret; + + spin_lock(&fq->q.lock); + ret = lowpan_frag_queue(fq, skb, frag_type); + spin_unlock(&fq->q.lock); + + inet_frag_put(&fq->q, &lowpan_frags); + return ret; + } + +err: + kfree_skb(skb); + return -1; +} +EXPORT_SYMBOL(lowpan_frag_rcv); + +#ifdef CONFIG_SYSCTL +static int zero; + +static struct ctl_table lowpan_frags_ns_ctl_table[] = { + { + .procname = "6lowpanfrag_high_thresh", + .data = &init_net.ieee802154_lowpan.frags.high_thresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh + }, + { + .procname = "6lowpanfrag_low_thresh", + .data = &init_net.ieee802154_lowpan.frags.low_thresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh + }, + { + .procname = "6lowpanfrag_time", + .data = &init_net.ieee802154_lowpan.frags.timeout, + .maxlen = sizeof(int), + .mode = 0644, + 
.proc_handler = proc_dointvec_jiffies, + }, + { } +}; + +/* secret interval has been deprecated */ +static int lowpan_frags_secret_interval_unused; +static struct ctl_table lowpan_frags_ctl_table[] = { + { + .procname = "6lowpanfrag_secret_interval", + .data = &lowpan_frags_secret_interval_unused, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, + { } +}; + +static int __net_init lowpan_frags_ns_sysctl_register(struct net *net) +{ + struct ctl_table *table; + struct ctl_table_header *hdr; + struct netns_ieee802154_lowpan *ieee802154_lowpan = + net_ieee802154_lowpan(net); + + table = lowpan_frags_ns_ctl_table; + if (!net_eq(net, &init_net)) { + table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table), + GFP_KERNEL); + if (table == NULL) + goto err_alloc; + + table[0].data = &ieee802154_lowpan->frags.high_thresh; + table[0].extra1 = &ieee802154_lowpan->frags.low_thresh; + table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh; + table[1].data = &ieee802154_lowpan->frags.low_thresh; + table[1].extra2 = &ieee802154_lowpan->frags.high_thresh; + table[2].data = &ieee802154_lowpan->frags.timeout; + + /* Don't export sysctls to unprivileged users */ + if (net->user_ns != &init_user_ns) + table[0].procname = NULL; + } + + hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table); + if (hdr == NULL) + goto err_reg; + + ieee802154_lowpan->sysctl.frags_hdr = hdr; + return 0; + +err_reg: + if (!net_eq(net, &init_net)) + kfree(table); +err_alloc: + return -ENOMEM; +} + +static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net) +{ + struct ctl_table *table; + struct netns_ieee802154_lowpan *ieee802154_lowpan = + net_ieee802154_lowpan(net); + + table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg; + unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr); + if (!net_eq(net, &init_net)) + kfree(table); +} + +static struct ctl_table_header *lowpan_ctl_header; + +static int __init lowpan_frags_sysctl_register(void) +{ + lowpan_ctl_header = register_net_sysctl(&init_net, + "net/ieee802154/6lowpan", + lowpan_frags_ctl_table); + return lowpan_ctl_header == NULL ? 
-ENOMEM : 0; +} + +static void lowpan_frags_sysctl_unregister(void) +{ + unregister_net_sysctl_table(lowpan_ctl_header); +} +#else +static inline int lowpan_frags_ns_sysctl_register(struct net *net) +{ + return 0; +} + +static inline void lowpan_frags_ns_sysctl_unregister(struct net *net) +{ +} + +static inline int __init lowpan_frags_sysctl_register(void) +{ + return 0; +} + +static inline void lowpan_frags_sysctl_unregister(void) +{ +} +#endif + +static int __net_init lowpan_frags_init_net(struct net *net) +{ + struct netns_ieee802154_lowpan *ieee802154_lowpan = + net_ieee802154_lowpan(net); + + ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH; + ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH; + ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT; + + inet_frags_init_net(&ieee802154_lowpan->frags); + + return lowpan_frags_ns_sysctl_register(net); +} + +static void __net_exit lowpan_frags_exit_net(struct net *net) +{ + struct netns_ieee802154_lowpan *ieee802154_lowpan = + net_ieee802154_lowpan(net); + + lowpan_frags_ns_sysctl_unregister(net); + inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags); +} + +static struct pernet_operations lowpan_frags_ops = { + .init = lowpan_frags_init_net, + .exit = lowpan_frags_exit_net, +}; + +int __init lowpan_net_frag_init(void) +{ + int ret; + + ret = lowpan_frags_sysctl_register(); + if (ret) + return ret; + + ret = register_pernet_subsys(&lowpan_frags_ops); + if (ret) + goto err_pernet; + + lowpan_frags.hashfn = lowpan_hashfn; + lowpan_frags.constructor = lowpan_frag_init; + lowpan_frags.destructor = NULL; + lowpan_frags.skb_free = NULL; + lowpan_frags.qsize = sizeof(struct frag_queue); + lowpan_frags.match = lowpan_frag_match; + lowpan_frags.frag_expire = lowpan_frag_expire; + lowpan_frags.frags_cache_name = lowpan_frags_cache_name; + ret = inet_frags_init(&lowpan_frags); + if (ret) + goto err_pernet; + + return ret; +err_pernet: + lowpan_frags_sysctl_unregister(); + return ret; +} + +void lowpan_net_frag_exit(void) +{ + inet_frags_fini(&lowpan_frags); + lowpan_frags_sysctl_unregister(); + unregister_pernet_subsys(&lowpan_frags_ops); +} diff --git a/kernel/net/ieee802154/6lowpan/rx.c b/kernel/net/ieee802154/6lowpan/rx.c new file mode 100644 index 000000000..4be1d289a --- /dev/null +++ b/kernel/net/ieee802154/6lowpan/rx.c @@ -0,0 +1,171 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +#include +#include + +#include "6lowpan_i.h" + +static int lowpan_give_skb_to_devices(struct sk_buff *skb, + struct net_device *dev) +{ + struct lowpan_dev_record *entry; + struct sk_buff *skb_cp; + int stat = NET_RX_SUCCESS; + + skb->protocol = htons(ETH_P_IPV6); + skb->pkt_type = PACKET_HOST; + + rcu_read_lock(); + list_for_each_entry_rcu(entry, &lowpan_devices, list) + if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { + skb_cp = skb_copy(skb, GFP_ATOMIC); + if (!skb_cp) { + kfree_skb(skb); + rcu_read_unlock(); + return NET_RX_DROP; + } + + skb_cp->dev = entry->ldev; + stat = netif_rx(skb_cp); + if (stat == NET_RX_DROP) + break; + } + rcu_read_unlock(); + + consume_skb(skb); + + return stat; +} + +static int +iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr) +{ + u8 iphc0, iphc1; + struct ieee802154_addr_sa sa, da; + void *sap, *dap; + + raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); + /* at least two bytes will be used for the encoding */ + if (skb->len < 2) + return -EINVAL; + + if (lowpan_fetch_skb_u8(skb, &iphc0)) + return -EINVAL; + + if (lowpan_fetch_skb_u8(skb, &iphc1)) + return -EINVAL; + + ieee802154_addr_to_sa(&sa, &hdr->source); + ieee802154_addr_to_sa(&da, &hdr->dest); + + if (sa.addr_type == IEEE802154_ADDR_SHORT) + sap = &sa.short_addr; + else + sap = &sa.hwaddr; + + if (da.addr_type == IEEE802154_ADDR_SHORT) + dap = &da.short_addr; + else + dap = &da.hwaddr; + + return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type, + IEEE802154_ADDR_LEN, dap, da.addr_type, + IEEE802154_ADDR_LEN, iphc0, iphc1); +} + +static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct ieee802154_hdr hdr; + int ret; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto drop; + + if (!netif_running(dev)) + goto drop_skb; + + if (skb->pkt_type == PACKET_OTHERHOST) + goto drop_skb; + + if (dev->type != ARPHRD_IEEE802154) + goto drop_skb; + + if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) + goto drop_skb; + + /* check that it's our buffer */ + if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { + /* Pull off the 1-byte of 6lowpan header. 
*/ + skb_pull(skb, 1); + return lowpan_give_skb_to_devices(skb, NULL); + } else { + switch (skb->data[0] & 0xe0) { + case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ + ret = iphc_decompress(skb, &hdr); + if (ret < 0) + goto drop_skb; + + return lowpan_give_skb_to_devices(skb, NULL); + case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ + ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1); + if (ret == 1) { + ret = iphc_decompress(skb, &hdr); + if (ret < 0) + goto drop_skb; + + return lowpan_give_skb_to_devices(skb, NULL); + } else if (ret == -1) { + return NET_RX_DROP; + } else { + return NET_RX_SUCCESS; + } + case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ + ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN); + if (ret == 1) { + ret = iphc_decompress(skb, &hdr); + if (ret < 0) + goto drop_skb; + + return lowpan_give_skb_to_devices(skb, NULL); + } else if (ret == -1) { + return NET_RX_DROP; + } else { + return NET_RX_SUCCESS; + } + default: + break; + } + } + +drop_skb: + kfree_skb(skb); +drop: + return NET_RX_DROP; +} + +static struct packet_type lowpan_packet_type = { + .type = htons(ETH_P_IEEE802154), + .func = lowpan_rcv, +}; + +void lowpan_rx_init(void) +{ + dev_add_pack(&lowpan_packet_type); +} + +void lowpan_rx_exit(void) +{ + dev_remove_pack(&lowpan_packet_type); +} diff --git a/kernel/net/ieee802154/6lowpan/tx.c b/kernel/net/ieee802154/6lowpan/tx.c new file mode 100644 index 000000000..2349070bd --- /dev/null +++ b/kernel/net/ieee802154/6lowpan/tx.c @@ -0,0 +1,271 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "6lowpan_i.h" + +/* don't save pan id, it's intra pan */ +struct lowpan_addr { + u8 mode; + union { + /* IPv6 needs big endian here */ + __be64 extended_addr; + __be16 short_addr; + } u; +}; + +struct lowpan_addr_info { + struct lowpan_addr daddr; + struct lowpan_addr saddr; +}; + +static inline struct +lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb) +{ + WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info)); + return (struct lowpan_addr_info *)(skb->data - + sizeof(struct lowpan_addr_info)); +} + +int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *_daddr, + const void *_saddr, unsigned int len) +{ + const u8 *saddr = _saddr; + const u8 *daddr = _daddr; + struct lowpan_addr_info *info; + + /* TODO: + * if this package isn't ipv6 one, where should it be routed? 
+ */ + if (type != ETH_P_IPV6) + return 0; + + if (!saddr) + saddr = dev->dev_addr; + + raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8); + raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8); + + info = lowpan_skb_priv(skb); + + /* TODO: Currently we only support extended_addr */ + info->daddr.mode = IEEE802154_ADDR_LONG; + memcpy(&info->daddr.u.extended_addr, daddr, + sizeof(info->daddr.u.extended_addr)); + info->saddr.mode = IEEE802154_ADDR_LONG; + memcpy(&info->saddr.u.extended_addr, saddr, + sizeof(info->daddr.u.extended_addr)); + + return 0; +} + +static struct sk_buff* +lowpan_alloc_frag(struct sk_buff *skb, int size, + const struct ieee802154_hdr *master_hdr) +{ + struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev; + struct sk_buff *frag; + int rc; + + frag = alloc_skb(real_dev->hard_header_len + + real_dev->needed_tailroom + size, + GFP_ATOMIC); + + if (likely(frag)) { + frag->dev = real_dev; + frag->priority = skb->priority; + skb_reserve(frag, real_dev->hard_header_len); + skb_reset_network_header(frag); + *mac_cb(frag) = *mac_cb(skb); + + rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest, + &master_hdr->source, size); + if (rc < 0) { + kfree_skb(frag); + return ERR_PTR(rc); + } + } else { + frag = ERR_PTR(-ENOMEM); + } + + return frag; +} + +static int +lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr, + u8 *frag_hdr, int frag_hdrlen, + int offset, int len) +{ + struct sk_buff *frag; + + raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen); + + frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr); + if (IS_ERR(frag)) + return -PTR_ERR(frag); + + memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen); + memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len); + + raw_dump_table(__func__, " fragment dump", frag->data, frag->len); + + return dev_queue_xmit(frag); +} + +static int +lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev, + const struct ieee802154_hdr *wpan_hdr) +{ + u16 dgram_size, dgram_offset; + __be16 frag_tag; + u8 frag_hdr[5]; + int frag_cap, frag_len, payload_cap, rc; + int skb_unprocessed, skb_offset; + + dgram_size = lowpan_uncompress_size(skb, &dgram_offset) - + skb->mac_len; + frag_tag = htons(lowpan_dev_info(dev)->fragment_tag); + lowpan_dev_info(dev)->fragment_tag++; + + frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07); + frag_hdr[1] = dgram_size & 0xff; + memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag)); + + payload_cap = ieee802154_max_payload(wpan_hdr); + + frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE - + skb_network_header_len(skb), 8); + + skb_offset = skb_network_header_len(skb); + skb_unprocessed = skb->len - skb->mac_len - skb_offset; + + rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, + LOWPAN_FRAG1_HEAD_SIZE, 0, + frag_len + skb_network_header_len(skb)); + if (rc) { + pr_debug("%s unable to send FRAG1 packet (tag: %d)", + __func__, ntohs(frag_tag)); + goto err; + } + + frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1; + frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN; + frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8); + + do { + dgram_offset += frag_len; + skb_offset += frag_len; + skb_unprocessed -= frag_len; + frag_len = min(frag_cap, skb_unprocessed); + + frag_hdr[4] = dgram_offset >> 3; + + rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, + LOWPAN_FRAGN_HEAD_SIZE, skb_offset, + frag_len); + if (rc) { + pr_debug("%s unable to send a FRAGN packet. 
(tag: %d, offset: %d)\n",
+				 __func__, ntohs(frag_tag), skb_offset);
+			goto err;
+		}
+	} while (skb_unprocessed > frag_cap);
+
+	consume_skb(skb);
+	return NET_XMIT_SUCCESS;
+
+err:
+	kfree_skb(skb);
+	return rc;
+}
+
+static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ieee802154_addr sa, da;
+	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+	struct lowpan_addr_info info;
+	void *daddr, *saddr;
+
+	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
+
+	/* TODO: Currently we only support extended_addr */
+	daddr = &info.daddr.u.extended_addr;
+	saddr = &info.saddr.u.extended_addr;
+
+	lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+
+	cb->type = IEEE802154_FC_TYPE_DATA;
+
+	/* prepare wpan address data */
+	sa.mode = IEEE802154_ADDR_LONG;
+	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+	sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
+
+	/* intra-PAN communications */
+	da.pan_id = sa.pan_id;
+
+	/* if the destination address is the broadcast address, use the
+	 * corresponding short address
+	 */
+	if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+		da.mode = IEEE802154_ADDR_SHORT;
+		da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+		cb->ackreq = false;
+	} else {
+		da.mode = IEEE802154_ADDR_LONG;
+		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
+		cb->ackreq = true;
+	}
+
+	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+			       ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+}
+
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ieee802154_hdr wpan_hdr;
+	int max_single, ret;
+
+	pr_debug("package xmit\n");
+
+	/* We must take a copy of the skb before we modify/replace the ipv6
+	 * header as the header could be used elsewhere
+	 */
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (!skb)
+		return NET_XMIT_DROP;
+
+	ret = lowpan_header(skb, dev);
+	if (ret < 0) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	max_single = ieee802154_max_payload(&wpan_hdr);
+
+	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
+		skb->dev = lowpan_dev_info(dev)->real_dev;
+		return dev_queue_xmit(skb);
+	} else {
+		netdev_tx_t rc;
+
+		pr_debug("frame is too big, fragmentation is needed\n");
+		rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
+
+		return rc < 0 ? NET_XMIT_DROP : rc;
+	}
+}
-- 
cgit 1.2.3-korg