Diffstat (limited to 'kernel/net/ieee802154')
-rw-r--r--  kernel/net/ieee802154/6lowpan/6lowpan_i.h  |   25
-rw-r--r--  kernel/net/ieee802154/6lowpan/core.c        |  199
-rw-r--r--  kernel/net/ieee802154/6lowpan/reassembly.c  |  174
-rw-r--r--  kernel/net/ieee802154/6lowpan/rx.c          |  378
-rw-r--r--  kernel/net/ieee802154/6lowpan/tx.c          |  100
-rw-r--r--  kernel/net/ieee802154/Kconfig               |    5
-rw-r--r--  kernel/net/ieee802154/core.c                |   14
-rw-r--r--  kernel/net/ieee802154/core.h                |    1
-rw-r--r--  kernel/net/ieee802154/header_ops.c          |   20
-rw-r--r--  kernel/net/ieee802154/nl-mac.c              |   39
-rw-r--r--  kernel/net/ieee802154/nl-phy.c              |   10
-rw-r--r--  kernel/net/ieee802154/nl802154.c            | 1494
-rw-r--r--  kernel/net/ieee802154/rdev-ops.h            |  165
-rw-r--r--  kernel/net/ieee802154/socket.c              |   30
-rw-r--r--  kernel/net/ieee802154/sysfs.c               |   38
-rw-r--r--  kernel/net/ieee802154/trace.h               |   79
16 files changed, 2351 insertions, 420 deletions
diff --git a/kernel/net/ieee802154/6lowpan/6lowpan_i.h b/kernel/net/ieee802154/6lowpan/6lowpan_i.h
index e50f69da7..b4e17a7c0 100644
--- a/kernel/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/kernel/net/ieee802154/6lowpan/6lowpan_i.h
@@ -5,6 +5,16 @@
#include <net/ieee802154_netdev.h>
#include <net/inet_frag.h>
+#include <net/6lowpan.h>
+
+typedef unsigned __bitwise__ lowpan_rx_result;
+#define RX_CONTINUE ((__force lowpan_rx_result) 0u)
+#define RX_DROP_UNUSABLE ((__force lowpan_rx_result) 1u)
+#define RX_DROP ((__force lowpan_rx_result) 2u)
+#define RX_QUEUED ((__force lowpan_rx_result) 3u)
+
+#define LOWPAN_DISPATCH_FRAG1 0xc0
+#define LOWPAN_DISPATCH_FRAGN 0xe0
struct lowpan_create_arg {
u16 tag;
@@ -37,26 +47,18 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
}
}
-struct lowpan_dev_record {
- struct net_device *ldev;
- struct list_head list;
-};
-
/* private device info */
struct lowpan_dev_info {
- struct net_device *real_dev; /* real WPAN device ptr */
- struct mutex dev_list_mtx; /* mutex for list ops */
+ struct net_device *wdev; /* wpan device ptr */
u16 fragment_tag;
};
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
- return netdev_priv(dev);
+ return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
}
-extern struct list_head lowpan_devices;
-
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
void lowpan_net_frag_exit(void);
int lowpan_net_frag_init(void);
@@ -69,4 +71,7 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
const void *_saddr, unsigned int len);
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
+int lowpan_iphc_decompress(struct sk_buff *skb);
+lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb);
+
#endif /* __IEEE802154_6LOWPAN_I_H__ */
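
The header above drops the old lowpan_dev_record list and instead carves the 802.15.4-specific lowpan_dev_info out of a generic 6LoWPAN private area via lowpan_priv(dev)->priv. Below is a minimal standalone sketch of that layout idea, assuming only a flexible array member after the generic state; the names generic_priv, lltype_priv and PRIV_SIZE are placeholders, not the kernel's definitions.

/* Standalone sketch, not kernel code: one allocation holds the generic
 * 6LoWPAN state followed by the link-layer specific struct, which is the
 * idea behind lowpan_priv(dev)->priv and LOWPAN_PRIV_SIZE() above.
 */
#include <stdio.h>
#include <stdlib.h>

struct generic_priv {
    int lltype;                     /* shared 6LoWPAN state would live here */
    unsigned char priv[];           /* link-layer private area follows */
};

struct lltype_priv {
    unsigned short fragment_tag;    /* 802.15.4-specific state */
};

#define PRIV_SIZE(llpriv_size) (sizeof(struct generic_priv) + (llpriv_size))

static struct lltype_priv *lltype_priv(struct generic_priv *gp)
{
    return (struct lltype_priv *)gp->priv;
}

int main(void)
{
    struct generic_priv *gp = calloc(1, PRIV_SIZE(sizeof(struct lltype_priv)));

    if (!gp)
        return 1;
    lltype_priv(gp)->fragment_tag = 42;
    printf("fragment_tag=%d\n", lltype_priv(gp)->fragment_tag);
    free(gp);
    return 0;
}
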
diff --git a/kernel/net/ieee802154/6lowpan/core.c b/kernel/net/ieee802154/6lowpan/core.c
index 0ae5822ef..20c49c724 100644
--- a/kernel/net/ieee802154/6lowpan/core.c
+++ b/kernel/net/ieee802154/6lowpan/core.c
@@ -52,29 +52,7 @@
#include "6lowpan_i.h"
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
-
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
+static int open_count;
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
@@ -83,7 +61,7 @@ static struct header_ops lowpan_header_ops = {
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;
-static void lowpan_set_lockdep_class_one(struct net_device *dev,
+static void lowpan_set_lockdep_class_one(struct net_device *ldev,
struct netdev_queue *txq,
void *_unused)
{
@@ -91,42 +69,47 @@ static void lowpan_set_lockdep_class_one(struct net_device *dev,
&lowpan_netdev_xmit_lock_key);
}
-static int lowpan_dev_init(struct net_device *dev)
+static int lowpan_dev_init(struct net_device *ldev)
+{
+ netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
+ ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+ return 0;
+}
+
+static int lowpan_open(struct net_device *dev)
{
- netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+ if (!open_count)
+ lowpan_rx_init();
+ open_count++;
+ return 0;
+}
+
+static int lowpan_stop(struct net_device *dev)
+{
+ open_count--;
+ if (!open_count)
+ lowpan_rx_exit();
return 0;
}
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
+ .ndo_open = lowpan_open,
+ .ndo_stop = lowpan_stop,
};
-static struct ieee802154_mlme_ops lowpan_mlme = {
- .get_pan_id = lowpan_get_pan_id,
- .get_short_addr = lowpan_get_short_addr,
- .get_dsn = lowpan_get_dsn,
-};
-
-static void lowpan_setup(struct net_device *dev)
+static void lowpan_setup(struct net_device *ldev)
{
- dev->addr_len = IEEE802154_ADDR_LEN;
- memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->type = ARPHRD_6LOWPAN;
- /* Frame Control + Sequence Number + Address fields + Security Header */
- dev->hard_header_len = 2 + 1 + 20 + 14;
- dev->needed_tailroom = 2; /* FCS */
- dev->mtu = IPV6_MIN_MTU;
- dev->tx_queue_len = 0;
- dev->flags = IFF_BROADCAST | IFF_MULTICAST;
- dev->watchdog_timeo = 0;
-
- dev->netdev_ops = &lowpan_netdev_ops;
- dev->header_ops = &lowpan_header_ops;
- dev->ml_priv = &lowpan_mlme;
- dev->destructor = free_netdev;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+ /* We need an ipv6hdr as minimum len when calling xmit */
+ ldev->hard_header_len = sizeof(struct ipv6hdr);
+ ldev->flags = IFF_BROADCAST | IFF_MULTICAST;
+
+ ldev->netdev_ops = &lowpan_netdev_ops;
+ ldev->header_ops = &lowpan_header_ops;
+ ldev->destructor = free_netdev;
+ ldev->features |= NETIF_F_NETNS_LOCAL;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -138,11 +121,10 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct net_device *real_dev;
- struct lowpan_dev_record *entry;
+ struct net_device *wdev;
int ret;
ASSERT_RTNL();
@@ -150,78 +132,61 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
pr_debug("adding new link\n");
if (!tb[IFLA_LINK] ||
- !net_eq(dev_net(dev), &init_net))
+ !net_eq(dev_net(ldev), &init_net))
return -EINVAL;
- /* find and hold real wpan device */
- real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev)
+ /* find and hold wpan device */
+ wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK]));
+ if (!wdev)
return -ENODEV;
- if (real_dev->type != ARPHRD_IEEE802154) {
- dev_put(real_dev);
+ if (wdev->type != ARPHRD_IEEE802154) {
+ dev_put(wdev);
return -EINVAL;
}
- lowpan_dev_info(dev)->real_dev = real_dev;
- mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- dev_put(real_dev);
- lowpan_dev_info(dev)->real_dev = NULL;
- return -ENOMEM;
+ if (wdev->ieee802154_ptr->lowpan_dev) {
+ dev_put(wdev);
+ return -EBUSY;
}
- entry->ldev = dev;
-
+ lowpan_dev_info(ldev)->wdev = wdev;
/* Set the lowpan hardware address to the wpan hardware address. */
- memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
-
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, &lowpan_devices);
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
- ret = register_netdevice(dev);
- if (ret >= 0) {
- if (!lowpan_open_count)
- lowpan_rx_init();
- lowpan_open_count++;
+ memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
+ /* We need headroom for possible wpan_dev_hard_header call and tailroom
+ * for encryption/fcs handling. The lowpan interface will replace
+ * the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN
+ * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6
+ * header.
+ */
+ ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN +
+ wdev->needed_headroom;
+ ldev->needed_tailroom = wdev->needed_tailroom;
+
+ lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
+
+ ret = register_netdevice(ldev);
+ if (ret < 0) {
+ dev_put(wdev);
+ return ret;
}
- return ret;
+ wdev->ieee802154_ptr->lowpan_dev = ldev;
+ return 0;
}
-static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
{
- struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
- struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry, *tmp;
+ struct net_device *wdev = lowpan_dev_info(ldev)->wdev;
ASSERT_RTNL();
- lowpan_open_count--;
- if (!lowpan_open_count)
- lowpan_rx_exit();
-
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (entry->ldev == dev) {
- list_del(&entry->list);
- kfree(entry);
- }
- }
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
- mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
- unregister_netdevice_queue(dev, head);
-
- dev_put(real_dev);
+ wdev->ieee802154_ptr->lowpan_dev = NULL;
+ unregister_netdevice(ldev);
+ dev_put(wdev);
}
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
.kind = "lowpan",
- .priv_size = sizeof(struct lowpan_dev_info),
+ .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
.setup = lowpan_setup,
.newlink = lowpan_newlink,
.dellink = lowpan_dellink,
@@ -241,20 +206,22 @@ static inline void lowpan_netlink_fini(void)
static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- LIST_HEAD(del_list);
- struct lowpan_dev_record *entry, *tmp;
+ struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
- if (dev->type != ARPHRD_IEEE802154)
+ if (wdev->type != ARPHRD_IEEE802154)
goto out;
- if (event == NETDEV_UNREGISTER) {
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (lowpan_dev_info(entry->ldev)->real_dev == dev)
- lowpan_dellink(entry->ldev, &del_list);
- }
-
- unregister_netdevice_many(&del_list);
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ /* Check if wpan interface is unregistered that we
+ * also delete possible lowpan interfaces which belongs
+ * to the wpan interface.
+ */
+ if (wdev->ieee802154_ptr->lowpan_dev)
+ lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
+ break;
+ default:
+ break;
}
out:
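
core.c replaces the global device list with a lowpan_dev back-pointer in wpan_dev and a module-wide open_count that brackets lowpan_rx_init()/lowpan_rx_exit(). A minimal standalone sketch of that first-open/last-stop refcount idea follows; rx_hook_register/rx_hook_unregister are placeholder names, and since the kernel serializes this under RTNL no extra locking is shown.

/* Standalone sketch: the first open installs the shared RX hook, the last
 * stop removes it; intermediate opens only bump the counter.
 */
#include <stdio.h>

static int open_count;

static void rx_hook_register(void)   { puts("rx hook registered"); }
static void rx_hook_unregister(void) { puts("rx hook unregistered"); }

static int iface_open(void)
{
    if (!open_count)
        rx_hook_register();
    open_count++;
    return 0;
}

static int iface_stop(void)
{
    open_count--;
    if (!open_count)
        rx_hook_unregister();
    return 0;
}

int main(void)
{
    iface_open();   /* first interface: hook goes in */
    iface_open();   /* second interface: refcount only */
    iface_stop();
    iface_stop();   /* last interface: hook comes out */
    return 0;
}
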
diff --git a/kernel/net/ieee802154/6lowpan/reassembly.c b/kernel/net/ieee802154/6lowpan/reassembly.c
index f46e4d130..6b437e876 100644
--- a/kernel/net/ieee802154/6lowpan/reassembly.c
+++ b/kernel/net/ieee802154/6lowpan/reassembly.c
@@ -32,21 +32,10 @@
static const char lowpan_frags_cache_name[] = "lowpan-frags";
-struct lowpan_frag_info {
- u16 d_tag;
- u16 d_size;
- u8 d_offset;
-};
-
-static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
-{
- return (struct lowpan_frag_info *)skb->cb;
-}
-
static struct inet_frags lowpan_frags;
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
- struct sk_buff *prev, struct net_device *dev);
+ struct sk_buff *prev, struct net_device *ldev);
static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
const struct ieee802154_addr *saddr,
@@ -111,7 +100,7 @@ out:
}
static inline struct lowpan_frag_queue *
-fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
+fq_find(struct net *net, const struct lowpan_802154_cb *cb,
const struct ieee802154_addr *src,
const struct ieee802154_addr *dst)
{
@@ -121,12 +110,12 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
- arg.tag = frag_info->d_tag;
- arg.d_size = frag_info->d_size;
+ arg.tag = cb->d_tag;
+ arg.d_size = cb->d_size;
arg.src = src;
arg.dst = dst;
- hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
+ hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
q = inet_frag_find(&ieee802154_lowpan->frags,
&lowpan_frags, &arg, hash);
@@ -138,17 +127,17 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
}
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
- struct sk_buff *skb, const u8 frag_type)
+ struct sk_buff *skb, u8 frag_type)
{
struct sk_buff *prev, *next;
- struct net_device *dev;
+ struct net_device *ldev;
int end, offset;
if (fq->q.flags & INET_FRAG_COMPLETE)
goto err;
- offset = lowpan_cb(skb)->d_offset << 3;
- end = lowpan_cb(skb)->d_size;
+ offset = lowpan_802154_cb(skb)->d_offset << 3;
+ end = lowpan_802154_cb(skb)->d_size;
/* Is this the final fragment? */
if (offset + skb->len == end) {
@@ -174,13 +163,16 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
* this fragment, right?
*/
prev = fq->q.fragments_tail;
- if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
+ if (!prev ||
+ lowpan_802154_cb(prev)->d_offset <
+ lowpan_802154_cb(skb)->d_offset) {
next = NULL;
goto found;
}
prev = NULL;
for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
+ if (lowpan_802154_cb(next)->d_offset >=
+ lowpan_802154_cb(skb)->d_offset)
break; /* bingo! */
prev = next;
}
@@ -195,19 +187,16 @@ found:
else
fq->q.fragments = skb;
- dev = skb->dev;
- if (dev)
+ ldev = skb->dev;
+ if (ldev)
skb->dev = NULL;
fq->q.stamp = skb->tstamp;
- if (frag_type == LOWPAN_DISPATCH_FRAG1) {
- /* Calculate uncomp. 6lowpan header to estimate full size */
- fq->q.meat += lowpan_uncompress_size(skb, NULL);
+ if (frag_type == LOWPAN_DISPATCH_FRAG1)
fq->q.flags |= INET_FRAG_FIRST_IN;
- } else {
- fq->q.meat += skb->len;
- }
- add_frag_mem_limit(&fq->q, skb->truesize);
+
+ fq->q.meat += skb->len;
+ add_frag_mem_limit(fq->q.net, skb->truesize);
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len) {
@@ -215,7 +204,7 @@ found:
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- res = lowpan_frag_reasm(fq, prev, dev);
+ res = lowpan_frag_reasm(fq, prev, ldev);
skb->_skb_refdst = orefdst;
return res;
}
@@ -235,7 +224,7 @@ err:
* the last and the first frames arrived and all the bits are here.
*/
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev)
+ struct net_device *ldev)
{
struct sk_buff *fp, *head = fq->q.fragments;
int sum_truesize;
@@ -287,7 +276,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
clone->data_len = clone->len;
head->data_len -= clone->len;
head->len -= clone->len;
- add_frag_mem_limit(&fq->q, clone->truesize);
+ add_frag_mem_limit(fq->q.net, clone->truesize);
}
WARN_ON(head == NULL);
@@ -310,10 +299,10 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
}
fp = next;
}
- sub_frag_mem_limit(&fq->q, sum_truesize);
+ sub_frag_mem_limit(fq->q.net, sum_truesize);
head->next = NULL;
- head->dev = dev;
+ head->dev = ldev;
head->tstamp = fq->q.stamp;
fq->q.fragments = NULL;
@@ -325,24 +314,87 @@ out_oom:
return -1;
}
-static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
- struct lowpan_frag_info *frag_info)
+static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
+ lowpan_rx_result res)
+{
+ switch (res) {
+ case RX_QUEUED:
+ return NET_RX_SUCCESS;
+ case RX_CONTINUE:
+ /* nobody cared about this packet */
+ net_warn_ratelimited("%s: received unknown dispatch\n",
+ __func__);
+
+ /* fall-through */
+ default:
+ /* all others failure */
+ return NET_RX_DROP;
+ }
+}
+
+static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
+{
+ int ret;
+
+ if (!lowpan_is_iphc(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ ret = lowpan_iphc_decompress(skb);
+ if (ret < 0)
+ return RX_DROP;
+
+ return RX_QUEUED;
+}
+
+static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
+{
+ lowpan_rx_result res;
+
+#define CALL_RXH(rxh) \
+ do { \
+ res = rxh(skb); \
+ if (res != RX_CONTINUE) \
+ goto rxh_next; \
+ } while (0)
+
+ /* likely at first */
+ CALL_RXH(lowpan_frag_rx_h_iphc);
+ CALL_RXH(lowpan_rx_h_ipv6);
+
+rxh_next:
+ return lowpan_frag_rx_handlers_result(skb, res);
+#undef CALL_RXH
+}
+
+#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK 0x07
+#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT 8
+
+static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
+ struct lowpan_802154_cb *cb)
{
bool fail;
- u8 pattern = 0, low = 0;
+ u8 high = 0, low = 0;
__be16 d_tag = 0;
- fail = lowpan_fetch_skb(skb, &pattern, 1);
+ fail = lowpan_fetch_skb(skb, &high, 1);
fail |= lowpan_fetch_skb(skb, &low, 1);
- frag_info->d_size = (pattern & 7) << 8 | low;
+ /* remove the dispatch value and use first three bits as high value
+ * for the datagram size
+ */
+ cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
+ LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
fail |= lowpan_fetch_skb(skb, &d_tag, 2);
- frag_info->d_tag = ntohs(d_tag);
+ cb->d_tag = ntohs(d_tag);
if (frag_type == LOWPAN_DISPATCH_FRAGN) {
- fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
+ fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
} else {
skb_reset_network_header(skb);
- frag_info->d_offset = 0;
+ cb->d_offset = 0;
+ /* check if datagram_size has ipv6hdr on FRAG1 */
+ fail |= cb->d_size < sizeof(struct ipv6hdr);
+ /* check if we can dereference the dispatch value */
+ fail |= !skb->len;
}
if (unlikely(fail))
@@ -351,27 +403,33 @@ static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
return 0;
}
-int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
+int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
struct lowpan_frag_queue *fq;
struct net *net = dev_net(skb->dev);
- struct lowpan_frag_info *frag_info = lowpan_cb(skb);
- struct ieee802154_addr source, dest;
+ struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
+ struct ieee802154_hdr hdr;
int err;
- source = mac_cb(skb)->source;
- dest = mac_cb(skb)->dest;
+ if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+ goto err;
- err = lowpan_get_frag_info(skb, frag_type, frag_info);
+ err = lowpan_get_cb(skb, frag_type, cb);
if (err < 0)
goto err;
- if (frag_info->d_size > IPV6_MIN_MTU) {
+ if (frag_type == LOWPAN_DISPATCH_FRAG1) {
+ err = lowpan_invoke_frag_rx_handlers(skb);
+ if (err == NET_RX_DROP)
+ goto err;
+ }
+
+ if (cb->d_size > IPV6_MIN_MTU) {
net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
goto err;
}
- fq = fq_find(net, frag_info, &source, &dest);
+ fq = fq_find(net, cb, &hdr.source, &hdr.dest);
if (fq != NULL) {
int ret;
@@ -387,7 +445,6 @@ err:
kfree_skb(skb);
return -1;
}
-EXPORT_SYMBOL(lowpan_frag_rcv);
#ifdef CONFIG_SYSCTL
static int zero;
@@ -523,14 +580,19 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
- inet_frags_init_net(&ieee802154_lowpan->frags);
-
- return lowpan_frags_ns_sysctl_register(net);
+ res = inet_frags_init_net(&ieee802154_lowpan->frags);
+ if (res)
+ return res;
+ res = lowpan_frags_ns_sysctl_register(net);
+ if (res)
+ inet_frags_uninit_net(&ieee802154_lowpan->frags);
+ return res;
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
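
The reassembly path now parses the fragment header into lowpan_802154_cb via lowpan_get_cb(). A standalone sketch of that RFC 4944 FRAG1/FRAGN parsing follows, assuming the 5-bit dispatch, the 11-bit datagram size split across the first two bytes, the 16-bit big-endian tag, and (for FRAGN only) one offset byte counted in 8-octet units; all names below are local to the sketch.

/* Standalone sketch of the FRAG1/FRAGN header layout parsed above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DISPATCH_FRAG_MASK      0xf8
#define DISPATCH_FRAG1          0xc0
#define DISPATCH_FRAGN          0xe0
#define DGRAM_SIZE_HIGH_MASK    0x07

struct frag_cb {
    uint16_t d_size;
    uint16_t d_tag;
    uint8_t d_offset;       /* in 8-octet units, 0 for FRAG1 */
};

static int parse_frag_header(const uint8_t *buf, size_t len, struct frag_cb *cb)
{
    uint8_t dispatch;

    if (len < 4)
        return -1;
    dispatch = buf[0] & DISPATCH_FRAG_MASK;
    cb->d_size = (uint16_t)((buf[0] & DGRAM_SIZE_HIGH_MASK) << 8) | buf[1];
    cb->d_tag = (uint16_t)(buf[2] << 8) | buf[3];   /* network byte order */

    if (dispatch == DISPATCH_FRAG1) {
        cb->d_offset = 0;
        return 4;                   /* header length consumed */
    }
    if (dispatch == DISPATCH_FRAGN && len >= 5) {
        cb->d_offset = buf[4];
        return 5;
    }
    return -1;
}

int main(void)
{
    const uint8_t fragn[] = { 0xe4, 0xd2, 0x00, 0x2a, 0x0c };
    struct frag_cb cb;

    if (parse_frag_header(fragn, sizeof(fragn), &cb) > 0)
        printf("size=%u tag=%u offset=%u bytes\n",
               (unsigned)cb.d_size, (unsigned)cb.d_tag,
               (unsigned)cb.d_offset * 8);
    return 0;
}
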
diff --git a/kernel/net/ieee802154/6lowpan/rx.c b/kernel/net/ieee802154/6lowpan/rx.c
index 4be1d289a..ef185dd41 100644
--- a/kernel/net/ieee802154/6lowpan/rx.c
+++ b/kernel/net/ieee802154/6lowpan/rx.c
@@ -11,147 +11,307 @@
#include <linux/if_arp.h>
#include <net/6lowpan.h>
+#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include "6lowpan_i.h"
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct lowpan_dev_record *entry;
- struct sk_buff *skb_cp;
- int stat = NET_RX_SUCCESS;
+#define LOWPAN_DISPATCH_FIRST 0xc0
+#define LOWPAN_DISPATCH_FRAG_MASK 0xf8
+
+#define LOWPAN_DISPATCH_NALP 0x00
+#define LOWPAN_DISPATCH_ESC 0x40
+#define LOWPAN_DISPATCH_HC1 0x42
+#define LOWPAN_DISPATCH_DFF 0x43
+#define LOWPAN_DISPATCH_BC0 0x50
+#define LOWPAN_DISPATCH_MESH 0x80
+static int lowpan_give_skb_to_device(struct sk_buff *skb)
+{
skb->protocol = htons(ETH_P_IPV6);
- skb->pkt_type = PACKET_HOST;
+ skb->dev->stats.rx_packets++;
+ skb->dev->stats.rx_bytes += skb->len;
- rcu_read_lock();
- list_for_each_entry_rcu(entry, &lowpan_devices, list)
- if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
- skb_cp = skb_copy(skb, GFP_ATOMIC);
- if (!skb_cp) {
- kfree_skb(skb);
- rcu_read_unlock();
- return NET_RX_DROP;
- }
+ return netif_rx(skb);
+}
- skb_cp->dev = entry->ldev;
- stat = netif_rx(skb_cp);
- if (stat == NET_RX_DROP)
- break;
- }
- rcu_read_unlock();
+static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
+{
+ switch (res) {
+ case RX_CONTINUE:
+ /* nobody cared about this packet */
+ net_warn_ratelimited("%s: received unknown dispatch\n",
+ __func__);
+
+ /* fall-through */
+ case RX_DROP_UNUSABLE:
+ kfree_skb(skb);
- consume_skb(skb);
+ /* fall-through */
+ case RX_DROP:
+ return NET_RX_DROP;
+ case RX_QUEUED:
+ return lowpan_give_skb_to_device(skb);
+ default:
+ break;
+ }
- return stat;
+ return NET_RX_DROP;
}
-static int
-iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+static inline bool lowpan_is_frag1(u8 dispatch)
{
- u8 iphc0, iphc1;
- struct ieee802154_addr_sa sa, da;
- void *sap, *dap;
+ return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
+}
- raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
- /* at least two bytes will be used for the encoding */
- if (skb->len < 2)
- return -EINVAL;
+static inline bool lowpan_is_fragn(u8 dispatch)
+{
+ return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
+}
- if (lowpan_fetch_skb_u8(skb, &iphc0))
- return -EINVAL;
+static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
+{
+ int ret;
- if (lowpan_fetch_skb_u8(skb, &iphc1))
- return -EINVAL;
+ if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
+ lowpan_is_fragn(*skb_network_header(skb))))
+ return RX_CONTINUE;
- ieee802154_addr_to_sa(&sa, &hdr->source);
- ieee802154_addr_to_sa(&da, &hdr->dest);
+ ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
+ LOWPAN_DISPATCH_FRAG_MASK);
+ if (ret == 1)
+ return RX_QUEUED;
- if (sa.addr_type == IEEE802154_ADDR_SHORT)
- sap = &sa.short_addr;
- else
- sap = &sa.hwaddr;
+ /* Packet is freed by lowpan_frag_rcv on error or put into the frag
+ * bucket.
+ */
+ return RX_DROP;
+}
- if (da.addr_type == IEEE802154_ADDR_SHORT)
- dap = &da.short_addr;
- else
- dap = &da.hwaddr;
+int lowpan_iphc_decompress(struct sk_buff *skb)
+{
+ struct ieee802154_hdr hdr;
- return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
- IEEE802154_ADDR_LEN, dap, da.addr_type,
- IEEE802154_ADDR_LEN, iphc0, iphc1);
+ if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+ return -EINVAL;
+
+ return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
}
-static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
+static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
{
- struct ieee802154_hdr hdr;
int ret;
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto drop;
+ if (!lowpan_is_iphc(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ /* Setting datagram_offset to zero indicates non frag handling
+ * while doing lowpan_header_decompress.
+ */
+ lowpan_802154_cb(skb)->d_size = 0;
- if (!netif_running(dev))
- goto drop_skb;
+ ret = lowpan_iphc_decompress(skb);
+ if (ret < 0)
+ return RX_DROP_UNUSABLE;
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto drop_skb;
+ return RX_QUEUED;
+}
- if (dev->type != ARPHRD_IEEE802154)
- goto drop_skb;
+lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
+{
+ if (!lowpan_is_ipv6(*skb_network_header(skb)))
+ return RX_CONTINUE;
- if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
- goto drop_skb;
-
- /* check that it's our buffer */
- if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
- /* Pull off the 1-byte of 6lowpan header. */
- skb_pull(skb, 1);
- return lowpan_give_skb_to_devices(skb, NULL);
- } else {
- switch (skb->data[0] & 0xe0) {
- case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
- ret = iphc_decompress(skb, &hdr);
- if (ret < 0)
- goto drop_skb;
-
- return lowpan_give_skb_to_devices(skb, NULL);
- case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
- ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
- if (ret == 1) {
- ret = iphc_decompress(skb, &hdr);
- if (ret < 0)
- goto drop_skb;
-
- return lowpan_give_skb_to_devices(skb, NULL);
- } else if (ret == -1) {
- return NET_RX_DROP;
- } else {
- return NET_RX_SUCCESS;
- }
- case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
- ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
- if (ret == 1) {
- ret = iphc_decompress(skb, &hdr);
- if (ret < 0)
- goto drop_skb;
-
- return lowpan_give_skb_to_devices(skb, NULL);
- } else if (ret == -1) {
- return NET_RX_DROP;
- } else {
- return NET_RX_SUCCESS;
- }
- default:
- break;
- }
+ /* Pull off the 1-byte of 6lowpan header. */
+ skb_pull(skb, 1);
+ return RX_QUEUED;
+}
+
+static inline bool lowpan_is_esc(u8 dispatch)
+{
+ return dispatch == LOWPAN_DISPATCH_ESC;
+}
+
+static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
+{
+ if (!lowpan_is_esc(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ net_warn_ratelimited("%s: %s\n", skb->dev->name,
+ "6LoWPAN ESC not supported\n");
+
+ return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_hc1(u8 dispatch)
+{
+ return dispatch == LOWPAN_DISPATCH_HC1;
+}
+
+static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
+{
+ if (!lowpan_is_hc1(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ net_warn_ratelimited("%s: %s\n", skb->dev->name,
+ "6LoWPAN HC1 not supported\n");
+
+ return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_dff(u8 dispatch)
+{
+ return dispatch == LOWPAN_DISPATCH_DFF;
+}
+
+static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
+{
+ if (!lowpan_is_dff(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ net_warn_ratelimited("%s: %s\n", skb->dev->name,
+ "6LoWPAN DFF not supported\n");
+
+ return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_bc0(u8 dispatch)
+{
+ return dispatch == LOWPAN_DISPATCH_BC0;
+}
+
+static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
+{
+ if (!lowpan_is_bc0(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ net_warn_ratelimited("%s: %s\n", skb->dev->name,
+ "6LoWPAN BC0 not supported\n");
+
+ return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_mesh(u8 dispatch)
+{
+ return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
+}
+
+static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
+{
+ if (!lowpan_is_mesh(*skb_network_header(skb)))
+ return RX_CONTINUE;
+
+ net_warn_ratelimited("%s: %s\n", skb->dev->name,
+ "6LoWPAN MESH not supported\n");
+
+ return RX_DROP_UNUSABLE;
+}
+
+static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
+{
+ lowpan_rx_result res;
+
+#define CALL_RXH(rxh) \
+ do { \
+ res = rxh(skb); \
+ if (res != RX_CONTINUE) \
+ goto rxh_next; \
+ } while (0)
+
+ /* likely at first */
+ CALL_RXH(lowpan_rx_h_iphc);
+ CALL_RXH(lowpan_rx_h_frag);
+ CALL_RXH(lowpan_rx_h_ipv6);
+ CALL_RXH(lowpan_rx_h_esc);
+ CALL_RXH(lowpan_rx_h_hc1);
+ CALL_RXH(lowpan_rx_h_dff);
+ CALL_RXH(lowpan_rx_h_bc0);
+ CALL_RXH(lowpan_rx_h_mesh);
+
+rxh_next:
+ return lowpan_rx_handlers_result(skb, res);
+#undef CALL_RXH
+}
+
+static inline bool lowpan_is_nalp(u8 dispatch)
+{
+ return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
+}
+
+/* Lookup for reserved dispatch values at:
+ * https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
+ *
+ * Last Updated: 2015-01-22
+ */
+static inline bool lowpan_is_reserved(u8 dispatch)
+{
+ return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
+ (dispatch >= 0x51 && dispatch <= 0x5F) ||
+ (dispatch >= 0xc8 && dispatch <= 0xdf) ||
+ (dispatch >= 0xe8 && dispatch <= 0xff));
+}
+
+/* lowpan_rx_h_check checks on generic 6LoWPAN requirements
+ * in MAC and 6LoWPAN header.
+ *
+ * Don't manipulate the skb here, it could be shared buffer.
+ */
+static inline bool lowpan_rx_h_check(struct sk_buff *skb)
+{
+ __le16 fc = ieee802154_get_fc_from_skb(skb);
+
+ /* check on ieee802154 conform 6LoWPAN header */
+ if (!ieee802154_is_data(fc) ||
+ !ieee802154_is_intra_pan(fc))
+ return false;
+
+ /* check if we can dereference the dispatch */
+ if (unlikely(!skb->len))
+ return false;
+
+ if (lowpan_is_nalp(*skb_network_header(skb)) ||
+ lowpan_is_reserved(*skb_network_header(skb)))
+ return false;
+
+ return true;
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
+ struct packet_type *pt, struct net_device *orig_wdev)
+{
+ struct net_device *ldev;
+
+ if (wdev->type != ARPHRD_IEEE802154 ||
+ skb->pkt_type == PACKET_OTHERHOST ||
+ !lowpan_rx_h_check(skb))
+ goto drop;
+
+ ldev = wdev->ieee802154_ptr->lowpan_dev;
+ if (!ldev || !netif_running(ldev))
+ goto drop;
+
+ /* Replacing skb->dev and followed rx handlers will manipulate skb. */
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ skb->dev = ldev;
+
+ /* When receive frag1 it's likely that we manipulate the buffer.
+ * When recevie iphc we manipulate the data buffer. So we need
+ * to unshare the buffer.
+ */
+ if (lowpan_is_frag1(*skb_network_header(skb)) ||
+ lowpan_is_iphc(*skb_network_header(skb))) {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
}
-drop_skb:
- kfree_skb(skb);
+ return lowpan_invoke_rx_handlers(skb);
+
drop:
+ kfree_skb(skb);
+out:
return NET_RX_DROP;
}
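
rx.c converts the old switch on the dispatch byte into a chain of small handlers driven by a CALL_RXH macro: each handler either claims the skb or returns RX_CONTINUE so the next one runs. A self-contained sketch of that pattern follows; the handler names and the 0x41 uncompressed-IPv6 dispatch value are used here only for illustration.

/* Standalone sketch of the receive-handler chain: try handlers in order
 * of likelihood, stop at the first one that does not return RX_CONTINUE.
 */
#include <stdio.h>

enum rx_result { RX_CONTINUE, RX_DROP_UNUSABLE, RX_DROP, RX_QUEUED };

struct pkt { unsigned char dispatch; };

static enum rx_result rx_h_ipv6(struct pkt *p)
{
    return p->dispatch == 0x41 ? RX_QUEUED : RX_CONTINUE;
}

static enum rx_result rx_h_unsupported(struct pkt *p)
{
    (void)p;
    return RX_DROP_UNUSABLE;        /* terminal catch-all */
}

static enum rx_result invoke_rx_handlers(struct pkt *p)
{
    enum rx_result res;

#define CALL_RXH(rxh)               \
    do {                            \
        res = rxh(p);               \
        if (res != RX_CONTINUE)     \
            goto rxh_next;          \
    } while (0)

    CALL_RXH(rx_h_ipv6);            /* most likely first */
    CALL_RXH(rx_h_unsupported);
rxh_next:
#undef CALL_RXH
    return res;
}

int main(void)
{
    struct pkt ipv6 = { .dispatch = 0x41 };
    struct pkt other = { .dispatch = 0x50 };

    printf("ipv6  -> %d (RX_QUEUED=%d)\n", invoke_rx_handlers(&ipv6), RX_QUEUED);
    printf("other -> %d (RX_DROP_UNUSABLE=%d)\n",
           invoke_rx_handlers(&other), RX_DROP_UNUSABLE);
    return 0;
}
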
diff --git a/kernel/net/ieee802154/6lowpan/tx.c b/kernel/net/ieee802154/6lowpan/tx.c
index 2349070bd..d4353face 100644
--- a/kernel/net/ieee802154/6lowpan/tx.c
+++ b/kernel/net/ieee802154/6lowpan/tx.c
@@ -10,9 +10,13 @@
#include <net/6lowpan.h>
#include <net/ieee802154_netdev.h>
+#include <net/mac802154.h>
#include "6lowpan_i.h"
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
/* don't save pan id, it's intra pan */
struct lowpan_addr {
u8 mode;
@@ -36,7 +40,14 @@ lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
sizeof(struct lowpan_addr_info));
}
-int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+/* This callback will be called from AF_PACKET and IPv6 stack, the AF_PACKET
+ * sockets gives an 8 byte array for addresses only!
+ *
+ * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no
+ * sense here. We should disable it, the right use-case would be AF_INET6
+ * RAW/DGRAM sockets.
+ */
+int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
{
@@ -51,7 +62,7 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
return 0;
if (!saddr)
- saddr = dev->dev_addr;
+ saddr = ldev->dev_addr;
raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
@@ -71,28 +82,33 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
- const struct ieee802154_hdr *master_hdr)
+ const struct ieee802154_hdr *master_hdr, bool frag1)
{
- struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
+ struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
struct sk_buff *frag;
int rc;
- frag = alloc_skb(real_dev->hard_header_len +
- real_dev->needed_tailroom + size,
+ frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
GFP_ATOMIC);
if (likely(frag)) {
- frag->dev = real_dev;
+ frag->dev = wdev;
frag->priority = skb->priority;
- skb_reserve(frag, real_dev->hard_header_len);
+ skb_reserve(frag, wdev->needed_headroom);
skb_reset_network_header(frag);
*mac_cb(frag) = *mac_cb(skb);
- rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
- &master_hdr->source, size);
- if (rc < 0) {
- kfree_skb(frag);
- return ERR_PTR(rc);
+ if (frag1) {
+ memcpy(skb_put(frag, skb->mac_len),
+ skb_mac_header(skb), skb->mac_len);
+ } else {
+ rc = wpan_dev_hard_header(frag, wdev,
+ &master_hdr->dest,
+ &master_hdr->source, size);
+ if (rc < 0) {
+ kfree_skb(frag);
+ return ERR_PTR(rc);
+ }
}
} else {
frag = ERR_PTR(-ENOMEM);
@@ -104,15 +120,15 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
u8 *frag_hdr, int frag_hdrlen,
- int offset, int len)
+ int offset, int len, bool frag1)
{
struct sk_buff *frag;
raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
- frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+ frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
if (IS_ERR(frag))
- return -PTR_ERR(frag);
+ return PTR_ERR(frag);
memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
@@ -123,19 +139,17 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
}
static int
-lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
- const struct ieee802154_hdr *wpan_hdr)
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
+ const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
+ u16 dgram_offset)
{
- u16 dgram_size, dgram_offset;
__be16 frag_tag;
u8 frag_hdr[5];
int frag_cap, frag_len, payload_cap, rc;
int skb_unprocessed, skb_offset;
- dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
- skb->mac_len;
- frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
- lowpan_dev_info(dev)->fragment_tag++;
+ frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
+ lowpan_dev_info(ldev)->fragment_tag++;
frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
frag_hdr[1] = dgram_size & 0xff;
@@ -151,7 +165,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAG1_HEAD_SIZE, 0,
- frag_len + skb_network_header_len(skb));
+ frag_len + skb_network_header_len(skb),
+ true);
if (rc) {
pr_debug("%s unable to send FRAG1 packet (tag: %d)",
__func__, ntohs(frag_tag));
@@ -172,7 +187,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
- frag_len);
+ frag_len, false);
if (rc) {
pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
__func__, ntohs(frag_tag), skb_offset);
@@ -180,6 +195,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
}
} while (skb_unprocessed > frag_cap);
+ ldev->stats.tx_packets++;
+ ldev->stats.tx_bytes += dgram_size;
consume_skb(skb);
return NET_XMIT_SUCCESS;
@@ -188,8 +205,10 @@ err:
return rc;
}
-static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
+ u16 *dgram_size, u16 *dgram_offset)
{
+ struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
struct ieee802154_addr sa, da;
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
struct lowpan_addr_info info;
@@ -201,13 +220,16 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
daddr = &info.daddr.u.extended_addr;
saddr = &info.saddr.u.extended_addr;
- lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+ *dgram_size = skb->len;
+ lowpan_header_compress(skb, ldev, daddr, saddr);
+ /* dgram_offset = (saved bytes after compression) + lowpan header len */
+ *dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
cb->type = IEEE802154_FC_TYPE_DATA;
/* prepare wpan address data */
sa.mode = IEEE802154_ADDR_LONG;
- sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ sa.pan_id = wpan_dev->pan_id;
sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
/* intra-PAN communications */
@@ -216,27 +238,30 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
/* if the destination address is the broadcast address, use the
* corresponding short address
*/
- if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+ if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
da.mode = IEEE802154_ADDR_SHORT;
da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
cb->ackreq = false;
} else {
da.mode = IEEE802154_ADDR_LONG;
da.extended_addr = ieee802154_devaddr_from_raw(daddr);
- cb->ackreq = true;
+ cb->ackreq = wpan_dev->ackreq;
}
- return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
- ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+ return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
+ 0);
}
-netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
struct ieee802154_hdr wpan_hdr;
int max_single, ret;
+ u16 dgram_size, dgram_offset;
pr_debug("package xmit\n");
+ WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);
+
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
@@ -244,7 +269,7 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
if (!skb)
return NET_XMIT_DROP;
- ret = lowpan_header(skb, dev);
+ ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
if (ret < 0) {
kfree_skb(skb);
return NET_XMIT_DROP;
@@ -258,13 +283,16 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
max_single = ieee802154_max_payload(&wpan_hdr);
if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
- skb->dev = lowpan_dev_info(dev)->real_dev;
+ skb->dev = lowpan_dev_info(ldev)->wdev;
+ ldev->stats.tx_packets++;
+ ldev->stats.tx_bytes += dgram_size;
return dev_queue_xmit(skb);
} else {
netdev_tx_t rc;
pr_debug("frame is too big, fragmentation is needed\n");
- rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
+ rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
+ dgram_offset);
return rc < 0 ? NET_XMIT_DROP : rc;
}
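
On the transmit side, lowpan_xmit_fragmented() composes the FRAG1/FRAGN headers by splitting the 11-bit datagram size across the dispatch byte and the following byte, then appending the tag and, for FRAGN, the offset in 8-octet units. A standalone sketch of that header construction, mirroring the frag_hdr[] assignments above:

/* Standalone sketch of FRAG1/FRAGN header construction on transmit. */
#include <stdint.h>
#include <stdio.h>

#define DISPATCH_FRAG1  0xc0
#define DISPATCH_FRAGN  0xe0

static int build_frag1(uint8_t hdr[4], uint16_t dgram_size, uint16_t tag)
{
    hdr[0] = DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
    hdr[1] = dgram_size & 0xff;
    hdr[2] = tag >> 8;              /* big endian on the wire */
    hdr[3] = tag & 0xff;
    return 4;
}

static int build_fragn(uint8_t hdr[5], uint16_t dgram_size, uint16_t tag,
                       uint16_t dgram_offset)
{
    build_frag1(hdr, dgram_size, tag);
    hdr[0] = DISPATCH_FRAGN | ((dgram_size >> 8) & 0x07);
    hdr[4] = dgram_offset >> 3;     /* offset is carried in 8-octet units */
    return 5;
}

int main(void)
{
    uint8_t hdr[5];
    int i, n = build_fragn(hdr, 1234, 42, 96);

    for (i = 0; i < n; i++)
        printf("%02x ", (unsigned)hdr[i]);
    printf("\n");                   /* prints: e4 d2 00 2a 0c */
    return 0;
}
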
diff --git a/kernel/net/ieee802154/Kconfig b/kernel/net/ieee802154/Kconfig
index 1370d5b00..188135bcb 100644
--- a/kernel/net/ieee802154/Kconfig
+++ b/kernel/net/ieee802154/Kconfig
@@ -12,6 +12,11 @@ menuconfig IEEE802154
if IEEE802154
+config IEEE802154_NL802154_EXPERIMENTAL
+ bool "IEEE 802.15.4 experimental netlink support"
+ ---help---
+ Adds experimental netlink support for nl802154.
+
config IEEE802154_SOCKET
tristate "IEEE 802.15.4 socket interface"
default y
diff --git a/kernel/net/ieee802154/core.c b/kernel/net/ieee802154/core.c
index 2ee00e8a0..c35fdfa6d 100644
--- a/kernel/net/ieee802154/core.c
+++ b/kernel/net/ieee802154/core.c
@@ -95,6 +95,18 @@ cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx)
return result;
}
+struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx)
+{
+ struct cfg802154_registered_device *rdev;
+
+ ASSERT_RTNL();
+
+ rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx);
+ if (!rdev)
+ return NULL;
+ return &rdev->wpan_phy;
+}
+
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
{
@@ -121,8 +133,6 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
/* atomic_inc_return makes it start at 1, make it start at 0 */
rdev->wpan_phy_idx--;
- mutex_init(&rdev->wpan_phy.pib_lock);
-
INIT_LIST_HEAD(&rdev->wpan_dev_list);
device_initialize(&rdev->wpan_phy.dev);
dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
diff --git a/kernel/net/ieee802154/core.h b/kernel/net/ieee802154/core.h
index f3e95580c..231fade95 100644
--- a/kernel/net/ieee802154/core.h
+++ b/kernel/net/ieee802154/core.h
@@ -42,5 +42,6 @@ extern int cfg802154_rdev_list_generation;
void cfg802154_dev_free(struct cfg802154_registered_device *rdev);
struct cfg802154_registered_device *
cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
+struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx);
#endif /* __IEEE802154_CORE_H */
diff --git a/kernel/net/ieee802154/header_ops.c b/kernel/net/ieee802154/header_ops.c
index a051b6993..c7439f0fb 100644
--- a/kernel/net/ieee802154/header_ops.c
+++ b/kernel/net/ieee802154/header_ops.c
@@ -83,35 +83,35 @@ ieee802154_hdr_push_sechdr(u8 *buf, const struct ieee802154_sechdr *hdr)
}
int
-ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
- u8 buf[MAC802154_FRAME_HARD_HEADER_LEN];
+ u8 buf[IEEE802154_MAX_HEADER_LEN];
int pos = 2;
int rc;
- struct ieee802154_hdr_fc fc = hdr->fc;
+ struct ieee802154_hdr_fc *fc = &hdr->fc;
buf[pos++] = hdr->seq;
- fc.dest_addr_mode = hdr->dest.mode;
+ fc->dest_addr_mode = hdr->dest.mode;
rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false);
if (rc < 0)
return -EINVAL;
pos += rc;
- fc.source_addr_mode = hdr->source.mode;
+ fc->source_addr_mode = hdr->source.mode;
if (hdr->source.pan_id == hdr->dest.pan_id &&
hdr->dest.mode != IEEE802154_ADDR_NONE)
- fc.intra_pan = true;
+ fc->intra_pan = true;
- rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan);
+ rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan);
if (rc < 0)
return -EINVAL;
pos += rc;
- if (fc.security_enabled) {
- fc.version = 1;
+ if (fc->security_enabled) {
+ fc->version = 1;
rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec);
if (rc < 0)
@@ -120,7 +120,7 @@ ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
pos += rc;
}
- memcpy(buf, &fc, 2);
+ memcpy(buf, fc, 2);
memcpy(skb_push(skb, pos), buf, pos);
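
header_ops.c now takes a non-const ieee802154_hdr so that fields derived while serializing (intra_pan, the bumped frame version when security is on) land in the caller's header rather than in a local copy. One plausible, simplified illustration of that pass-by-pointer contract, using a stand-in struct rather than the kernel layout:

/* Standalone sketch: flags computed during the push are visible to the
 * caller afterwards because the header is passed by pointer.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum addr_mode { ADDR_NONE, ADDR_SHORT, ADDR_LONG };

struct hdr_fc {
    bool intra_pan;
    bool security_enabled;
    int version;
};

struct hdr {
    struct hdr_fc fc;
    uint16_t dest_pan_id, source_pan_id;
    enum addr_mode dest_mode;
};

static int hdr_push(struct hdr *hdr)
{
    if (hdr->source_pan_id == hdr->dest_pan_id &&
        hdr->dest_mode != ADDR_NONE)
        hdr->fc.intra_pan = true;

    if (hdr->fc.security_enabled)
        hdr->fc.version = 1;

    return 0;       /* a real implementation would return bytes written */
}

int main(void)
{
    struct hdr h = {
        .dest_pan_id = 0xbeef, .source_pan_id = 0xbeef,
        .dest_mode = ADDR_LONG,
    };

    hdr_push(&h);
    printf("intra_pan=%d version=%d\n", h.fc.intra_pan, h.fc.version);
    return 0;
}
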
diff --git a/kernel/net/ieee802154/nl-mac.c b/kernel/net/ieee802154/nl-mac.c
index 2b4955d7a..3503c3895 100644
--- a/kernel/net/ieee802154/nl-mac.c
+++ b/kernel/net/ieee802154/nl-mac.c
@@ -97,8 +97,10 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
BUG_ON(!phy);
get_device(&phy->dev);
- short_addr = ops->get_short_addr(dev);
- pan_id = ops->get_pan_id(dev);
+ rtnl_lock();
+ short_addr = dev->ieee802154_ptr->short_addr;
+ pan_id = dev->ieee802154_ptr->pan_id;
+ rtnl_unlock();
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
@@ -117,12 +119,12 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
rtnl_unlock();
if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
- params.transmit_power) ||
+ params.transmit_power / 100) ||
nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
params.cca.mode) ||
nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
- params.cca_ed_level) ||
+ params.cca_ed_level / 100) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
params.csma_retries) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
@@ -166,10 +168,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
if (!dev)
return NULL;
- /* Check on mtu is currently a hacked solution because lowpan
- * and wpan have the same ARPHRD type.
- */
- if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) {
+ if (dev->type != ARPHRD_IEEE802154) {
dev_put(dev);
return NULL;
}
@@ -244,7 +243,9 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
addr.mode = IEEE802154_ADDR_LONG;
addr.extended_addr = nla_get_hwaddr(
info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
- addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ rtnl_lock();
+ addr.pan_id = dev->ieee802154_ptr->pan_id;
+ rtnl_unlock();
ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
@@ -281,7 +282,9 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
addr.short_addr = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
}
- addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ rtnl_lock();
+ addr.pan_id = dev->ieee802154_ptr->pan_id;
+ rtnl_unlock();
ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
@@ -449,11 +452,7 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
for_each_netdev(net, dev) {
- /* Check on mtu is currently a hacked solution because lowpan
- * and wpan have the same ARPHRD type.
- */
- if (idx < s_idx || dev->type != ARPHRD_IEEE802154 ||
- dev->mtu != IEEE802154_MTU)
+ if (idx < s_idx || dev->type != ARPHRD_IEEE802154)
goto cont;
if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
@@ -510,7 +509,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
ops->get_mac_params(dev, &params);
if (info->attrs[IEEE802154_ATTR_TXPOWER])
- params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+ params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100;
if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
@@ -519,7 +518,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
- params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+ params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100;
if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
@@ -783,11 +782,7 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
int rc;
for_each_netdev(net, dev) {
- /* Check on mtu is currently a hacked solution because lowpan
- * and wpan have the same ARPHRD type.
- */
- if (idx < first_dev || dev->type != ARPHRD_IEEE802154 ||
- dev->mtu != IEEE802154_MTU)
+ if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
goto skip;
data.ops = ieee802154_mlme_ops(dev);
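
The nl-mac changes convert between the legacy netlink units (whole dBm for TX power and CCA ED level) and the internal representation, which is 100 times finer, hence the "* 100" on set and "/ 100" on get. A trivial standalone sketch of that conversion:

/* Standalone sketch of the dBm <-> 1/100 dBm conversion done above. */
#include <stdio.h>

static int dbm_to_internal(int dbm)      { return dbm * 100; }
static int internal_to_dbm(int internal) { return internal / 100; }

int main(void)
{
    int txpower = dbm_to_internal(-5);  /* userspace passed -5 dBm */

    printf("internal: %d, reported back: %d dBm\n",
           txpower, internal_to_dbm(txpower));
    return 0;
}
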
diff --git a/kernel/net/ieee802154/nl-phy.c b/kernel/net/ieee802154/nl-phy.c
index 346c6665d..77d73014b 100644
--- a/kernel/net/ieee802154/nl-phy.c
+++ b/kernel/net/ieee802154/nl-phy.c
@@ -50,26 +50,26 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
if (!hdr)
goto out;
- mutex_lock(&phy->pib_lock);
+ rtnl_lock();
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
goto nla_put_failure;
for (i = 0; i < 32; i++) {
- if (phy->channels_supported[i])
- buf[pages++] = phy->channels_supported[i] | (i << 27);
+ if (phy->supported.channels[i])
+ buf[pages++] = phy->supported.channels[i] | (i << 27);
}
if (pages &&
nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
pages * sizeof(uint32_t), buf))
goto nla_put_failure;
- mutex_unlock(&phy->pib_lock);
+ rtnl_unlock();
kfree(buf);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
- mutex_unlock(&phy->pib_lock);
+ rtnl_unlock();
genlmsg_cancel(msg, hdr);
out:
kfree(buf);
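
The legacy nl-phy dump keeps its u32 channel-list encoding: each entry packs the page index into the top five bits (i << 27) with that page's 27-bit channel bitmap in the low bits, now sourced from phy->supported.channels. A standalone sketch of the packing and unpacking, with a hypothetical page-0 bitmap for channels 11-26:

/* Standalone sketch of the page/channel packing used by the dump above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_page(uint32_t page, uint32_t channel_bitmap)
{
    return (page << 27) | (channel_bitmap & 0x07ffffff);
}

static void unpack_page(uint32_t entry, uint32_t *page, uint32_t *channels)
{
    *page = entry >> 27;
    *channels = entry & 0x07ffffff;
}

int main(void)
{
    /* page 0, channels 11-26 set (a typical 2.4 GHz bitmap, for example) */
    uint32_t entry = pack_page(0, 0x07fff800), page, channels;

    unpack_page(entry, &page, &channels);
    printf("page=%u channels=0x%08x\n", (unsigned)page, (unsigned)channels);
    return 0;
}
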
diff --git a/kernel/net/ieee802154/nl802154.c b/kernel/net/ieee802154/nl802154.c
index f3c12f6a4..16ef0d9f5 100644
--- a/kernel/net/ieee802154/nl802154.c
+++ b/kernel/net/ieee802154/nl802154.c
@@ -207,10 +207,11 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_PAGE] = { .type = NLA_U8, },
[NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
- [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
+ [NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
[NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
[NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
+ [NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
[NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
@@ -225,8 +226,92 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
[NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
+
+ [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
+
+ [NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+ [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ [NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, },
+ [NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, },
+ [NL802154_ATTR_SEC_OUT_KEY_ID] = { .type = NLA_NESTED, },
+ [NL802154_ATTR_SEC_FRAME_COUNTER] = { .type = NLA_U32 },
+
+ [NL802154_ATTR_SEC_LEVEL] = { .type = NLA_NESTED },
+ [NL802154_ATTR_SEC_DEVICE] = { .type = NLA_NESTED },
+ [NL802154_ATTR_SEC_DEVKEY] = { .type = NLA_NESTED },
+ [NL802154_ATTR_SEC_KEY] = { .type = NLA_NESTED },
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static int
+nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct cfg802154_registered_device **rdev,
+ struct wpan_dev **wpan_dev)
+{
+ int err;
+
+ rtnl_lock();
+
+ if (!cb->args[0]) {
+ err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize,
+ nl802154_fam.attrbuf, nl802154_fam.maxattr,
+ nl802154_policy);
+ if (err)
+ goto out_unlock;
+
+ *wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
+ nl802154_fam.attrbuf);
+ if (IS_ERR(*wpan_dev)) {
+ err = PTR_ERR(*wpan_dev);
+ goto out_unlock;
+ }
+ *rdev = wpan_phy_to_rdev((*wpan_dev)->wpan_phy);
+ /* 0 is the first index - add 1 to parse only once */
+ cb->args[0] = (*rdev)->wpan_phy_idx + 1;
+ cb->args[1] = (*wpan_dev)->identifier;
+ } else {
+ /* subtract the 1 again here */
+ struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1);
+ struct wpan_dev *tmp;
+
+ if (!wpan_phy) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ *rdev = wpan_phy_to_rdev(wpan_phy);
+ *wpan_dev = NULL;
+
+ list_for_each_entry(tmp, &(*rdev)->wpan_dev_list, list) {
+ if (tmp->identifier == cb->args[1]) {
+ *wpan_dev = tmp;
+ break;
+ }
+ }
+
+ if (!*wpan_dev) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
+ return 0;
+ out_unlock:
+ rtnl_unlock();
+ return err;
+}
+
+static void
+nl802154_finish_wpan_dev_dump(struct cfg802154_registered_device *rdev)
+{
+ rtnl_unlock();
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
/* message building helper */
static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
int flags, u8 cmd)
@@ -236,6 +321,28 @@ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
}
static int
+nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask)
+{
+ struct nlattr *nl_flags = nla_nest_start(msg, attr);
+ int i;
+
+ if (!nl_flags)
+ return -ENOBUFS;
+
+ i = 0;
+ while (mask) {
+ if ((mask & 1) && nla_put_flag(msg, i))
+ return -ENOBUFS;
+
+ mask >>= 1;
+ i++;
+ }
+
+ nla_nest_end(msg, nl_flags);
+ return 0;
+}
+
+static int
nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
struct sk_buff *msg)
{
@@ -248,7 +355,7 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
- rdev->wpan_phy.channels_supported[page]))
+ rdev->wpan_phy.supported.channels[page]))
return -ENOBUFS;
}
nla_nest_end(msg, nl_page);
@@ -256,12 +363,100 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
return 0;
}
+static int
+nl802154_put_capabilities(struct sk_buff *msg,
+ struct cfg802154_registered_device *rdev)
+{
+ const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported;
+ struct nlattr *nl_caps, *nl_channels;
+ int i;
+
+ nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS);
+ if (!nl_caps)
+ return -ENOBUFS;
+
+ nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS);
+ if (!nl_channels)
+ return -ENOBUFS;
+
+ for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
+ if (caps->channels[i]) {
+ if (nl802154_put_flags(msg, i, caps->channels[i]))
+ return -ENOBUFS;
+ }
+ }
+
+ nla_nest_end(msg, nl_channels);
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+ struct nlattr *nl_ed_lvls;
+
+ nl_ed_lvls = nla_nest_start(msg,
+ NL802154_CAP_ATTR_CCA_ED_LEVELS);
+ if (!nl_ed_lvls)
+ return -ENOBUFS;
+
+ for (i = 0; i < caps->cca_ed_levels_size; i++) {
+ if (nla_put_s32(msg, i, caps->cca_ed_levels[i]))
+ return -ENOBUFS;
+ }
+
+ nla_nest_end(msg, nl_ed_lvls);
+ }
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+ struct nlattr *nl_tx_pwrs;
+
+ nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS);
+ if (!nl_tx_pwrs)
+ return -ENOBUFS;
+
+ for (i = 0; i < caps->tx_powers_size; i++) {
+ if (nla_put_s32(msg, i, caps->tx_powers[i]))
+ return -ENOBUFS;
+ }
+
+ nla_nest_end(msg, nl_tx_pwrs);
+ }
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+ if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES,
+ caps->cca_modes) ||
+ nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS,
+ caps->cca_opts))
+ return -ENOBUFS;
+ }
+
+ if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+ caps->min_csma_backoffs) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+ caps->max_csma_backoffs) ||
+ nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+ caps->min_frame_retries) ||
+ nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+ caps->max_frame_retries) ||
+ nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES,
+ caps->iftypes) ||
+ nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt))
+ return -ENOBUFS;
+
+ nla_nest_end(msg, nl_caps);
+
+ return 0;
+}
+
static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
enum nl802154_commands cmd,
struct sk_buff *msg, u32 portid, u32 seq,
int flags)
{
+ struct nlattr *nl_cmds;
void *hdr;
+ int i;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
@@ -286,25 +481,77 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
rdev->wpan_phy.current_channel))
goto nla_put_failure;
- /* supported channels array */
+ /* TODO remove this behaviour, we still keep support it for a while
+ * so users can change the behaviour to the new one.
+ */
if (nl802154_send_wpan_phy_channels(rdev, msg))
goto nla_put_failure;
/* cca mode */
- if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
- rdev->wpan_phy.cca.mode))
- goto nla_put_failure;
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+ if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+ rdev->wpan_phy.cca.mode))
+ goto nla_put_failure;
+
+ if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+ if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+ rdev->wpan_phy.cca.opt))
+ goto nla_put_failure;
+ }
+ }
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+ if (nla_put_s32(msg, NL802154_ATTR_TX_POWER,
+ rdev->wpan_phy.transmit_power))
+ goto nla_put_failure;
+ }
- if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
- if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
- rdev->wpan_phy.cca.opt))
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+ if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL,
+ rdev->wpan_phy.cca_ed_level))
goto nla_put_failure;
}
- if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
- rdev->wpan_phy.transmit_power))
+ if (nl802154_put_capabilities(msg, rdev))
+ goto nla_put_failure;
+
+ nl_cmds = nla_nest_start(msg, NL802154_ATTR_SUPPORTED_COMMANDS);
+ if (!nl_cmds)
goto nla_put_failure;
+ i = 0;
+#define CMD(op, n) \
+ do { \
+ if (rdev->ops->op) { \
+ i++; \
+ if (nla_put_u32(msg, i, NL802154_CMD_ ## n)) \
+ goto nla_put_failure; \
+ } \
+ } while (0)
+
+ CMD(add_virtual_intf, NEW_INTERFACE);
+ CMD(del_virtual_intf, DEL_INTERFACE);
+ CMD(set_channel, SET_CHANNEL);
+ CMD(set_pan_id, SET_PAN_ID);
+ CMD(set_short_addr, SET_SHORT_ADDR);
+ CMD(set_backoff_exponent, SET_BACKOFF_EXPONENT);
+ CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
+ CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
+ CMD(set_lbt_mode, SET_LBT_MODE);
+ CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
+ CMD(set_tx_power, SET_TX_POWER);
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL)
+ CMD(set_cca_ed_level, SET_CCA_ED_LEVEL);
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE)
+ CMD(set_cca_mode, SET_CCA_MODE);
+
+#undef CMD
+ nla_nest_end(msg, nl_cmds);
+
finish:
genlmsg_end(msg, hdr);
return 0;
@@ -443,6 +690,107 @@ static inline u64 wpan_dev_id(struct wpan_dev *wpan_dev)
((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32);
}
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+#include <net/ieee802154_netdev.h>
+
+static int
+ieee802154_llsec_send_key_id(struct sk_buff *msg,
+ const struct ieee802154_llsec_key_id *desc)
+{
+ struct nlattr *nl_dev_addr;
+
+ if (nla_put_u32(msg, NL802154_KEY_ID_ATTR_MODE, desc->mode))
+ return -ENOBUFS;
+
+ switch (desc->mode) {
+ case NL802154_KEY_ID_MODE_IMPLICIT:
+ nl_dev_addr = nla_nest_start(msg, NL802154_KEY_ID_ATTR_IMPLICIT);
+ if (!nl_dev_addr)
+ return -ENOBUFS;
+
+ if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_PAN_ID,
+ desc->device_addr.pan_id) ||
+ nla_put_u32(msg, NL802154_DEV_ADDR_ATTR_MODE,
+ desc->device_addr.mode))
+ return -ENOBUFS;
+
+ switch (desc->device_addr.mode) {
+ case NL802154_DEV_ADDR_SHORT:
+ if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_SHORT,
+ desc->device_addr.short_addr))
+ return -ENOBUFS;
+ break;
+ case NL802154_DEV_ADDR_EXTENDED:
+ if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED,
+ desc->device_addr.extended_addr))
+ return -ENOBUFS;
+ break;
+ default:
+ /* userspace should handle unknown */
+ break;
+ }
+
+ nla_nest_end(msg, nl_dev_addr);
+ break;
+ case NL802154_KEY_ID_MODE_INDEX:
+ break;
+ case NL802154_KEY_ID_MODE_INDEX_SHORT:
+ /* TODO rename short_source? */
+ if (nla_put_le32(msg, NL802154_KEY_ID_ATTR_SOURCE_SHORT,
+ desc->short_source))
+ return -ENOBUFS;
+ break;
+ case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
+ if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+ desc->extended_source))
+ return -ENOBUFS;
+ break;
+ default:
+ /* userspace should handle unknown */
+ break;
+ }
+
+ /* TODO rename key_id to key_idx? Check naming */
+ if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
+ if (nla_put_u8(msg, NL802154_KEY_ID_ATTR_INDEX, desc->id))
+ return -ENOBUFS;
+ }
+
+ return 0;
+}
+
+static int nl802154_get_llsec_params(struct sk_buff *msg,
+ struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ struct nlattr *nl_key_id;
+ struct ieee802154_llsec_params params;
+ int ret;
+
+ ret = rdev_get_llsec_params(rdev, wpan_dev, &params);
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_u8(msg, NL802154_ATTR_SEC_ENABLED, params.enabled) ||
+ nla_put_u32(msg, NL802154_ATTR_SEC_OUT_LEVEL, params.out_level) ||
+ nla_put_be32(msg, NL802154_ATTR_SEC_FRAME_COUNTER,
+ params.frame_counter))
+ return -ENOBUFS;
+
+ nl_key_id = nla_nest_start(msg, NL802154_ATTR_SEC_OUT_KEY_ID);
+ if (!nl_key_id)
+ return -ENOBUFS;
+
+ ret = ieee802154_llsec_send_key_id(msg, &params.out_key);
+ if (ret < 0)
+ return ret;
+
+ nla_nest_end(msg, nl_key_id);
+
+ return 0;
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
static int
nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
struct cfg802154_registered_device *rdev,
@@ -490,6 +838,15 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
goto nla_put_failure;
+ /* ackreq default behaviour */
+ if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+ goto nla_put_failure;
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
+ goto nla_put_failure;
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
genlmsg_end(msg, hdr);
return 0;
@@ -575,14 +932,13 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL802154_ATTR_IFTYPE]) {
type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
- if (type > NL802154_IFTYPE_MAX)
+ if (type > NL802154_IFTYPE_MAX ||
+ !(rdev->wpan_phy.supported.iftypes & BIT(type)))
return -EINVAL;
}
- /* TODO add nla_get_le64 to netlink */
if (info->attrs[NL802154_ATTR_EXTENDED_ADDR])
- extended_addr = (__force __le64)nla_get_u64(
- info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
+ extended_addr = nla_get_le64(info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
if (!rdev->ops->add_virtual_intf)
return -EOPNOTSUPP;
@@ -625,7 +981,8 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
/* check 802.15.4 constraints */
- if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL)
+ if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
+ !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
return -EINVAL;
return rdev_set_channel(rdev, page, channel);
@@ -636,12 +993,17 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct wpan_phy_cca cca;
+ if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE))
+ return -EOPNOTSUPP;
+
if (!info->attrs[NL802154_ATTR_CCA_MODE])
return -EINVAL;
cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
/* checking 802.15.4 constraints */
- if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+ if (cca.mode < NL802154_CCA_ENERGY ||
+ cca.mode > NL802154_CCA_ATTR_MAX ||
+ !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode)))
return -EINVAL;
if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
@@ -649,13 +1011,58 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
- if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+ if (cca.opt > NL802154_CCA_OPT_ATTR_MAX ||
+ !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt)))
return -EINVAL;
}
return rdev_set_cca_mode(rdev, &cca);
}
+static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ s32 ed_level;
+ int i;
+
+ if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL])
+ return -EINVAL;
+
+ ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]);
+
+ for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) {
+ if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i])
+ return rdev_set_cca_ed_level(rdev, ed_level);
+ }
+
+ return -EINVAL;
+}
+
+static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ s32 power;
+ int i;
+
+ if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_TX_POWER])
+ return -EINVAL;
+
+ power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]);
+
+ for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) {
+ if (power == rdev->wpan_phy.supported.tx_powers[i])
+ return rdev_set_tx_power(rdev, power);
+ }
+
+ return -EINVAL;
+}
+
static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -668,14 +1075,22 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
/* don't change address fields on monitor */
- if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
- return -EINVAL;
-
- if (!info->attrs[NL802154_ATTR_PAN_ID])
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+ !info->attrs[NL802154_ATTR_PAN_ID])
return -EINVAL;
pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
+ /* TODO
+ * I am not sure whether to reject the broadcast pan_id here.
+ * Broadcast is a valid setting; quoting 802.15.4:
+ * "If this value is 0xffff, the device is not associated."
+ *
+ * Allowing it could be useful to simply disassociate a device.
+ */
+ if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+ return -EINVAL;
+
return rdev_set_pan_id(rdev, wpan_dev, pan_id);
}
@@ -691,14 +1106,27 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
/* don't change address fields on monitor */
- if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
- return -EINVAL;
-
- if (!info->attrs[NL802154_ATTR_SHORT_ADDR])
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+ !info->attrs[NL802154_ATTR_SHORT_ADDR])
return -EINVAL;
short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
+ /* TODO
+ * I am not sure whether to reject the unallocated/broadcast short_addr here.
+ * Both are valid settings; quoting 802.15.4:
+ * "A value of 0xfffe indicates that the device has
+ * associated but has not been allocated an address. A
+ * value of 0xffff indicates that the device does not
+ * have a short address."
+ *
+ * I think we should allow setting these values but
+ * disallow socket communication with them.
+ */
+ if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
+ short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
+ return -EINVAL;
+
return rdev_set_short_addr(rdev, wpan_dev, short_addr);
}
@@ -722,7 +1150,11 @@ nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
/* check 802.15.4 constraints */
- if (max_be < 3 || max_be > 8 || min_be > max_be)
+ if (min_be < rdev->wpan_phy.supported.min_minbe ||
+ min_be > rdev->wpan_phy.supported.max_minbe ||
+ max_be < rdev->wpan_phy.supported.min_maxbe ||
+ max_be > rdev->wpan_phy.supported.max_maxbe ||
+ min_be > max_be)
return -EINVAL;
return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
@@ -747,7 +1179,8 @@ nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
/* check 802.15.4 constraints */
- if (max_csma_backoffs > 5)
+ if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs ||
+ max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs)
return -EINVAL;
return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
@@ -771,7 +1204,8 @@ nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
/* check 802.15.4 constraints */
- if (max_frame_retries < -1 || max_frame_retries > 7)
+ if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries ||
+ max_frame_retries > rdev->wpan_phy.supported.max_frame_retries)
return -EINVAL;
return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
@@ -782,7 +1216,7 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
- bool mode;
+ int mode;
if (netif_running(dev))
return -EBUSY;
@@ -790,10 +1224,871 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL802154_ATTR_LBT_MODE])
return -EINVAL;
- mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+ mode = nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+
+ if (mode != 0 && mode != 1)
+ return -EINVAL;
+
+ if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
+ return -EINVAL;
+
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
}
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ int ackreq;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+ return -EINVAL;
+
+ ackreq = nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+
+ if (ackreq != 0 && ackreq != 1)
+ return -EINVAL;
+
+ return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = {
+ [NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 },
+ [NL802154_DEV_ADDR_ATTR_MODE] = { .type = NLA_U32 },
+ [NL802154_DEV_ADDR_ATTR_SHORT] = { .type = NLA_U16 },
+ [NL802154_DEV_ADDR_ATTR_EXTENDED] = { .type = NLA_U64 },
+};
+
+static int
+ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
+ struct ieee802154_addr *addr)
+{
+ struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1];
+
+ if (!nla || nla_parse_nested(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla,
+ nl802154_dev_addr_policy))
+ return -EINVAL;
+
+ if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
+ !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+ !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
+ attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+ return -EINVAL;
+
+ addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
+ addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
+ switch (addr->mode) {
+ case NL802154_DEV_ADDR_SHORT:
+ addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
+ break;
+ case NL802154_DEV_ADDR_EXTENDED:
+ addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct nla_policy nl802154_key_id_policy[NL802154_KEY_ID_ATTR_MAX + 1] = {
+ [NL802154_KEY_ID_ATTR_MODE] = { .type = NLA_U32 },
+ [NL802154_KEY_ID_ATTR_INDEX] = { .type = NLA_U8 },
+ [NL802154_KEY_ID_ATTR_IMPLICIT] = { .type = NLA_NESTED },
+ [NL802154_KEY_ID_ATTR_SOURCE_SHORT] = { .type = NLA_U32 },
+ [NL802154_KEY_ID_ATTR_SOURCE_EXTENDED] = { .type = NLA_U64 },
+};
+
+static int
+ieee802154_llsec_parse_key_id(struct nlattr *nla,
+ struct ieee802154_llsec_key_id *desc)
+{
+ struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1];
+
+ if (!nla || nla_parse_nested(attrs, NL802154_KEY_ID_ATTR_MAX, nla,
+ nl802154_key_id_policy))
+ return -EINVAL;
+
+ if (!attrs[NL802154_KEY_ID_ATTR_MODE])
+ return -EINVAL;
+
+ desc->mode = nla_get_u32(attrs[NL802154_KEY_ID_ATTR_MODE]);
+ switch (desc->mode) {
+ case NL802154_KEY_ID_MODE_IMPLICIT:
+ if (!attrs[NL802154_KEY_ID_ATTR_IMPLICIT])
+ return -EINVAL;
+
+ if (ieee802154_llsec_parse_dev_addr(attrs[NL802154_KEY_ID_ATTR_IMPLICIT],
+ &desc->device_addr) < 0)
+ return -EINVAL;
+ break;
+ case NL802154_KEY_ID_MODE_INDEX:
+ break;
+ case NL802154_KEY_ID_MODE_INDEX_SHORT:
+ if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT])
+ return -EINVAL;
+
+ desc->short_source = nla_get_le32(attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]);
+ break;
+ case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
+ if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED])
+ return -EINVAL;
+
+ desc->extended_source = nla_get_le64(attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
+ if (!attrs[NL802154_KEY_ID_ATTR_INDEX])
+ return -EINVAL;
+
+ /* TODO change id to idx */
+ desc->id = nla_get_u8(attrs[NL802154_KEY_ID_ATTR_INDEX]);
+ }
+
+ return 0;
+}
+
+static int nl802154_set_llsec_params(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct ieee802154_llsec_params params;
+ u32 changed = 0;
+ int ret;
+
+ if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
+ u8 enabled;
+
+ enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
+ if (enabled != 0 && enabled != 1)
+ return -EINVAL;
+
+ params.enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
+ changed |= IEEE802154_LLSEC_PARAM_ENABLED;
+ }
+
+ if (info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID]) {
+ ret = ieee802154_llsec_parse_key_id(info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID],
+ &params.out_key);
+ if (ret < 0)
+ return ret;
+
+ changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
+ }
+
+ if (info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]) {
+ params.out_level = nla_get_u32(info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]);
+ if (params.out_level > NL802154_SECLEVEL_MAX)
+ return -EINVAL;
+
+ changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
+ }
+
+ if (info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]) {
+ params.frame_counter = nla_get_be32(info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]);
+ changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
+ }
+
+ return rdev_set_llsec_params(rdev, wpan_dev, &params, changed);
+}
+
+static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
+ u32 seq, int flags,
+ struct cfg802154_registered_device *rdev,
+ struct net_device *dev,
+ const struct ieee802154_llsec_key_entry *key)
+{
+ void *hdr;
+ u32 commands[NL802154_CMD_FRAME_NR_IDS / 32];
+ struct nlattr *nl_key, *nl_key_id;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -1;
+
+ if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ nl_key = nla_nest_start(msg, NL802154_ATTR_SEC_KEY);
+ if (!nl_key)
+ goto nla_put_failure;
+
+ nl_key_id = nla_nest_start(msg, NL802154_KEY_ATTR_ID);
+ if (!nl_key_id)
+ goto nla_put_failure;
+
+ if (ieee802154_llsec_send_key_id(msg, &key->id) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nl_key_id);
+
+ if (nla_put_u8(msg, NL802154_KEY_ATTR_USAGE_FRAMES,
+ key->key->frame_types))
+ goto nla_put_failure;
+
+ if (key->key->frame_types & BIT(NL802154_FRAME_CMD)) {
+ /* TODO for each nested */
+ memset(commands, 0, sizeof(commands));
+ commands[7] = key->key->cmd_frame_ids;
+ if (nla_put(msg, NL802154_KEY_ATTR_USAGE_CMDS,
+ sizeof(commands), commands))
+ goto nla_put_failure;
+ }
+
+ if (nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE,
+ key->key->key))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nl_key);
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct cfg802154_registered_device *rdev = NULL;
+ struct ieee802154_llsec_key_entry *key;
+ struct ieee802154_llsec_table *table;
+ struct wpan_dev *wpan_dev;
+ int err;
+
+ err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+ if (err)
+ return err;
+
+ if (!wpan_dev->netdev) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ rdev_lock_llsec_table(rdev, wpan_dev);
+ rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+ /* TODO make it like station dump */
+ if (cb->args[2])
+ goto out;
+
+ list_for_each_entry(key, &table->keys, list) {
+ if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ rdev, wpan_dev->netdev, key) < 0) {
+ /* TODO */
+ err = -EIO;
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ goto out_err;
+ }
+ }
+
+ cb->args[2] = 1;
+
+out:
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ err = skb->len;
+out_err:
+ nl802154_finish_wpan_dev_dump(rdev);
+
+ return err;
+}
+
+static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = {
+ [NL802154_KEY_ATTR_ID] = { .type = NLA_NESTED },
+ /* TODO handle it as for_each_nested and NLA_FLAG? */
+ [NL802154_KEY_ATTR_USAGE_FRAMES] = { .type = NLA_U8 },
+ /* TODO handle it as for_each_nested, not static array? */
+ [NL802154_KEY_ATTR_USAGE_CMDS] = { .len = NL802154_CMD_FRAME_NR_IDS / 8 },
+ [NL802154_KEY_ATTR_BYTES] = { .len = NL802154_KEY_SIZE },
+};
+
+static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+ struct ieee802154_llsec_key key = { };
+ struct ieee802154_llsec_key_id id = { };
+ u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+
+ if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+ info->attrs[NL802154_ATTR_SEC_KEY],
+ nl802154_key_policy))
+ return -EINVAL;
+
+ if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
+ !attrs[NL802154_KEY_ATTR_BYTES])
+ return -EINVAL;
+
+ if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+ return -ENOBUFS;
+
+ key.frame_types = nla_get_u8(attrs[NL802154_KEY_ATTR_USAGE_FRAMES]);
+ if (key.frame_types > BIT(NL802154_FRAME_MAX) ||
+ ((key.frame_types & BIT(NL802154_FRAME_CMD)) &&
+ !attrs[NL802154_KEY_ATTR_USAGE_CMDS]))
+ return -EINVAL;
+
+ if (attrs[NL802154_KEY_ATTR_USAGE_CMDS]) {
+ /* TODO for each nested */
+ nla_memcpy(commands, attrs[NL802154_KEY_ATTR_USAGE_CMDS],
+ NL802154_CMD_FRAME_NR_IDS / 8);
+
+ /* TODO clarify the -EINVAL logic here, especially the last condition */
+ if (commands[0] || commands[1] || commands[2] || commands[3] ||
+ commands[4] || commands[5] || commands[6] ||
+ commands[7] > BIT(NL802154_CMD_FRAME_MAX))
+ return -EINVAL;
+
+ key.cmd_frame_ids = commands[7];
+ } else {
+ key.cmd_frame_ids = 0;
+ }
+
+ nla_memcpy(key.key, attrs[NL802154_KEY_ATTR_BYTES], NL802154_KEY_SIZE);
+
+ return rdev_add_llsec_key(rdev, wpan_dev, &id, &key);
+}
+
+static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+ struct ieee802154_llsec_key_id id;
+
+ if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+ info->attrs[NL802154_ATTR_SEC_KEY],
+ nl802154_key_policy))
+ return -EINVAL;
+
+ if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+ return -ENOBUFS;
+
+ return rdev_del_llsec_key(rdev, wpan_dev, &id);
+}
+
+static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
+ u32 seq, int flags,
+ struct cfg802154_registered_device *rdev,
+ struct net_device *dev,
+ const struct ieee802154_llsec_device *dev_desc)
+{
+ void *hdr;
+ struct nlattr *nl_device;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -1;
+
+ if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ nl_device = nla_nest_start(msg, NL802154_ATTR_SEC_DEVICE);
+ if (!nl_device)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL802154_DEV_ATTR_FRAME_COUNTER,
+ dev_desc->frame_counter) ||
+ nla_put_le16(msg, NL802154_DEV_ATTR_PAN_ID, dev_desc->pan_id) ||
+ nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
+ dev_desc->short_addr) ||
+ nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
+ dev_desc->hwaddr) ||
+ nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+ dev_desc->seclevel_exempt) ||
+ nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nl_device);
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct cfg802154_registered_device *rdev = NULL;
+ struct ieee802154_llsec_device *dev;
+ struct ieee802154_llsec_table *table;
+ struct wpan_dev *wpan_dev;
+ int err;
+
+ err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+ if (err)
+ return err;
+
+ if (!wpan_dev->netdev) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ rdev_lock_llsec_table(rdev, wpan_dev);
+ rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+ /* TODO make it like station dump */
+ if (cb->args[2])
+ goto out;
+
+ list_for_each_entry(dev, &table->devices, list) {
+ if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_DEV,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ rdev, wpan_dev->netdev, dev) < 0) {
+ /* TODO */
+ err = -EIO;
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ goto out_err;
+ }
+ }
+
+ cb->args[2] = 1;
+
+out:
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ err = skb->len;
+out_err:
+ nl802154_finish_wpan_dev_dump(rdev);
+
+ return err;
+}
+
+static const struct nla_policy nl802154_dev_policy[NL802154_DEV_ATTR_MAX + 1] = {
+ [NL802154_DEV_ATTR_FRAME_COUNTER] = { .type = NLA_U32 },
+ [NL802154_DEV_ATTR_PAN_ID] = { .type = NLA_U16 },
+ [NL802154_DEV_ATTR_SHORT_ADDR] = { .type = NLA_U16 },
+ [NL802154_DEV_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+ [NL802154_DEV_ATTR_SECLEVEL_EXEMPT] = { .type = NLA_U8 },
+ [NL802154_DEV_ATTR_KEY_MODE] = { .type = NLA_U32 },
+};
+
+static int
+ieee802154_llsec_parse_device(struct nlattr *nla,
+ struct ieee802154_llsec_device *dev)
+{
+ struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+
+ if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, nla,
+ nl802154_dev_policy))
+ return -EINVAL;
+
+ memset(dev, 0, sizeof(*dev));
+
+ if (!attrs[NL802154_DEV_ATTR_FRAME_COUNTER] ||
+ !attrs[NL802154_DEV_ATTR_PAN_ID] ||
+ !attrs[NL802154_DEV_ATTR_SHORT_ADDR] ||
+ !attrs[NL802154_DEV_ATTR_EXTENDED_ADDR] ||
+ !attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] ||
+ !attrs[NL802154_DEV_ATTR_KEY_MODE])
+ return -EINVAL;
+
+ /* TODO be32 */
+ dev->frame_counter = nla_get_u32(attrs[NL802154_DEV_ATTR_FRAME_COUNTER]);
+ dev->pan_id = nla_get_le16(attrs[NL802154_DEV_ATTR_PAN_ID]);
+ dev->short_addr = nla_get_le16(attrs[NL802154_DEV_ATTR_SHORT_ADDR]);
+ /* TODO rename hwaddr to extended_addr */
+ dev->hwaddr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+ dev->seclevel_exempt = nla_get_u8(attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT]);
+ dev->key_mode = nla_get_u32(attrs[NL802154_DEV_ATTR_KEY_MODE]);
+
+ if (dev->key_mode > NL802154_DEVKEY_MAX ||
+ (dev->seclevel_exempt != 0 && dev->seclevel_exempt != 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct ieee802154_llsec_device dev_desc;
+
+ if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
+ &dev_desc) < 0)
+ return -EINVAL;
+
+ return rdev_add_device(rdev, wpan_dev, &dev_desc);
+}
+
+static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+ __le64 extended_addr;
+
+ if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX,
+ info->attrs[NL802154_ATTR_SEC_DEVICE],
+ nl802154_dev_policy))
+ return -EINVAL;
+
+ if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
+ return -EINVAL;
+
+ extended_addr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+ return rdev_del_device(rdev, wpan_dev, extended_addr);
+}
+
+static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
+ u32 seq, int flags,
+ struct cfg802154_registered_device *rdev,
+ struct net_device *dev, __le64 extended_addr,
+ const struct ieee802154_llsec_device_key *devkey)
+{
+ void *hdr;
+ struct nlattr *nl_devkey, *nl_key_id;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -1;
+
+ if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ nl_devkey = nla_nest_start(msg, NL802154_ATTR_SEC_DEVKEY);
+ if (!nl_devkey)
+ goto nla_put_failure;
+
+ if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+ extended_addr) ||
+ nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+ devkey->frame_counter))
+ goto nla_put_failure;
+
+ nl_key_id = nla_nest_start(msg, NL802154_DEVKEY_ATTR_ID);
+ if (!nl_key_id)
+ goto nla_put_failure;
+
+ if (ieee802154_llsec_send_key_id(msg, &devkey->key_id) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nl_key_id);
+ nla_nest_end(msg, nl_devkey);
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct cfg802154_registered_device *rdev = NULL;
+ struct ieee802154_llsec_device_key *kpos;
+ struct ieee802154_llsec_device *dpos;
+ struct ieee802154_llsec_table *table;
+ struct wpan_dev *wpan_dev;
+ int err;
+
+ err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+ if (err)
+ return err;
+
+ if (!wpan_dev->netdev) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ rdev_lock_llsec_table(rdev, wpan_dev);
+ rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+ /* TODO make it like station dump */
+ if (cb->args[2])
+ goto out;
+
+ /* TODO consider dropping devkey here and using a nested attribute instead */
+ list_for_each_entry(dpos, &table->devices, list) {
+ list_for_each_entry(kpos, &dpos->keys, list) {
+ if (nl802154_send_devkey(skb,
+ NL802154_CMD_NEW_SEC_DEVKEY,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, rdev,
+ wpan_dev->netdev,
+ dpos->hwaddr,
+ kpos) < 0) {
+ /* TODO */
+ err = -EIO;
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ goto out_err;
+ }
+ }
+ }
+
+ cb->args[2] = 1;
+
+out:
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ err = skb->len;
+out_err:
+ nl802154_finish_wpan_dev_dump(rdev);
+
+ return err;
+}
+
+static const struct nla_policy nl802154_devkey_policy[NL802154_DEVKEY_ATTR_MAX + 1] = {
+ [NL802154_DEVKEY_ATTR_FRAME_COUNTER] = { .type = NLA_U32 },
+ [NL802154_DEVKEY_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+ [NL802154_DEVKEY_ATTR_ID] = { .type = NLA_NESTED },
+};
+
+static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+ struct ieee802154_llsec_device_key key;
+ __le64 extended_addr;
+
+ if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+ nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+ info->attrs[NL802154_ATTR_SEC_DEVKEY],
+ nl802154_devkey_policy) < 0)
+ return -EINVAL;
+
+ if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] ||
+ !attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+ return -EINVAL;
+
+ /* TODO change key.id ? */
+ if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+ &key.key_id) < 0)
+ return -ENOBUFS;
+
+ /* TODO be32 */
+ key.frame_counter = nla_get_u32(attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER]);
+ /* TODO change naming hwaddr -> extended_addr
+ * check unique identifier short+pan OR extended_addr
+ */
+ extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+ return rdev_add_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+ struct ieee802154_llsec_device_key key;
+ __le64 extended_addr;
+
+ if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+ info->attrs[NL802154_ATTR_SEC_DEVKEY],
+ nl802154_devkey_policy))
+ return -EINVAL;
+
+ if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+ return -EINVAL;
+
+ /* TODO change key.id ? */
+ if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+ &key.key_id) < 0)
+ return -ENOBUFS;
+
+ /* TODO change naming hwaddr -> extended_addr
+ * check unique identifier short+pan OR extended_addr
+ */
+ extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+ return rdev_del_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
+ u32 seq, int flags,
+ struct cfg802154_registered_device *rdev,
+ struct net_device *dev,
+ const struct ieee802154_llsec_seclevel *sl)
+{
+ void *hdr;
+ struct nlattr *nl_seclevel;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -1;
+
+ if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ nl_seclevel = nla_nest_start(msg, NL802154_ATTR_SEC_LEVEL);
+ if (!nl_seclevel)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME, sl->frame_type) ||
+ nla_put_u32(msg, NL802154_SECLEVEL_ATTR_LEVELS, sl->sec_levels) ||
+ nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+ sl->device_override))
+ goto nla_put_failure;
+
+ if (sl->frame_type == NL802154_FRAME_CMD) {
+ if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_CMD_FRAME,
+ sl->cmd_frame_id))
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(msg, nl_seclevel);
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct cfg802154_registered_device *rdev = NULL;
+ struct ieee802154_llsec_seclevel *sl;
+ struct ieee802154_llsec_table *table;
+ struct wpan_dev *wpan_dev;
+ int err;
+
+ err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+ if (err)
+ return err;
+
+ if (!wpan_dev->netdev) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ rdev_lock_llsec_table(rdev, wpan_dev);
+ rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+ /* TODO make it like station dump */
+ if (cb->args[2])
+ goto out;
+
+ list_for_each_entry(sl, &table->security_levels, list) {
+ if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ rdev, wpan_dev->netdev, sl) < 0) {
+ /* TODO */
+ err = -EIO;
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ goto out_err;
+ }
+ }
+
+ cb->args[2] = 1;
+
+out:
+ rdev_unlock_llsec_table(rdev, wpan_dev);
+ err = skb->len;
+out_err:
+ nl802154_finish_wpan_dev_dump(rdev);
+
+ return err;
+}
+
+static const struct nla_policy nl802154_seclevel_policy[NL802154_SECLEVEL_ATTR_MAX + 1] = {
+ [NL802154_SECLEVEL_ATTR_LEVELS] = { .type = NLA_U8 },
+ [NL802154_SECLEVEL_ATTR_FRAME] = { .type = NLA_U32 },
+ [NL802154_SECLEVEL_ATTR_CMD_FRAME] = { .type = NLA_U32 },
+ [NL802154_SECLEVEL_ATTR_DEV_OVERRIDE] = { .type = NLA_U8 },
+};
+
+static int
+llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl)
+{
+ struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1];
+
+ if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, nla,
+ nl802154_seclevel_policy))
+ return -EINVAL;
+
+ memset(sl, 0, sizeof(*sl));
+
+ if (!attrs[NL802154_SECLEVEL_ATTR_LEVELS] ||
+ !attrs[NL802154_SECLEVEL_ATTR_FRAME] ||
+ !attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE])
+ return -EINVAL;
+
+ sl->sec_levels = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_LEVELS]);
+ sl->frame_type = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_FRAME]);
+ sl->device_override = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]);
+ if (sl->frame_type > NL802154_FRAME_MAX ||
+ (sl->device_override != 0 && sl->device_override != 1))
+ return -EINVAL;
+
+ if (sl->frame_type == NL802154_FRAME_CMD) {
+ if (!attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME])
+ return -EINVAL;
+
+ sl->cmd_frame_id = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]);
+ if (sl->cmd_frame_id > NL802154_CMD_FRAME_MAX)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct ieee802154_llsec_seclevel sl;
+
+ if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+ &sl) < 0)
+ return -EINVAL;
+
+ return rdev_add_seclevel(rdev, wpan_dev, &sl);
+}
+
+static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct ieee802154_llsec_seclevel sl;
+
+ if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
+ llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+ &sl) < 0)
+ return -EINVAL;
+
+ return rdev_del_seclevel(rdev, wpan_dev, &sl);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
#define NL802154_FLAG_NEED_WPAN_PHY 0x01
#define NL802154_FLAG_NEED_NETDEV 0x02
#define NL802154_FLAG_NEED_RTNL 0x04
@@ -937,6 +2232,22 @@ static const struct genl_ops nl802154_ops[] = {
NL802154_FLAG_NEED_RTNL,
},
{
+ .cmd = NL802154_CMD_SET_CCA_ED_LEVEL,
+ .doit = nl802154_set_cca_ed_level,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_TX_POWER,
+ .doit = nl802154_set_tx_power,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL802154_CMD_SET_PAN_ID,
.doit = nl802154_set_pan_id,
.policy = nl802154_policy,
@@ -984,6 +2295,127 @@ static const struct genl_ops nl802154_ops[] = {
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+ .doit = nl802154_set_ackreq_default,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ {
+ .cmd = NL802154_CMD_SET_SEC_PARAMS,
+ .doit = nl802154_set_llsec_params,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_GET_SEC_KEY,
+ /* TODO .doit by matching key id? */
+ .dumpit = nl802154_dump_llsec_key,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_NEW_SEC_KEY,
+ .doit = nl802154_add_llsec_key,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_DEL_SEC_KEY,
+ .doit = nl802154_del_llsec_key,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ /* TODO unique identifier must be short+pan OR extended_addr */
+ {
+ .cmd = NL802154_CMD_GET_SEC_DEV,
+ /* TODO .doit by matching extended_addr? */
+ .dumpit = nl802154_dump_llsec_dev,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_NEW_SEC_DEV,
+ .doit = nl802154_add_llsec_dev,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_DEL_SEC_DEV,
+ .doit = nl802154_del_llsec_dev,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ /* TODO remove the complete devkey and expose it as a nested attribute? */
+ {
+ .cmd = NL802154_CMD_GET_SEC_DEVKEY,
+ /* TODO doit by matching ??? */
+ .dumpit = nl802154_dump_llsec_devkey,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_NEW_SEC_DEVKEY,
+ .doit = nl802154_add_llsec_devkey,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_DEL_SEC_DEVKEY,
+ .doit = nl802154_del_llsec_devkey,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_GET_SEC_LEVEL,
+ /* TODO .doit by matching frame_type? */
+ .dumpit = nl802154_dump_llsec_seclevel,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_NEW_SEC_LEVEL,
+ .doit = nl802154_add_llsec_seclevel,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_DEL_SEC_LEVEL,
+ /* TODO match frame_type only? */
+ .doit = nl802154_del_llsec_seclevel,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
/* initialisation/exit functions */
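
For orientation, the new NL802154_CMD_SET_CCA_ED_LEVEL and NL802154_CMD_SET_TX_POWER requests registered above carry a single s32 attribute and are validated against the phy's supported lists before reaching the driver. A minimal userspace sketch using libnl-3 might look like the following; the phy index, the local copy of the uapi enums and the mbm interpretation of the value are assumptions for illustration, not part of this patch.

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "nl802154.h"	/* assumed local copy of the nl802154 uapi enums */

/* Sketch: request a CCA energy-detection level of -77 dBm (-7700 mbm) on phy 0. */
static int set_cca_ed_level(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	if (!sk || !msg)
		return -ENOMEM;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl802154");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL802154_CMD_SET_CCA_ED_LEVEL, 0);
	nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, 0);	/* phy index 0, assumed */
	nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL, -7700);

	/* the kernel acks with -EINVAL if the level is not in the supported set */
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}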
diff --git a/kernel/net/ieee802154/rdev-ops.h b/kernel/net/ieee802154/rdev-ops.h
index 7b5a9dd94..4441c63b3 100644
--- a/kernel/net/ieee802154/rdev-ops.h
+++ b/kernel/net/ieee802154/rdev-ops.h
@@ -24,6 +24,26 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
}
static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+ int ret;
+ trace_802154_rdev_suspend(&rdev->wpan_phy);
+ ret = rdev->ops->suspend(&rdev->wpan_phy);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+ int ret;
+ trace_802154_rdev_resume(&rdev->wpan_phy);
+ ret = rdev->ops->resume(&rdev->wpan_phy);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
unsigned char name_assign_type,
enum nl802154_iftype type, __le64 extended_addr)
@@ -75,6 +95,29 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
}
static inline int
+rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level)
+{
+ int ret;
+
+ trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level);
+ ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
+rdev_set_tx_power(struct cfg802154_registered_device *rdev,
+ s32 power)
+{
+ int ret;
+
+ trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power);
+ ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
rdev_set_pan_id(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le16 pan_id)
{
@@ -152,4 +195,126 @@ rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
return ret;
}
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, bool ackreq)
+{
+ int ret;
+
+ trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+ ackreq);
+ ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+/* TODO this is already nl802154 specific, so move it into ieee802154 */
+static inline void
+rdev_get_llsec_table(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_llsec_table **table)
+{
+ rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table);
+}
+
+static inline void
+rdev_lock_llsec_table(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ rdev->ops->lock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline void
+rdev_unlock_llsec_table(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ rdev->ops->unlock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline int
+rdev_get_llsec_params(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_llsec_params *params)
+{
+ return rdev->ops->get_llsec_params(&rdev->wpan_phy, wpan_dev, params);
+}
+
+static inline int
+rdev_set_llsec_params(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_params *params,
+ u32 changed)
+{
+ return rdev->ops->set_llsec_params(&rdev->wpan_phy, wpan_dev, params,
+ changed);
+}
+
+static inline int
+rdev_add_llsec_key(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_key_id *id,
+ const struct ieee802154_llsec_key *key)
+{
+ return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev, id, key);
+}
+
+static inline int
+rdev_del_llsec_key(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_key_id *id)
+{
+ return rdev->ops->del_llsec_key(&rdev->wpan_phy, wpan_dev, id);
+}
+
+static inline int
+rdev_add_seclevel(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_seclevel *sl)
+{
+ return rdev->ops->add_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_del_seclevel(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_seclevel *sl)
+{
+ return rdev->ops->del_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_add_device(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev,
+ const struct ieee802154_llsec_device *dev_desc)
+{
+ return rdev->ops->add_device(&rdev->wpan_phy, wpan_dev, dev_desc);
+}
+
+static inline int
+rdev_del_device(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, __le64 extended_addr)
+{
+ return rdev->ops->del_device(&rdev->wpan_phy, wpan_dev, extended_addr);
+}
+
+static inline int
+rdev_add_devkey(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, __le64 extended_addr,
+ const struct ieee802154_llsec_device_key *devkey)
+{
+ return rdev->ops->add_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+ devkey);
+}
+
+static inline int
+rdev_del_devkey(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, __le64 extended_addr,
+ const struct ieee802154_llsec_device_key *devkey)
+{
+ return rdev->ops->del_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+ devkey);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
#endif /* __CFG802154_RDEV_OPS */
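
Because nl802154 now checks requested values against wpan_phy.supported and the WPAN_PHY_FLAG_* bits before calling these rdev ops, a driver has to describe its hardware limits when it registers the phy. A rough sketch, with a made-up driver name, power/ED tables and channel mask:

#include <linux/kernel.h>
#include <net/cfg802154.h>

/* Hypothetical values in mbm; a real driver takes these from its datasheet. */
static const s32 foo_tx_powers[] = { 500, 0, -500, -1000 };
static const s32 foo_ed_levels[] = { -8100, -7700, -7300 };

static void foo_phy_setup(struct wpan_phy *phy)
{
	phy->supported.channels[0] = 0x07fff800;	/* channels 11-26, page 0 */
	phy->supported.tx_powers = foo_tx_powers;
	phy->supported.tx_powers_size = ARRAY_SIZE(foo_tx_powers);
	phy->supported.cca_ed_levels = foo_ed_levels;
	phy->supported.cca_ed_levels_size = ARRAY_SIZE(foo_ed_levels);

	/* Without these flags the new SET_* handlers bail out with -EOPNOTSUPP. */
	phy->flags = WPAN_PHY_FLAG_TXPOWER |
		     WPAN_PHY_FLAG_CCA_ED_LEVEL |
		     WPAN_PHY_FLAG_CCA_MODE;
}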
diff --git a/kernel/net/ieee802154/socket.c b/kernel/net/ieee802154/socket.c
index 627a25376..a548be247 100644
--- a/kernel/net/ieee802154/socket.c
+++ b/kernel/net/ieee802154/socket.c
@@ -64,10 +64,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
if (tmp->type != ARPHRD_IEEE802154)
continue;
- pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
- short_addr =
- ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
+ pan_id = tmp->ieee802154_ptr->pan_id;
+ short_addr = tmp->ieee802154_ptr->short_addr;
if (pan_id == addr->pan_id &&
short_addr == addr->short_addr) {
dev = tmp;
@@ -228,15 +226,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
goto out;
}
- if (dev->type != ARPHRD_IEEE802154) {
- err = -ENODEV;
- goto out_put;
- }
-
sk->sk_bound_dev_if = dev->ifindex;
sk_dst_reset(sk);
-out_put:
dev_put(dev);
out:
release_sock(sk);
@@ -281,12 +273,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto out;
}
- mtu = dev->mtu;
+ mtu = IEEE802154_MTU;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
pr_debug("size = %Zu, mtu = %u\n", size, mtu);
- err = -EINVAL;
+ err = -EMSGSIZE;
goto out_dev;
}
@@ -645,7 +637,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
err = -ENXIO;
goto out;
}
- mtu = dev->mtu;
+ mtu = IEEE802154_MTU;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
@@ -684,8 +676,8 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
cb->seclevel = ro->seclevel;
cb->seclevel_override = ro->seclevel_override;
- err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
- ro->bound ? &ro->src_addr : NULL, size);
+ err = wpan_dev_hard_header(skb, dev, &dst_addr,
+ ro->bound ? &ro->src_addr : NULL, size);
if (err < 0)
goto out_skb;
@@ -803,9 +795,9 @@ static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
/* Data frame processing */
BUG_ON(dev->type != ARPHRD_IEEE802154);
- pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
- hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+ pan_id = dev->ieee802154_ptr->pan_id;
+ short_addr = dev->ieee802154_ptr->short_addr;
+ hw_addr = dev->ieee802154_ptr->extended_addr;
read_lock(&dgram_lock);
sk_for_each(sk, &dgram_head) {
@@ -1020,7 +1012,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
}
rc = -ENOMEM;
- sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
+ sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto, kern);
if (!sk)
goto out;
rc = 0;
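
One user-visible effect of the socket.c hunks: raw and dgram payloads are now capped at the fixed IEEE 802.15.4 frame size instead of dev->mtu, and an oversized send fails with EMSGSIZE rather than EINVAL. A minimal illustration; the AF_IEEE802154 fallback define and the elided bind() are assumptions for brevity.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef AF_IEEE802154
#define AF_IEEE802154 36	/* value from linux/socket.h, in case libc lacks it */
#endif

int main(void)
{
	/* 127 bytes is aMaxPHYPacketSize (IEEE802154_MTU); this buffer exceeds it. */
	char buf[200] = { 0 };
	int fd = socket(AF_IEEE802154, SOCK_RAW, 0);

	if (fd < 0)
		return 1;

	/* bind() to a wpan interface elided here; an unbound raw socket
	 * fails earlier with ENXIO, before the size check is reached.
	 */
	if (send(fd, buf, sizeof(buf), 0) < 0)
		printf("send: %s\n", strerror(errno));	/* EMSGSIZE with this patch */

	return 0;
}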
diff --git a/kernel/net/ieee802154/sysfs.c b/kernel/net/ieee802154/sysfs.c
index 133b42806..bd88525b0 100644
--- a/kernel/net/ieee802154/sysfs.c
+++ b/kernel/net/ieee802154/sysfs.c
@@ -14,11 +14,13 @@
*/
#include <linux/device.h>
+#include <linux/rtnetlink.h>
#include <net/cfg802154.h>
#include "core.h"
#include "sysfs.h"
+#include "rdev-ops.h"
static inline struct cfg802154_registered_device *
dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@ static struct attribute *pmib_attrs[] = {
};
ATTRIBUTE_GROUPS(pmib);
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+ int ret = 0;
+
+ if (rdev->ops->suspend) {
+ rtnl_lock();
+ ret = rdev_suspend(rdev);
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+ int ret = 0;
+
+ if (rdev->ops->resume) {
+ rtnl_lock();
+ ret = rdev_resume(rdev);
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
+ .pm = WPAN_PHY_PM_OPS,
};
int wpan_phy_sysfs_init(void)
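
The sysfs changes hook system sleep into the new rdev ops: the wpan_phy class device's PM callbacks take the RTNL and call the driver's suspend/resume if provided, and otherwise do nothing. A driver-side sketch with hypothetical names:

#include <net/cfg802154.h>

/* Hypothetical driver: park the transceiver across system sleep. */
static int foo_suspend(struct wpan_phy *wpan_phy)
{
	/* e.g. stop the receiver and put the chip into its sleep state */
	return 0;
}

static int foo_resume(struct wpan_phy *wpan_phy)
{
	/* e.g. re-program channel/address filters and re-enable RX */
	return 0;
}

static const struct cfg802154_ops foo_cfg802154_ops = {
	.suspend = foo_suspend,
	.resume = foo_resume,
	/* .add_virtual_intf, .set_channel, ... as before */
};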
diff --git a/kernel/net/ieee802154/trace.h b/kernel/net/ieee802154/trace.h
index 5ac25eb6e..9a471e41e 100644
--- a/kernel/net/ieee802154/trace.h
+++ b/kernel/net/ieee802154/trace.h
@@ -1,4 +1,4 @@
-/* Based on net/wireless/tracing.h */
+/* Based on net/wireless/trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg802154
@@ -40,6 +40,28 @@
* rdev->ops traces *
*************************************************************/
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy)
+);
+
TRACE_EVENT(802154_rdev_add_virtual_intf,
TP_PROTO(struct wpan_phy *wpan_phy, char *name,
enum nl802154_iftype type, __le64 extended_addr),
@@ -56,7 +78,7 @@ TRACE_EVENT(802154_rdev_add_virtual_intf,
__entry->type = type;
__entry->extended_addr = extended_addr;
),
- TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, ea %llx",
+ TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, extended addr: 0x%llx",
WPAN_PHY_PR_ARG, __get_str(vir_intf_name), __entry->type,
__le64_to_cpu(__entry->extended_addr))
);
@@ -93,6 +115,21 @@ TRACE_EVENT(802154_rdev_set_channel,
__entry->page, __entry->channel)
);
+TRACE_EVENT(802154_rdev_set_tx_power,
+ TP_PROTO(struct wpan_phy *wpan_phy, s32 power),
+ TP_ARGS(wpan_phy, power),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(s32, power)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->power = power;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", mbm: %d", WPAN_PHY_PR_ARG,
+ __entry->power)
+);
+
TRACE_EVENT(802154_rdev_set_cca_mode,
TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
TP_ARGS(wpan_phy, cca),
@@ -108,6 +145,21 @@ TRACE_EVENT(802154_rdev_set_cca_mode,
WPAN_CCA_PR_ARG)
);
+TRACE_EVENT(802154_rdev_set_cca_ed_level,
+ TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level),
+ TP_ARGS(wpan_phy, ed_level),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(s32, ed_level)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->ed_level = ed_level;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", ed level: %d", WPAN_PHY_PR_ARG,
+ __entry->ed_level)
+);
+
DECLARE_EVENT_CLASS(802154_le16_template,
TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 le16arg),
@@ -137,7 +189,7 @@ DEFINE_EVENT_PRINT(802154_le16_template, 802154_rdev_set_short_addr,
TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 le16arg),
TP_ARGS(wpan_phy, wpan_dev, le16arg),
- TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", sa: 0x%04x",
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", short addr: 0x%04x",
WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
__le16_to_cpu(__entry->le16arg))
);
@@ -160,7 +212,7 @@ TRACE_EVENT(802154_rdev_set_backoff_exponent,
),
TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
- ", min be: %d, max_be: %d", WPAN_PHY_PR_ARG,
+ ", min be: %d, max be: %d", WPAN_PHY_PR_ARG,
WPAN_DEV_PR_ARG, __entry->min_be, __entry->max_be)
);
@@ -223,6 +275,25 @@ TRACE_EVENT(802154_rdev_set_lbt_mode,
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
);
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ bool ackreq),
+ TP_ARGS(wpan_phy, wpan_dev, ackreq),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(bool, ackreq)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->ackreq = ackreq;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", ackreq default: %s", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
TRACE_EVENT(802154_rdev_return_int,
TP_PROTO(struct wpan_phy *wpan_phy, int ret),
TP_ARGS(wpan_phy, ret),
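
The wpan_phy_only_evt class introduced above makes further argument-less phy tracepoints a one-liner; a hypothetical additional event (802154_rdev_wakeup is invented here purely for illustration) would look like this, and the resulting events appear under the cfg802154 directory in tracefs:

/* Hypothetical example only -- not part of this patch. */
DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_wakeup,
	TP_PROTO(struct wpan_phy *wpan_phy),
	TP_ARGS(wpan_phy)
);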