From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 tree as the OPNFV kvm base. It is taken from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
(branch linux-4.1.y-rt), and the base commit is:

commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior
Date: Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior

We lose all the git history this way, which is not ideal; we should use a
separate OPNFV project repository for this in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
 kernel/drivers/staging/ozwpan/Kconfig | 9 +
 kernel/drivers/staging/ozwpan/Makefile | 16 +
 kernel/drivers/staging/ozwpan/README | 25 +
 kernel/drivers/staging/ozwpan/TODO | 14 +
 kernel/drivers/staging/ozwpan/ozappif.h | 36 +
 kernel/drivers/staging/ozwpan/ozcdev.c | 554 ++++++
 kernel/drivers/staging/ozwpan/ozcdev.h | 17 +
 kernel/drivers/staging/ozwpan/ozdbg.h | 54 +
 kernel/drivers/staging/ozwpan/ozeltbuf.c | 252 +++
 kernel/drivers/staging/ozwpan/ozeltbuf.h | 65 +
 kernel/drivers/staging/ozwpan/ozhcd.c | 2301 +++++++++++++++++++++++++
 kernel/drivers/staging/ozwpan/ozhcd.h | 15 +
 kernel/drivers/staging/ozwpan/ozmain.c | 71 +
 kernel/drivers/staging/ozwpan/ozpd.c | 886 ++++++++++
 kernel/drivers/staging/ozwpan/ozpd.h | 134 ++
 kernel/drivers/staging/ozwpan/ozproto.c | 813 +++++++++
 kernel/drivers/staging/ozwpan/ozproto.h | 62 +
 kernel/drivers/staging/ozwpan/ozprotocol.h | 375 ++++
 kernel/drivers/staging/ozwpan/ozurbparanoia.c | 54 +
 kernel/drivers/staging/ozwpan/ozurbparanoia.h | 19 +
 kernel/drivers/staging/ozwpan/ozusbif.h | 43 +
 kernel/drivers/staging/ozwpan/ozusbsvc.c | 263 +++
 kernel/drivers/staging/ozwpan/ozusbsvc.h | 32 +
 kernel/drivers/staging/ozwpan/ozusbsvc1.c | 462 +++++
 24 files changed, 6572 insertions(+)
 create mode 100644 kernel/drivers/staging/ozwpan/Kconfig
 create mode 100644 kernel/drivers/staging/ozwpan/Makefile
 create mode 100644 kernel/drivers/staging/ozwpan/README
 create mode 100644 kernel/drivers/staging/ozwpan/TODO
 create mode 100644 kernel/drivers/staging/ozwpan/ozappif.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozcdev.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozcdev.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozdbg.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozeltbuf.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozeltbuf.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozhcd.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozhcd.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozmain.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozpd.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozpd.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozproto.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozproto.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozprotocol.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozurbparanoia.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozurbparanoia.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozusbif.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozusbsvc.c
 create mode 100644 kernel/drivers/staging/ozwpan/ozusbsvc.h
 create mode 100644 kernel/drivers/staging/ozwpan/ozusbsvc1.c

diff --git a/kernel/drivers/staging/ozwpan/Kconfig b/kernel/drivers/staging/ozwpan/Kconfig
new file mode 100644
index 000000000..7904caec5
--- /dev/null
+++ b/kernel/drivers/staging/ozwpan/Kconfig
@@ -0,0 +1,9 @@
+config USB_WPAN_HCD
+	tristate "USB over WiFi Host Controller"
+	depends on USB && NET
+	help
+	  A driver for USB Host Controllers that are compatible with
+	  Ozmo Devices USB over WiFi technology.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called "ozwpan".
diff --git a/kernel/drivers/staging/ozwpan/Makefile b/kernel/drivers/staging/ozwpan/Makefile
new file mode 100644
index 000000000..29529c1a8
--- /dev/null
+++ b/kernel/drivers/staging/ozwpan/Makefile
@@ -0,0 +1,16 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011 Ozmo Inc
+# Released under the GNU General Public License Version 2 (GPLv2).
+# -----------------------------------------------------------------------------
+
+obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
+ozwpan-y := \
+	ozmain.o \
+	ozpd.o \
+	ozusbsvc.o \
+	ozusbsvc1.o \
+	ozhcd.o \
+	ozeltbuf.o \
+	ozproto.o \
+	ozcdev.o \
+	ozurbparanoia.o
diff --git a/kernel/drivers/staging/ozwpan/README b/kernel/drivers/staging/ozwpan/README
new file mode 100644
index 000000000..7c055ec99
--- /dev/null
+++ b/kernel/drivers/staging/ozwpan/README
@@ -0,0 +1,25 @@
+OZWPAN USB Host Controller Driver
+---------------------------------
+This driver is a USB HCD driver that does not have an associated physical
+device; instead it uses Wi-Fi to communicate with the wireless peripheral.
+The USB requests are converted into a layer 2 network protocol and transmitted
+on the network using an ethertype (0x892e) registered to Ozmo Devices Inc.
+This driver is compatible with existing wireless devices that use Ozmo Devices
+technology.
+
+To operate, the driver must be bound to a suitable network interface. This can
+be done when the module is loaded (specifying the name of the network interface
+as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or the binding can be done
+after loading using an ioctl call. See the ozappif.h file and the ioctls
+OZ_IOCTL_ADD_BINDING and OZ_IOCTL_REMOVE_BINDING.
+
+Devices connect to the host using Wi-Fi Direct, so a network card that supports
+Wi-Fi Direct is required. A recent version (0.8.x or later) of wpa_supplicant
+can be used to set up the network interface to create a persistent autonomous
+group (for older pre-WFD peripherals) or to put it in a listen state so that
+group negotiation can occur for more recent devices that support WFD.
+
+The protocol used over the network does not directly mimic the USB bus
+transactions, as this would be rather busy and inefficient. Instead, the
+chapter 9 requests are converted into a request/response pair of messages.
+(See ozprotocol.h for the data structures used in the protocol.)
diff --git a/kernel/drivers/staging/ozwpan/TODO b/kernel/drivers/staging/ozwpan/TODO
new file mode 100644
index 000000000..f32c1c0bc
--- /dev/null
+++ b/kernel/drivers/staging/ozwpan/TODO
@@ -0,0 +1,14 @@
+TODO:
+	- Convert event tracing code to in-kernel tracing infrastructure
+	- Check whether the remaining ioctls can be converted into
+	  sysfs entries
+	- Convert debug prints to dev_dbg() or something better
+	- Modify Kconfig to add a CONFIG option for enabling/disabling event
+	  tracing.
+	- Check that the USB HCD implementation is complete and correct.
+	- Code review by the USB developer community.
+	- Testing with as many devices as possible.
+
+Please send any patches for this driver to
+Shigekatsu Tateno
+and Greg Kroah-Hartman.
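
As an illustration of the binding interface described in the README above, a
user-space program can bind a network interface at runtime through the
character device and the ioctls declared in ozappif.h (added below). The
following sketch is illustrative only and not part of the patch; it assumes
the control node appears as /dev/ozwpan (the driver creates a device named
"ozwpan" in oz_cdev_register() later in this patch) and uses "wlan0" purely
as an example interface name.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ozappif.h"	/* OZ_IOCTL_ADD_BINDING, struct oz_binding_info */

int main(void)
{
	struct oz_binding_info b;
	int fd;

	/* Open the ozwpan control device created by the driver. */
	fd = open("/dev/ozwpan", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ozwpan");
		return 1;
	}

	/* Name of the Wi-Fi Direct capable interface to bind ("wlan0" is an example). */
	memset(&b, 0, sizeof(b));
	strncpy(b.name, "wlan0", OZ_MAX_BINDING_LEN - 1);

	/* Ask the driver to start listening on that interface. */
	if (ioctl(fd, OZ_IOCTL_ADD_BINDING, &b) < 0)
		perror("OZ_IOCTL_ADD_BINDING");

	close(fd);
	return 0;
}

Once bound, the same device node can be used with OZ_IOCTL_GET_PD_LIST and
OZ_IOCTL_SET_ACTIVE_PD (also defined in ozappif.h) to enumerate the peripheral
devices seen on that interface and select the active one.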
diff --git a/kernel/drivers/staging/ozwpan/ozappif.h b/kernel/drivers/staging/ozwpan/ozappif.h new file mode 100644 index 000000000..ea1b271fd --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozappif.h @@ -0,0 +1,36 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZAPPIF_H +#define _OZAPPIF_H + +#define OZ_IOCTL_MAGIC 0xf4 + +struct oz_mac_addr { + __u8 a[6]; +}; + +#define OZ_MAX_PDS 8 + +struct oz_pd_list { + __u32 count; + struct oz_mac_addr addr[OZ_MAX_PDS]; +}; + +#define OZ_MAX_BINDING_LEN 32 + +struct oz_binding_info { + char name[OZ_MAX_BINDING_LEN]; +}; + +#define OZ_IOCTL_GET_PD_LIST _IOR(OZ_IOCTL_MAGIC, 0, struct oz_pd_list) +#define OZ_IOCTL_SET_ACTIVE_PD _IOW(OZ_IOCTL_MAGIC, 1, struct oz_mac_addr) +#define OZ_IOCTL_GET_ACTIVE_PD _IOR(OZ_IOCTL_MAGIC, 2, struct oz_mac_addr) +#define OZ_IOCTL_ADD_BINDING _IOW(OZ_IOCTL_MAGIC, 3, struct oz_binding_info) +#define OZ_IOCTL_REMOVE_BINDING _IOW(OZ_IOCTL_MAGIC, 4, struct oz_binding_info) +#define OZ_IOCTL_MAX 5 + + +#endif /* _OZAPPIF_H */ diff --git a/kernel/drivers/staging/ozwpan/ozcdev.c b/kernel/drivers/staging/ozwpan/ozcdev.c new file mode 100644 index 000000000..da0e1fd50 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozcdev.c @@ -0,0 +1,554 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "ozdbg.h" +#include "ozprotocol.h" +#include "ozappif.h" +#include "ozeltbuf.h" +#include "ozpd.h" +#include "ozproto.h" +#include "ozcdev.h" + +#define OZ_RD_BUF_SZ 256 +struct oz_cdev { + dev_t devnum; + struct cdev cdev; + wait_queue_head_t rdq; + spinlock_t lock; + u8 active_addr[ETH_ALEN]; + struct oz_pd *active_pd; +}; + +/* Per PD context for the serial service stored in the PD. 
*/ +struct oz_serial_ctx { + atomic_t ref_count; + u8 tx_seq_num; + u8 rx_seq_num; + u8 rd_buf[OZ_RD_BUF_SZ]; + int rd_in; + int rd_out; +}; + +static struct oz_cdev g_cdev; +static struct class *g_oz_class; + +/* + * Context: process and softirq + */ +static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd) +{ + struct oz_serial_ctx *ctx; + + spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; + if (ctx) + atomic_inc(&ctx->ref_count); + spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + return ctx; +} + +/* + * Context: softirq or process + */ +static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx) +{ + if (atomic_dec_and_test(&ctx->ref_count)) { + oz_dbg(ON, "Dealloc serial context\n"); + kfree(ctx); + } +} + +/* + * Context: process + */ +static int oz_cdev_open(struct inode *inode, struct file *filp) +{ + struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev); + + oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode)); + + filp->private_data = dev; + return 0; +} + +/* + * Context: process + */ +static int oz_cdev_release(struct inode *inode, struct file *filp) +{ + return 0; +} + +/* + * Context: process + */ +static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count, + loff_t *fpos) +{ + int n; + int ix; + + struct oz_pd *pd; + struct oz_serial_ctx *ctx; + + spin_lock_bh(&g_cdev.lock); + pd = g_cdev.active_pd; + if (pd) + oz_pd_get(pd); + spin_unlock_bh(&g_cdev.lock); + if (pd == NULL) + return -1; + ctx = oz_cdev_claim_ctx(pd); + if (ctx == NULL) + goto out2; + n = ctx->rd_in - ctx->rd_out; + if (n < 0) + n += OZ_RD_BUF_SZ; + if (count > n) + count = n; + ix = ctx->rd_out; + n = OZ_RD_BUF_SZ - ix; + if (n > count) + n = count; + if (copy_to_user(buf, &ctx->rd_buf[ix], n)) { + count = 0; + goto out1; + } + ix += n; + if (ix == OZ_RD_BUF_SZ) + ix = 0; + if (n < count) { + if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) { + count = 0; + goto out1; + } + ix = count-n; + } + ctx->rd_out = ix; +out1: + oz_cdev_release_ctx(ctx); +out2: + oz_pd_put(pd); + return count; +} + +/* + * Context: process + */ +static ssize_t oz_cdev_write(struct file *filp, const char __user *buf, + size_t count, loff_t *fpos) +{ + struct oz_pd *pd; + struct oz_elt_buf *eb; + struct oz_elt_info *ei; + struct oz_elt *elt; + struct oz_app_hdr *app_hdr; + struct oz_serial_ctx *ctx; + + if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr)) + return -EINVAL; + + spin_lock_bh(&g_cdev.lock); + pd = g_cdev.active_pd; + if (pd) + oz_pd_get(pd); + spin_unlock_bh(&g_cdev.lock); + if (pd == NULL) + return -ENXIO; + if (!(pd->state & OZ_PD_S_CONNECTED)) + return -EAGAIN; + eb = &pd->elt_buff; + ei = oz_elt_info_alloc(eb); + if (ei == NULL) { + count = 0; + goto out; + } + elt = (struct oz_elt *)ei->data; + app_hdr = (struct oz_app_hdr *)(elt+1); + elt->length = sizeof(struct oz_app_hdr) + count; + elt->type = OZ_ELT_APP_DATA; + ei->app_id = OZ_APPID_SERIAL; + ei->length = elt->length + sizeof(struct oz_elt); + app_hdr->app_id = OZ_APPID_SERIAL; + if (copy_from_user(app_hdr+1, buf, count)) + goto out; + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; + if (ctx) { + app_hdr->elt_seq_num = ctx->tx_seq_num++; + if (ctx->tx_seq_num == 0) + ctx->tx_seq_num = 1; + spin_lock(&eb->lock); + if (oz_queue_elt_info(eb, 0, 0, ei) == 0) + ei = NULL; + spin_unlock(&eb->lock); + } + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); +out: + if (ei) { + count = 0; + 
spin_lock_bh(&eb->lock); + oz_elt_info_free(eb, ei); + spin_unlock_bh(&eb->lock); + } + oz_pd_put(pd); + return count; +} + +/* + * Context: process + */ +static int oz_set_active_pd(const u8 *addr) +{ + int rc = 0; + struct oz_pd *pd; + struct oz_pd *old_pd; + + pd = oz_pd_find(addr); + if (pd) { + spin_lock_bh(&g_cdev.lock); + ether_addr_copy(g_cdev.active_addr, addr); + old_pd = g_cdev.active_pd; + g_cdev.active_pd = pd; + spin_unlock_bh(&g_cdev.lock); + if (old_pd) + oz_pd_put(old_pd); + } else { + if (is_zero_ether_addr(addr)) { + spin_lock_bh(&g_cdev.lock); + pd = g_cdev.active_pd; + g_cdev.active_pd = NULL; + memset(g_cdev.active_addr, 0, + sizeof(g_cdev.active_addr)); + spin_unlock_bh(&g_cdev.lock); + if (pd) + oz_pd_put(pd); + } else { + rc = -1; + } + } + return rc; +} + +/* + * Context: process + */ +static long oz_cdev_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + + if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) > OZ_IOCTL_MAX) + return -ENOTTY; + if (_IOC_DIR(cmd) & _IOC_READ) + rc = !access_ok(VERIFY_WRITE, (void __user *)arg, + _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + rc = !access_ok(VERIFY_READ, (void __user *)arg, + _IOC_SIZE(cmd)); + if (rc) + return -EFAULT; + switch (cmd) { + case OZ_IOCTL_GET_PD_LIST: { + struct oz_pd_list list; + + oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n"); + memset(&list, 0, sizeof(list)); + list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS); + if (copy_to_user((void __user *)arg, &list, + sizeof(list))) + return -EFAULT; + } + break; + case OZ_IOCTL_SET_ACTIVE_PD: { + u8 addr[ETH_ALEN]; + + oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n"); + if (copy_from_user(addr, (void __user *)arg, ETH_ALEN)) + return -EFAULT; + rc = oz_set_active_pd(addr); + } + break; + case OZ_IOCTL_GET_ACTIVE_PD: { + u8 addr[ETH_ALEN]; + + oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n"); + spin_lock_bh(&g_cdev.lock); + ether_addr_copy(addr, g_cdev.active_addr); + spin_unlock_bh(&g_cdev.lock); + if (copy_to_user((void __user *)arg, addr, ETH_ALEN)) + return -EFAULT; + } + break; + case OZ_IOCTL_ADD_BINDING: + case OZ_IOCTL_REMOVE_BINDING: { + struct oz_binding_info b; + + if (copy_from_user(&b, (void __user *)arg, + sizeof(struct oz_binding_info))) { + return -EFAULT; + } + /* Make sure name is null terminated. 
*/ + b.name[OZ_MAX_BINDING_LEN-1] = 0; + if (cmd == OZ_IOCTL_ADD_BINDING) + oz_binding_add(b.name); + else + oz_binding_remove(b.name); + } + break; + } + return rc; +} + +/* + * Context: process + */ +static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait) +{ + unsigned int ret = 0; + struct oz_cdev *dev = filp->private_data; + + oz_dbg(ON, "Poll called wait = %p\n", wait); + spin_lock_bh(&dev->lock); + if (dev->active_pd) { + struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd); + + if (ctx) { + if (ctx->rd_in != ctx->rd_out) + ret |= POLLIN | POLLRDNORM; + oz_cdev_release_ctx(ctx); + } + } + spin_unlock_bh(&dev->lock); + if (wait) + poll_wait(filp, &dev->rdq, wait); + return ret; +} + +/* + */ +static const struct file_operations oz_fops = { + .owner = THIS_MODULE, + .open = oz_cdev_open, + .release = oz_cdev_release, + .read = oz_cdev_read, + .write = oz_cdev_write, + .unlocked_ioctl = oz_cdev_ioctl, + .poll = oz_cdev_poll +}; + +/* + * Context: process + */ +int oz_cdev_register(void) +{ + int err; + struct device *dev; + + memset(&g_cdev, 0, sizeof(g_cdev)); + err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan"); + if (err < 0) + return err; + oz_dbg(ON, "Alloc dev number %d:%d\n", + MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum)); + cdev_init(&g_cdev.cdev, &oz_fops); + g_cdev.cdev.owner = THIS_MODULE; + spin_lock_init(&g_cdev.lock); + init_waitqueue_head(&g_cdev.rdq); + err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1); + if (err < 0) { + oz_dbg(ON, "Failed to add cdev\n"); + goto unregister; + } + g_oz_class = class_create(THIS_MODULE, "ozmo_wpan"); + if (IS_ERR(g_oz_class)) { + oz_dbg(ON, "Failed to register ozmo_wpan class\n"); + err = PTR_ERR(g_oz_class); + goto delete; + } + dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan"); + if (IS_ERR(dev)) { + oz_dbg(ON, "Failed to create sysfs entry for cdev\n"); + err = PTR_ERR(dev); + goto delete; + } + return 0; + +delete: + cdev_del(&g_cdev.cdev); +unregister: + unregister_chrdev_region(g_cdev.devnum, 1); + return err; +} + +/* + * Context: process + */ +int oz_cdev_deregister(void) +{ + cdev_del(&g_cdev.cdev); + unregister_chrdev_region(g_cdev.devnum, 1); + if (g_oz_class) { + device_destroy(g_oz_class, g_cdev.devnum); + class_destroy(g_oz_class); + } + return 0; +} + +/* + * Context: process + */ +int oz_cdev_init(void) +{ + oz_app_enable(OZ_APPID_SERIAL, 1); + return 0; +} + +/* + * Context: process + */ +void oz_cdev_term(void) +{ + oz_app_enable(OZ_APPID_SERIAL, 0); +} + +/* + * Context: softirq-serialized + */ +int oz_cdev_start(struct oz_pd *pd, int resume) +{ + struct oz_serial_ctx *ctx; + struct oz_serial_ctx *old_ctx; + + if (resume) { + oz_dbg(ON, "Serial service resumed\n"); + return 0; + } + ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC); + if (ctx == NULL) + return -ENOMEM; + atomic_set(&ctx->ref_count, 1); + ctx->tx_seq_num = 1; + spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + old_ctx = pd->app_ctx[OZ_APPID_SERIAL]; + if (old_ctx) { + spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + kfree(ctx); + } else { + pd->app_ctx[OZ_APPID_SERIAL] = ctx; + spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + } + spin_lock(&g_cdev.lock); + if ((g_cdev.active_pd == NULL) && + ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) { + oz_pd_get(pd); + g_cdev.active_pd = pd; + oz_dbg(ON, "Active PD arrived\n"); + } + spin_unlock(&g_cdev.lock); + oz_dbg(ON, "Serial service started\n"); + return 0; +} + +/* + * Context: softirq or process + */ +void oz_cdev_stop(struct oz_pd *pd, int pause) +{ 
+ struct oz_serial_ctx *ctx; + + if (pause) { + oz_dbg(ON, "Serial service paused\n"); + return; + } + spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; + pd->app_ctx[OZ_APPID_SERIAL] = NULL; + spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); + if (ctx) + oz_cdev_release_ctx(ctx); + spin_lock(&g_cdev.lock); + if (pd == g_cdev.active_pd) + g_cdev.active_pd = NULL; + else + pd = NULL; + spin_unlock(&g_cdev.lock); + if (pd) { + oz_pd_put(pd); + oz_dbg(ON, "Active PD departed\n"); + } + oz_dbg(ON, "Serial service stopped\n"); +} + +/* + * Context: softirq-serialized + */ +void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt) +{ + struct oz_serial_ctx *ctx; + struct oz_app_hdr *app_hdr; + u8 *data; + int len; + int space; + int copy_sz; + int ix; + + ctx = oz_cdev_claim_ctx(pd); + if (ctx == NULL) { + oz_dbg(ON, "Cannot claim serial context\n"); + return; + } + + app_hdr = (struct oz_app_hdr *)(elt+1); + /* If sequence number is non-zero then check it is not a duplicate. + */ + if (app_hdr->elt_seq_num != 0) { + if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) { + /* Reject duplicate element. */ + oz_dbg(ON, "Duplicate element:%02x %02x\n", + app_hdr->elt_seq_num, ctx->rx_seq_num); + goto out; + } + } + ctx->rx_seq_num = app_hdr->elt_seq_num; + len = elt->length - sizeof(struct oz_app_hdr); + data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr); + if (len <= 0) + goto out; + space = ctx->rd_out - ctx->rd_in - 1; + if (space < 0) + space += OZ_RD_BUF_SZ; + if (len > space) { + oz_dbg(ON, "Not enough space:%d %d\n", len, space); + len = space; + } + ix = ctx->rd_in; + copy_sz = OZ_RD_BUF_SZ - ix; + if (copy_sz > len) + copy_sz = len; + memcpy(&ctx->rd_buf[ix], data, copy_sz); + len -= copy_sz; + ix += copy_sz; + if (ix == OZ_RD_BUF_SZ) + ix = 0; + if (len) { + memcpy(ctx->rd_buf, data+copy_sz, len); + ix = len; + } + ctx->rd_in = ix; + wake_up(&g_cdev.rdq); +out: + oz_cdev_release_ctx(ctx); +} diff --git a/kernel/drivers/staging/ozwpan/ozcdev.h b/kernel/drivers/staging/ozwpan/ozcdev.h new file mode 100644 index 000000000..dd11935a0 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozcdev.h @@ -0,0 +1,17 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZCDEV_H +#define _OZCDEV_H + +int oz_cdev_register(void); +int oz_cdev_deregister(void); +int oz_cdev_init(void); +void oz_cdev_term(void); +int oz_cdev_start(struct oz_pd *pd, int resume); +void oz_cdev_stop(struct oz_pd *pd, int pause); +void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt); + +#endif /* _OZCDEV_H */ diff --git a/kernel/drivers/staging/ozwpan/ozdbg.h b/kernel/drivers/staging/ozwpan/ozdbg.h new file mode 100644 index 000000000..b86a2b7e0 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozdbg.h @@ -0,0 +1,54 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). 
+ * ---------------------------------------------------------------------------*/ + +#ifndef _OZDBG_H +#define _OZDBG_H + +#define OZ_WANT_DBG 0 +#define OZ_WANT_VERBOSE_DBG 1 + +#define OZ_DBG_ON 0x0 +#define OZ_DBG_STREAM 0x1 +#define OZ_DBG_URB 0x2 +#define OZ_DBG_CTRL_DETAIL 0x4 +#define OZ_DBG_HUB 0x8 +#define OZ_DBG_RX_FRAMES 0x10 +#define OZ_DBG_TX_FRAMES 0x20 + +#define OZ_DEFAULT_DBG_MASK \ + ( \ + /* OZ_DBG_STREAM | */ \ + /* OZ_DBG_URB | */ \ + /* OZ_DBG_CTRL_DETAIL | */ \ + OZ_DBG_HUB | \ + /* OZ_DBG_RX_FRAMES | */ \ + /* OZ_DBG_TX_FRAMES | */ \ + 0) + +extern unsigned int oz_dbg_mask; + +#define oz_want_dbg(mask) \ + ((OZ_WANT_DBG && (OZ_DBG_##mask == OZ_DBG_ON)) || \ + (OZ_WANT_VERBOSE_DBG && (OZ_DBG_##mask & oz_dbg_mask))) + +#define oz_dbg(mask, fmt, ...) \ +do { \ + if (oz_want_dbg(mask)) \ + pr_debug(fmt, ##__VA_ARGS__); \ +} while (0) + +#define oz_cdev_dbg(cdev, mask, fmt, ...) \ +do { \ + if (oz_want_dbg(mask)) \ + netdev_dbg((cdev)->dev, fmt, ##__VA_ARGS__); \ +} while (0) + +#define oz_pd_dbg(pd, mask, fmt, ...) \ +do { \ + if (oz_want_dbg(mask)) \ + pr_debug(fmt, ##__VA_ARGS__); \ +} while (0) + +#endif /* _OZDBG_H */ diff --git a/kernel/drivers/staging/ozwpan/ozeltbuf.c b/kernel/drivers/staging/ozwpan/ozeltbuf.c new file mode 100644 index 000000000..01b25da44 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozeltbuf.c @@ -0,0 +1,252 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#include +#include +#include "ozdbg.h" +#include "ozprotocol.h" +#include "ozeltbuf.h" +#include "ozpd.h" + +/* + * Context: softirq-serialized + */ +void oz_elt_buf_init(struct oz_elt_buf *buf) +{ + memset(buf, 0, sizeof(struct oz_elt_buf)); + INIT_LIST_HEAD(&buf->stream_list); + INIT_LIST_HEAD(&buf->order_list); + INIT_LIST_HEAD(&buf->isoc_list); + spin_lock_init(&buf->lock); +} + +/* + * Context: softirq or process + */ +void oz_elt_buf_term(struct oz_elt_buf *buf) +{ + struct oz_elt_info *ei, *n; + + list_for_each_entry_safe(ei, n, &buf->isoc_list, link_order) + kfree(ei); + list_for_each_entry_safe(ei, n, &buf->order_list, link_order) + kfree(ei); +} + +/* + * Context: softirq or process + */ +struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf) +{ + struct oz_elt_info *ei; + + ei = kmem_cache_zalloc(oz_elt_info_cache, GFP_ATOMIC); + if (ei) { + INIT_LIST_HEAD(&ei->link); + INIT_LIST_HEAD(&ei->link_order); + } + return ei; +} + +/* + * Precondition: oz_elt_buf.lock must be held. 
+ * Context: softirq or process + */ +void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei) +{ + if (ei) + kmem_cache_free(oz_elt_info_cache, ei); +} + +/*------------------------------------------------------------------------------ + * Context: softirq + */ +void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list) +{ + struct oz_elt_info *ei, *n; + + spin_lock_bh(&buf->lock); + list_for_each_entry_safe(ei, n, list->next, link) + oz_elt_info_free(buf, ei); + spin_unlock_bh(&buf->lock); +} + +int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count) +{ + struct oz_elt_stream *st; + + oz_dbg(ON, "%s: (0x%x)\n", __func__, id); + + st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC); + if (st == NULL) + return -ENOMEM; + atomic_set(&st->ref_count, 1); + st->id = id; + st->max_buf_count = max_buf_count; + INIT_LIST_HEAD(&st->elt_list); + spin_lock_bh(&buf->lock); + list_add_tail(&st->link, &buf->stream_list); + spin_unlock_bh(&buf->lock); + return 0; +} + +int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id) +{ + struct list_head *e, *n; + struct oz_elt_stream *st = NULL; + + oz_dbg(ON, "%s: (0x%x)\n", __func__, id); + spin_lock_bh(&buf->lock); + list_for_each(e, &buf->stream_list) { + st = list_entry(e, struct oz_elt_stream, link); + if (st->id == id) { + list_del(e); + break; + } + st = NULL; + } + if (!st) { + spin_unlock_bh(&buf->lock); + return -1; + } + list_for_each_safe(e, n, &st->elt_list) { + struct oz_elt_info *ei = + list_entry(e, struct oz_elt_info, link); + list_del_init(&ei->link); + list_del_init(&ei->link_order); + st->buf_count -= ei->length; + oz_dbg(STREAM, "Stream down: %d %d %d\n", + st->buf_count, ei->length, atomic_read(&st->ref_count)); + oz_elt_stream_put(st); + oz_elt_info_free(buf, ei); + } + spin_unlock_bh(&buf->lock); + oz_elt_stream_put(st); + return 0; +} + +void oz_elt_stream_get(struct oz_elt_stream *st) +{ + atomic_inc(&st->ref_count); +} + +void oz_elt_stream_put(struct oz_elt_stream *st) +{ + if (atomic_dec_and_test(&st->ref_count)) { + oz_dbg(ON, "Stream destroyed\n"); + kfree(st); + } +} + +/* + * Precondition: Element buffer lock must be held. + * If this function fails the caller is responsible for deallocating the elt + * info structure. + */ +int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id, + struct oz_elt_info *ei) +{ + struct oz_elt_stream *st = NULL; + struct list_head *e; + + if (id) { + list_for_each(e, &buf->stream_list) { + st = list_entry(e, struct oz_elt_stream, link); + if (st->id == id) + break; + } + if (e == &buf->stream_list) { + /* Stream specified but stream not known so fail. + * Caller deallocates element info. */ + return -1; + } + } + if (st) { + /* If this is an ISOC fixed element that needs a frame number + * then insert that now. Earlier we stored the unit count in + * this field. + */ + struct oz_isoc_fixed *body = (struct oz_isoc_fixed *) + &ei->data[sizeof(struct oz_elt)]; + if ((body->app_id == OZ_APPID_USB) && (body->type + == OZ_USB_ENDPOINT_DATA) && + (body->format == OZ_DATA_F_ISOC_FIXED)) { + u8 unit_count = body->frame_number; + + body->frame_number = st->frame_number; + st->frame_number += unit_count; + } + /* Claim stream and update accounts */ + oz_elt_stream_get(st); + ei->stream = st; + st->buf_count += ei->length; + /* Add to list in stream. */ + list_add_tail(&ei->link, &st->elt_list); + oz_dbg(STREAM, "Stream up: %d %d\n", st->buf_count, ei->length); + /* Check if we have too much buffered for this stream. 
If so + * start dropping elements until we are back in bounds. + */ + while ((st->buf_count > st->max_buf_count) && + !list_empty(&st->elt_list)) { + struct oz_elt_info *ei2 = + list_first_entry(&st->elt_list, + struct oz_elt_info, link); + list_del_init(&ei2->link); + list_del_init(&ei2->link_order); + st->buf_count -= ei2->length; + oz_elt_info_free(buf, ei2); + oz_elt_stream_put(st); + } + } + list_add_tail(&ei->link_order, isoc ? + &buf->isoc_list : &buf->order_list); + return 0; +} + +int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len, + unsigned max_len, struct list_head *list) +{ + int count = 0; + struct list_head *el; + struct oz_elt_info *ei, *n; + + spin_lock_bh(&buf->lock); + if (isoc) + el = &buf->isoc_list; + else + el = &buf->order_list; + + list_for_each_entry_safe(ei, n, el, link_order) { + if ((*len + ei->length) <= max_len) { + struct oz_app_hdr *app_hdr = (struct oz_app_hdr *) + &ei->data[sizeof(struct oz_elt)]; + app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++; + if (buf->tx_seq_num[ei->app_id] == 0) + buf->tx_seq_num[ei->app_id] = 1; + *len += ei->length; + list_del(&ei->link); + list_del(&ei->link_order); + if (ei->stream) { + ei->stream->buf_count -= ei->length; + oz_dbg(STREAM, "Stream down: %d %d\n", + ei->stream->buf_count, ei->length); + oz_elt_stream_put(ei->stream); + ei->stream = NULL; + } + INIT_LIST_HEAD(&ei->link_order); + list_add_tail(&ei->link, list); + count++; + } else { + break; + } + } + spin_unlock_bh(&buf->lock); + return count; +} + +int oz_are_elts_available(struct oz_elt_buf *buf) +{ + return !list_empty(&buf->order_list); +} diff --git a/kernel/drivers/staging/ozwpan/ozeltbuf.h b/kernel/drivers/staging/ozwpan/ozeltbuf.h new file mode 100644 index 000000000..f09f5fe3f --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozeltbuf.h @@ -0,0 +1,65 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). 
+ * ----------------------------------------------------------------------------- + */ +#ifndef _OZELTBUF_H +#define _OZELTBUF_H + +#include "ozprotocol.h" + +/*----------------------------------------------------------------------------- + */ +struct oz_pd; +typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context); + +struct oz_elt_stream { + struct list_head link; + struct list_head elt_list; + atomic_t ref_count; + unsigned buf_count; + unsigned max_buf_count; + u8 frame_number; + u8 id; +}; + +#define OZ_MAX_ELT_PAYLOAD 255 +struct oz_elt_info { + struct list_head link; + struct list_head link_order; + u8 flags; + u8 app_id; + oz_elt_callback_t callback; + long context; + struct oz_elt_stream *stream; + u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD]; + int length; +}; +/* Flags values */ +#define OZ_EI_F_MARKED 0x1 + +struct oz_elt_buf { + spinlock_t lock; + struct list_head stream_list; + struct list_head order_list; + struct list_head isoc_list; + u8 tx_seq_num[OZ_NB_APPS]; +}; + +void oz_elt_buf_init(struct oz_elt_buf *buf); +void oz_elt_buf_term(struct oz_elt_buf *buf); +struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf); +void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei); +void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list); +int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count); +int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id); +void oz_elt_stream_get(struct oz_elt_stream *st); +void oz_elt_stream_put(struct oz_elt_stream *st); +int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id, + struct oz_elt_info *ei); +int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len, + unsigned max_len, struct list_head *list); +int oz_are_elts_available(struct oz_elt_buf *buf); + +#endif /* _OZELTBUF_H */ + diff --git a/kernel/drivers/staging/ozwpan/ozhcd.c b/kernel/drivers/staging/ozwpan/ozhcd.c new file mode 100644 index 000000000..784b5ecfa --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozhcd.c @@ -0,0 +1,2301 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * + * This file provides the implementation of a USB host controller device that + * does not have any associated hardware. Instead the virtual device is + * connected to the WiFi network and emulates the operation of a USB hcd by + * receiving and sending network frames. + * Note: + * We take great pains to reduce the amount of code where interrupts need to be + * disabled and in this respect we are different from standard HCD's. In + * particular we don't want in_irq() code bleeding over to the protocol side of + * the driver. + * The troublesome functions are the urb enqueue and dequeue functions both of + * which can be called in_irq(). So for these functions we put the urbs into a + * queue and request a tasklet to process them. This means that a spinlock with + * interrupts disabled must be held for insertion and removal but most code is + * is in tasklet or soft irq context. The lock that protects this list is called + * the tasklet lock and serves the purpose of the 'HCD lock' which must be held + * when calling the following functions. 
+ * usb_hcd_link_urb_to_ep() + * usb_hcd_unlink_urb_from_ep() + * usb_hcd_flush_endpoint() + * usb_hcd_check_unlink_urb() + * ----------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include "linux/usb/hcd.h" +#include +#include "ozdbg.h" +#include "ozusbif.h" +#include "ozurbparanoia.h" +#include "ozhcd.h" + +/* + * Number of units of buffering to capture for an isochronous IN endpoint before + * allowing data to be indicated up. + */ +#define OZ_IN_BUFFERING_UNITS 100 + +/* Name of our platform device. + */ +#define OZ_PLAT_DEV_NAME "ozwpan" + +/*EP0 timeout before ep0 request is again added to TX queue. (13*8 = 98mSec) + */ +#define EP0_TIMEOUT_COUNTER 13 + +/* Debounce time HCD driver should wait before unregistering. + */ +#define OZ_HUB_DEBOUNCE_TIMEOUT 1500 + +/* + * Used to link urbs together and also store some status information for each + * urb. + * A cache of these are kept in a pool to reduce number of calls to kmalloc. + */ +struct oz_urb_link { + struct list_head link; + struct urb *urb; + struct oz_port *port; + u8 req_id; + u8 ep_num; + unsigned submit_counter; +}; + +static struct kmem_cache *oz_urb_link_cache; + +/* Holds state information about a USB endpoint. + */ +#define OZ_EP_BUFFER_SIZE_ISOC (1024 * 24) +#define OZ_EP_BUFFER_SIZE_INT 512 +struct oz_endpoint { + struct list_head urb_list; /* List of oz_urb_link items. */ + struct list_head link; /* For isoc ep, links in to isoc + lists of oz_port. */ + struct timespec timestamp; + int credit; + int credit_ceiling; + u8 ep_num; + u8 attrib; + u8 *buffer; + int buffer_size; + int in_ix; + int out_ix; + int buffered_units; + unsigned flags; + int start_frame; +}; + +/* Bits in the flags field. */ +#define OZ_F_EP_BUFFERING 0x1 +#define OZ_F_EP_HAVE_STREAM 0x2 + +/* Holds state information about a USB interface. + */ +struct oz_interface { + unsigned ep_mask; + u8 alt; +}; + +/* Holds state information about an hcd port. + */ +#define OZ_NB_ENDPOINTS 16 +struct oz_port { + unsigned flags; + unsigned status; + void *hpd; + struct oz_hcd *ozhcd; + spinlock_t port_lock; + u8 bus_addr; + u8 next_req_id; + u8 config_num; + int num_iface; + struct oz_interface *iface; + struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS]; + struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS]; + struct list_head isoc_out_ep; + struct list_head isoc_in_ep; +}; + +#define OZ_PORT_F_PRESENT 0x1 +#define OZ_PORT_F_CHANGED 0x2 +#define OZ_PORT_F_DYING 0x4 + +/* Data structure in the private context area of struct usb_hcd. + */ +#define OZ_NB_PORTS 8 +struct oz_hcd { + spinlock_t hcd_lock; + struct list_head urb_pending_list; + struct list_head urb_cancel_list; + struct list_head orphanage; + int conn_port; /* Port that is currently connecting, -1 if none.*/ + struct oz_port ports[OZ_NB_PORTS]; + uint flags; + struct usb_hcd *hcd; +}; + +/* Bits in flags field. + */ +#define OZ_HDC_F_SUSPENDED 0x1 + +/* + * Static function prototypes. 
+ */ +static int oz_hcd_start(struct usb_hcd *hcd); +static void oz_hcd_stop(struct usb_hcd *hcd); +static void oz_hcd_shutdown(struct usb_hcd *hcd); +static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags); +static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); +static void oz_hcd_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *ep); +static void oz_hcd_endpoint_reset(struct usb_hcd *hcd, + struct usb_host_endpoint *ep); +static int oz_hcd_get_frame_number(struct usb_hcd *hcd); +static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf); +static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue, + u16 windex, char *buf, u16 wlength); +static int oz_hcd_bus_suspend(struct usb_hcd *hcd); +static int oz_hcd_bus_resume(struct usb_hcd *hcd); +static int oz_plat_probe(struct platform_device *dev); +static int oz_plat_remove(struct platform_device *dev); +static void oz_plat_shutdown(struct platform_device *dev); +static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg); +static int oz_plat_resume(struct platform_device *dev); +static void oz_urb_process_tasklet(unsigned long unused); +static int oz_build_endpoints_for_config(struct usb_hcd *hcd, + struct oz_port *port, struct usb_host_config *config, + gfp_t mem_flags); +static void oz_clean_endpoints_for_config(struct usb_hcd *hcd, + struct oz_port *port); +static int oz_build_endpoints_for_interface(struct usb_hcd *hcd, + struct oz_port *port, + struct usb_host_interface *intf, gfp_t mem_flags); +static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd, + struct oz_port *port, int if_ix); +static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb, + gfp_t mem_flags); +static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep, + struct urb *urb); +static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status); + +/* + * Static external variables. + */ +static struct platform_device *g_plat_dev; +static struct oz_hcd *g_ozhcd; +static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */ +static const char g_hcd_name[] = "Ozmo WPAN"; +static DEFINE_SPINLOCK(g_tasklet_lock); +static struct tasklet_struct g_urb_process_tasklet; +static struct tasklet_struct g_urb_cancel_tasklet; +static atomic_t g_pending_urbs = ATOMIC_INIT(0); +static atomic_t g_usb_frame_number = ATOMIC_INIT(0); +static const struct hc_driver g_oz_hc_drv = { + .description = g_hcd_name, + .product_desc = "Ozmo Devices WPAN", + .hcd_priv_size = sizeof(struct oz_hcd), + .flags = HCD_USB11, + .start = oz_hcd_start, + .stop = oz_hcd_stop, + .shutdown = oz_hcd_shutdown, + .urb_enqueue = oz_hcd_urb_enqueue, + .urb_dequeue = oz_hcd_urb_dequeue, + .endpoint_disable = oz_hcd_endpoint_disable, + .endpoint_reset = oz_hcd_endpoint_reset, + .get_frame_number = oz_hcd_get_frame_number, + .hub_status_data = oz_hcd_hub_status_data, + .hub_control = oz_hcd_hub_control, + .bus_suspend = oz_hcd_bus_suspend, + .bus_resume = oz_hcd_bus_resume, +}; + +static struct platform_driver g_oz_plat_drv = { + .probe = oz_plat_probe, + .remove = oz_plat_remove, + .shutdown = oz_plat_shutdown, + .suspend = oz_plat_suspend, + .resume = oz_plat_resume, + .driver = { + .name = OZ_PLAT_DEV_NAME, + }, +}; + +/* + * Gets our private context area (which is of type struct oz_hcd) from the + * usb_hcd structure. 
+ * Context: any + */ +static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd) +{ + return (struct oz_hcd *)hcd->hcd_priv; +} + +/* + * Searches list of ports to find the index of the one with a specified USB + * bus address. If none of the ports has the bus address then the connection + * port is returned, if there is one or -1 otherwise. + * Context: any + */ +static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr) +{ + int i; + + for (i = 0; i < OZ_NB_PORTS; i++) { + if (ozhcd->ports[i].bus_addr == bus_addr) + return i; + } + return ozhcd->conn_port; +} + +/* + * Context: any + */ +static struct oz_urb_link *oz_alloc_urb_link(void) +{ + return kmem_cache_alloc(oz_urb_link_cache, GFP_ATOMIC); +} + +/* + * Context: any + */ +static void oz_free_urb_link(struct oz_urb_link *urbl) +{ + if (!urbl) + return; + + kmem_cache_free(oz_urb_link_cache, urbl); +} + +/* + * Allocates endpoint structure and optionally a buffer. If a buffer is + * allocated it immediately follows the endpoint structure. + * Context: softirq + */ +static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags) +{ + struct oz_endpoint *ep; + + ep = kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags); + if (!ep) + return NULL; + + INIT_LIST_HEAD(&ep->urb_list); + INIT_LIST_HEAD(&ep->link); + ep->credit = -1; + if (buffer_size) { + ep->buffer_size = buffer_size; + ep->buffer = (u8 *)(ep+1); + } + + return ep; +} + +/* + * Pre-condition: Must be called with g_tasklet_lock held and interrupts + * disabled. + * Context: softirq or process + */ +static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, + struct urb *urb) +{ + struct oz_urb_link *urbl; + + list_for_each_entry(urbl, &ozhcd->urb_cancel_list, link) { + if (urb == urbl->urb) { + list_del_init(&urbl->link); + return urbl; + } + } + return NULL; +} + +/* + * This is called when we have finished processing an urb. It unlinks it from + * the ep and returns it to the core. + * Context: softirq or process + */ +static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb, + int status) +{ + struct oz_hcd *ozhcd = oz_hcd_private(hcd); + unsigned long irq_state; + struct oz_urb_link *cancel_urbl; + + spin_lock_irqsave(&g_tasklet_lock, irq_state); + usb_hcd_unlink_urb_from_ep(hcd, urb); + /* Clear hcpriv which will prevent it being put in the cancel list + * in the event that an attempt is made to cancel it. + */ + urb->hcpriv = NULL; + /* Walk the cancel list in case the urb is already sitting there. + * Since we process the cancel list in a tasklet rather than in + * the dequeue function this could happen. + */ + cancel_urbl = oz_uncancel_urb(ozhcd, urb); + /* Note: we release lock but do not enable local irqs. + * It appears that usb_hcd_giveback_urb() expects irqs to be disabled, + * or at least other host controllers disable interrupts at this point + * so we do the same. We must, however, release the lock otherwise a + * deadlock will occur if an urb is submitted to our driver in the urb + * completion function. Because we disable interrupts it is possible + * that the urb_enqueue function can be called with them disabled. 
+ */ + spin_unlock(&g_tasklet_lock); + if (oz_forget_urb(urb)) { + oz_dbg(ON, "ERROR Unknown URB %p\n", urb); + } else { + atomic_dec(&g_pending_urbs); + usb_hcd_giveback_urb(hcd, urb, status); + } + spin_lock(&g_tasklet_lock); + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + oz_free_urb_link(cancel_urbl); +} + +/* + * Deallocates an endpoint including deallocating any associated stream and + * returning any queued urbs to the core. + * Context: softirq + */ +static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep) +{ + if (port) { + LIST_HEAD(list); + struct oz_hcd *ozhcd = port->ozhcd; + + if (ep->flags & OZ_F_EP_HAVE_STREAM) + oz_usb_stream_delete(port->hpd, ep->ep_num); + /* Transfer URBs to the orphanage while we hold the lock. */ + spin_lock_bh(&ozhcd->hcd_lock); + /* Note: this works even if ep->urb_list is empty.*/ + list_replace_init(&ep->urb_list, &list); + /* Put the URBs in the orphanage. */ + list_splice_tail(&list, &ozhcd->orphanage); + spin_unlock_bh(&ozhcd->hcd_lock); + } + oz_dbg(ON, "Freeing endpoint memory\n"); + kfree(ep); +} + +/* + * Context: softirq + */ +static void oz_complete_buffered_urb(struct oz_port *port, + struct oz_endpoint *ep, + struct urb *urb) +{ + int data_len, available_space, copy_len; + + data_len = ep->buffer[ep->out_ix]; + if (data_len <= urb->transfer_buffer_length) + available_space = data_len; + else + available_space = urb->transfer_buffer_length; + + if (++ep->out_ix == ep->buffer_size) + ep->out_ix = 0; + copy_len = ep->buffer_size - ep->out_ix; + if (copy_len >= available_space) + copy_len = available_space; + memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len); + + if (copy_len < available_space) { + memcpy((urb->transfer_buffer + copy_len), ep->buffer, + (available_space - copy_len)); + ep->out_ix = available_space - copy_len; + } else { + ep->out_ix += copy_len; + } + urb->actual_length = available_space; + if (ep->out_ix == ep->buffer_size) + ep->out_ix = 0; + + ep->buffered_units--; + oz_dbg(ON, "Trying to give back buffered frame of size=%d\n", + available_space); + oz_complete_urb(port->ozhcd->hcd, urb, 0); +} + +/* + * Context: softirq + */ +static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir, + struct urb *urb, u8 req_id) +{ + struct oz_urb_link *urbl; + struct oz_endpoint *ep = NULL; + int err = 0; + + if (ep_addr >= OZ_NB_ENDPOINTS) { + oz_dbg(ON, "%s: Invalid endpoint number\n", __func__); + return -EINVAL; + } + urbl = oz_alloc_urb_link(); + if (!urbl) + return -ENOMEM; + urbl->submit_counter = 0; + urbl->urb = urb; + urbl->req_id = req_id; + urbl->ep_num = ep_addr; + /* Hold lock while we insert the URB into the list within the + * endpoint structure. + */ + spin_lock_bh(&port->ozhcd->hcd_lock); + /* If the urb has been unlinked while out of any list then + * complete it now. 
+ */ + if (urb->unlinked) { + spin_unlock_bh(&port->ozhcd->hcd_lock); + oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb); + oz_complete_urb(port->ozhcd->hcd, urb, 0); + oz_free_urb_link(urbl); + return 0; + } + + if (in_dir) + ep = port->in_ep[ep_addr]; + else + ep = port->out_ep[ep_addr]; + if (!ep) { + err = -ENOMEM; + goto out; + } + + /*For interrupt endpoint check for buffered data + * & complete urb + */ + if (((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) + && ep->buffered_units > 0) { + oz_free_urb_link(urbl); + spin_unlock_bh(&port->ozhcd->hcd_lock); + oz_complete_buffered_urb(port, ep, urb); + return 0; + } + + if (port->hpd) { + list_add_tail(&urbl->link, &ep->urb_list); + if (!in_dir && ep_addr && (ep->credit < 0)) { + getrawmonotonic(&ep->timestamp); + ep->credit = 0; + } + } else { + err = -EPIPE; + } +out: + spin_unlock_bh(&port->ozhcd->hcd_lock); + if (err) + oz_free_urb_link(urbl); + return err; +} + +/* + * Removes an urb from the queue in the endpoint. + * Returns 0 if it is found and -EIDRM otherwise. + * Context: softirq + */ +static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir, + struct urb *urb) +{ + struct oz_urb_link *urbl = NULL; + struct oz_endpoint *ep; + + spin_lock_bh(&port->ozhcd->hcd_lock); + if (in_dir) + ep = port->in_ep[ep_addr]; + else + ep = port->out_ep[ep_addr]; + if (ep) { + struct list_head *e; + + list_for_each(e, &ep->urb_list) { + urbl = list_entry(e, struct oz_urb_link, link); + if (urbl->urb == urb) { + list_del_init(e); + break; + } + urbl = NULL; + } + } + spin_unlock_bh(&port->ozhcd->hcd_lock); + oz_free_urb_link(urbl); + return urbl ? 0 : -EIDRM; +} + +/* + * Finds an urb given its request id. + * Context: softirq + */ +static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix, + u8 req_id) +{ + struct oz_hcd *ozhcd = port->ozhcd; + struct urb *urb = NULL; + struct oz_urb_link *urbl; + struct oz_endpoint *ep; + + spin_lock_bh(&ozhcd->hcd_lock); + ep = port->out_ep[ep_ix]; + if (ep) { + struct list_head *e; + + list_for_each(e, &ep->urb_list) { + urbl = list_entry(e, struct oz_urb_link, link); + if (urbl->req_id == req_id) { + urb = urbl->urb; + list_del_init(e); + break; + } + } + } + spin_unlock_bh(&ozhcd->hcd_lock); + /* If urb is non-zero then we we must have an urb link to delete. + */ + if (urb) + oz_free_urb_link(urbl); + return urb; +} + +/* + * Pre-condition: Port lock must be held. + * Context: softirq + */ +static void oz_acquire_port(struct oz_port *port, void *hpd) +{ + INIT_LIST_HEAD(&port->isoc_out_ep); + INIT_LIST_HEAD(&port->isoc_in_ep); + port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED; + port->status |= USB_PORT_STAT_CONNECTION | + (USB_PORT_STAT_C_CONNECTION << 16); + oz_usb_get(hpd); + port->hpd = hpd; +} + +/* + * Context: softirq + */ +static struct oz_hcd *oz_hcd_claim(void) +{ + struct oz_hcd *ozhcd; + + spin_lock_bh(&g_hcdlock); + ozhcd = g_ozhcd; + if (ozhcd) + usb_get_hcd(ozhcd->hcd); + spin_unlock_bh(&g_hcdlock); + return ozhcd; +} + +/* + * Context: softirq + */ +static inline void oz_hcd_put(struct oz_hcd *ozhcd) +{ + if (ozhcd) + usb_put_hcd(ozhcd->hcd); +} + +/* + * This is called by the protocol handler to notify that a PD has arrived. + * We allocate a port to associate with the PD and create a structure for + * endpoint 0. This port is made the connection port. + * In the event that one of the other port is already a connection port then + * we fail. 
+ * TODO We should be able to do better than fail and should be able remember + * that this port needs configuring and make it the connection port once the + * current connection port has been assigned an address. Collisions here are + * probably very rare indeed. + * Context: softirq + */ +struct oz_port *oz_hcd_pd_arrived(void *hpd) +{ + int i; + struct oz_port *hport; + struct oz_hcd *ozhcd; + struct oz_endpoint *ep; + + ozhcd = oz_hcd_claim(); + if (!ozhcd) + return NULL; + /* Allocate an endpoint object in advance (before holding hcd lock) to + * use for out endpoint 0. + */ + ep = oz_ep_alloc(0, GFP_ATOMIC); + if (!ep) + goto err_put; + + spin_lock_bh(&ozhcd->hcd_lock); + if (ozhcd->conn_port >= 0) + goto err_unlock; + + for (i = 0; i < OZ_NB_PORTS; i++) { + struct oz_port *port = &ozhcd->ports[i]; + + spin_lock(&port->port_lock); + if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) { + oz_acquire_port(port, hpd); + spin_unlock(&port->port_lock); + break; + } + spin_unlock(&port->port_lock); + } + if (i == OZ_NB_PORTS) + goto err_unlock; + + ozhcd->conn_port = i; + hport = &ozhcd->ports[i]; + hport->out_ep[0] = ep; + spin_unlock_bh(&ozhcd->hcd_lock); + if (ozhcd->flags & OZ_HDC_F_SUSPENDED) + usb_hcd_resume_root_hub(ozhcd->hcd); + usb_hcd_poll_rh_status(ozhcd->hcd); + oz_hcd_put(ozhcd); + + return hport; + +err_unlock: + spin_unlock_bh(&ozhcd->hcd_lock); + oz_ep_free(NULL, ep); +err_put: + oz_hcd_put(ozhcd); + return NULL; +} + +/* + * This is called by the protocol handler to notify that the PD has gone away. + * We need to deallocate all resources and then request that the root hub is + * polled. We release the reference we hold on the PD. + * Context: softirq + */ +void oz_hcd_pd_departed(struct oz_port *port) +{ + struct oz_hcd *ozhcd; + void *hpd; + struct oz_endpoint *ep = NULL; + + if (port == NULL) { + oz_dbg(ON, "%s: port = 0\n", __func__); + return; + } + ozhcd = port->ozhcd; + if (ozhcd == NULL) + return; + /* Check if this is the connection port - if so clear it. + */ + spin_lock_bh(&ozhcd->hcd_lock); + if ((ozhcd->conn_port >= 0) && + (port == &ozhcd->ports[ozhcd->conn_port])) { + oz_dbg(ON, "Clearing conn_port\n"); + ozhcd->conn_port = -1; + } + spin_lock(&port->port_lock); + port->flags |= OZ_PORT_F_DYING; + spin_unlock(&port->port_lock); + spin_unlock_bh(&ozhcd->hcd_lock); + + oz_clean_endpoints_for_config(ozhcd->hcd, port); + spin_lock_bh(&port->port_lock); + hpd = port->hpd; + port->hpd = NULL; + port->bus_addr = 0xff; + port->config_num = 0; + port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING); + port->flags |= OZ_PORT_F_CHANGED; + port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE); + port->status |= (USB_PORT_STAT_C_CONNECTION << 16); + /* If there is an endpont 0 then clear the pointer while we hold + * the spinlock be we deallocate it after releasing the lock. + */ + if (port->out_ep[0]) { + ep = port->out_ep[0]; + port->out_ep[0] = NULL; + } + spin_unlock_bh(&port->port_lock); + if (ep) + oz_ep_free(port, ep); + usb_hcd_poll_rh_status(ozhcd->hcd); + oz_usb_put(hpd); +} + +/* + * Context: softirq + */ +void oz_hcd_pd_reset(void *hpd, void *hport) +{ + /* Cleanup the current configuration and report reset to the core. 
+ */ + struct oz_port *port = hport; + struct oz_hcd *ozhcd = port->ozhcd; + + oz_dbg(ON, "PD Reset\n"); + spin_lock_bh(&port->port_lock); + port->flags |= OZ_PORT_F_CHANGED; + port->status |= USB_PORT_STAT_RESET; + port->status |= (USB_PORT_STAT_C_RESET << 16); + spin_unlock_bh(&port->port_lock); + oz_clean_endpoints_for_config(ozhcd->hcd, port); + usb_hcd_poll_rh_status(ozhcd->hcd); +} + +/* + * Context: softirq + */ +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc, + u8 length, u16 offset, u16 total_size) +{ + struct oz_port *port = hport; + struct urb *urb; + int err = 0; + + oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n", + length, offset, total_size); + urb = oz_find_urb_by_id(port, 0, req_id); + if (!urb) + return; + if (status == 0) { + unsigned int copy_len; + unsigned int required_size = urb->transfer_buffer_length; + + if (required_size > total_size) + required_size = total_size; + copy_len = required_size-offset; + if (length <= copy_len) + copy_len = length; + memcpy(urb->transfer_buffer+offset, desc, copy_len); + offset += copy_len; + if (offset < required_size) { + struct usb_ctrlrequest *setup = + (struct usb_ctrlrequest *)urb->setup_packet; + unsigned wvalue = le16_to_cpu(setup->wValue); + + if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id)) + err = -ENOMEM; + else if (oz_usb_get_desc_req(port->hpd, req_id, + setup->bRequestType, (u8)(wvalue>>8), + (u8)wvalue, setup->wIndex, offset, + required_size-offset)) { + oz_dequeue_ep_urb(port, 0, 0, urb); + err = -ENOMEM; + } + if (err == 0) + return; + } + } + urb->actual_length = total_size; + oz_complete_urb(port->ozhcd->hcd, urb, 0); +} + +/* + * Context: softirq + */ +static void oz_display_conf_type(u8 t) +{ + switch (t) { + case USB_REQ_GET_STATUS: + oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n"); + break; + case USB_REQ_CLEAR_FEATURE: + oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n"); + break; + case USB_REQ_SET_FEATURE: + oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n"); + break; + case USB_REQ_SET_ADDRESS: + oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n"); + break; + case USB_REQ_GET_DESCRIPTOR: + oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); + break; + case USB_REQ_SET_DESCRIPTOR: + oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n"); + break; + case USB_REQ_GET_CONFIGURATION: + oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n"); + break; + case USB_REQ_SET_CONFIGURATION: + oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n"); + break; + case USB_REQ_GET_INTERFACE: + oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n"); + break; + case USB_REQ_SET_INTERFACE: + oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n"); + break; + case USB_REQ_SYNCH_FRAME: + oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n"); + break; + } +} + +/* + * Context: softirq + */ +static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb, + u8 rcode, u8 config_num) +{ + int rc = 0; + struct usb_hcd *hcd = port->ozhcd->hcd; + + if (rcode == 0) { + port->config_num = config_num; + oz_clean_endpoints_for_config(hcd, port); + if (oz_build_endpoints_for_config(hcd, port, + &urb->dev->config[port->config_num-1], GFP_ATOMIC)) { + rc = -ENOMEM; + } + } else { + rc = -ENOMEM; + } + oz_complete_urb(hcd, urb, rc); +} + +/* + * Context: softirq + */ +static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb, + u8 rcode, u8 if_num, u8 alt) +{ + struct usb_hcd *hcd = port->ozhcd->hcd; + int rc = 0; + + if ((rcode == 0) && (port->config_num > 0)) { + struct usb_host_config *config; + struct usb_host_interface *intf; + + oz_dbg(ON, "Set 
interface %d alt %d\n", if_num, alt); + oz_clean_endpoints_for_interface(hcd, port, if_num); + config = &urb->dev->config[port->config_num-1]; + intf = &config->intf_cache[if_num]->altsetting[alt]; + if (oz_build_endpoints_for_interface(hcd, port, intf, + GFP_ATOMIC)) + rc = -ENOMEM; + else + port->iface[if_num].alt = alt; + } else { + rc = -ENOMEM; + } + oz_complete_urb(hcd, urb, rc); +} + +/* + * Context: softirq + */ +void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data, + int data_len) +{ + struct oz_port *port = hport; + struct urb *urb; + struct usb_ctrlrequest *setup; + struct usb_hcd *hcd = port->ozhcd->hcd; + unsigned windex; + unsigned wvalue; + + oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len); + urb = oz_find_urb_by_id(port, 0, req_id); + if (!urb) { + oz_dbg(ON, "URB not found\n"); + return; + } + setup = (struct usb_ctrlrequest *)urb->setup_packet; + windex = le16_to_cpu(setup->wIndex); + wvalue = le16_to_cpu(setup->wValue); + if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + /* Standard requests */ + oz_display_conf_type(setup->bRequest); + switch (setup->bRequest) { + case USB_REQ_SET_CONFIGURATION: + oz_hcd_complete_set_config(port, urb, rcode, + (u8)wvalue); + break; + case USB_REQ_SET_INTERFACE: + oz_hcd_complete_set_interface(port, urb, rcode, + (u8)windex, (u8)wvalue); + break; + default: + oz_complete_urb(hcd, urb, 0); + } + + } else { + int copy_len; + + oz_dbg(ON, "VENDOR-CLASS - cnf\n"); + if (data_len) { + if (data_len <= urb->transfer_buffer_length) + copy_len = data_len; + else + copy_len = urb->transfer_buffer_length; + memcpy(urb->transfer_buffer, data, copy_len); + urb->actual_length = copy_len; + } + oz_complete_urb(hcd, urb, 0); + } +} + +/* + * Context: softirq-serialized + */ +static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data, + int data_len) +{ + int space; + int copy_len; + + if (!ep->buffer) + return -1; + space = ep->out_ix-ep->in_ix-1; + if (space < 0) + space += ep->buffer_size; + if (space < (data_len+1)) { + oz_dbg(ON, "Buffer full\n"); + return -1; + } + ep->buffer[ep->in_ix] = (u8)data_len; + if (++ep->in_ix == ep->buffer_size) + ep->in_ix = 0; + copy_len = ep->buffer_size - ep->in_ix; + if (copy_len > data_len) + copy_len = data_len; + memcpy(&ep->buffer[ep->in_ix], data, copy_len); + + if (copy_len < data_len) { + memcpy(ep->buffer, data+copy_len, data_len-copy_len); + ep->in_ix = data_len-copy_len; + } else { + ep->in_ix += copy_len; + } + if (ep->in_ix == ep->buffer_size) + ep->in_ix = 0; + ep->buffered_units++; + return 0; +} + +/* + * Context: softirq-serialized + */ +void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len) +{ + struct oz_port *port = (struct oz_port *)hport; + struct oz_endpoint *ep; + struct oz_hcd *ozhcd = port->ozhcd; + + spin_lock_bh(&ozhcd->hcd_lock); + ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK]; + if (ep == NULL) + goto done; + switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) { + case USB_ENDPOINT_XFER_INT: + case USB_ENDPOINT_XFER_BULK: + if (!list_empty(&ep->urb_list)) { + struct oz_urb_link *urbl = + list_first_entry(&ep->urb_list, + struct oz_urb_link, link); + struct urb *urb; + int copy_len; + + list_del_init(&urbl->link); + spin_unlock_bh(&ozhcd->hcd_lock); + urb = urbl->urb; + oz_free_urb_link(urbl); + if (data_len <= urb->transfer_buffer_length) + copy_len = data_len; + else + copy_len = urb->transfer_buffer_length; + memcpy(urb->transfer_buffer, data, copy_len); + urb->actual_length = copy_len; + 
oz_complete_urb(port->ozhcd->hcd, urb, 0); + return; + } + oz_dbg(ON, "buffering frame as URB is not available\n"); + oz_hcd_buffer_data(ep, data, data_len); + break; + case USB_ENDPOINT_XFER_ISOC: + oz_hcd_buffer_data(ep, data, data_len); + break; + } +done: + spin_unlock_bh(&ozhcd->hcd_lock); +} + +/* + * Context: unknown + */ +static inline int oz_usb_get_frame_number(void) +{ + return atomic_inc_return(&g_usb_frame_number); +} + +/* + * Context: softirq + */ +int oz_hcd_heartbeat(void *hport) +{ + int rc = 0; + struct oz_port *port = hport; + struct oz_hcd *ozhcd = port->ozhcd; + struct oz_urb_link *urbl, *n; + LIST_HEAD(xfr_list); + struct urb *urb; + struct oz_endpoint *ep; + struct timespec ts, delta; + + getrawmonotonic(&ts); + /* Check the OUT isoc endpoints to see if any URB data can be sent. + */ + spin_lock_bh(&ozhcd->hcd_lock); + list_for_each_entry(ep, &port->isoc_out_ep, link) { + if (ep->credit < 0) + continue; + delta = timespec_sub(ts, ep->timestamp); + ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC); + if (ep->credit > ep->credit_ceiling) + ep->credit = ep->credit_ceiling; + ep->timestamp = ts; + while (ep->credit && !list_empty(&ep->urb_list)) { + urbl = list_first_entry(&ep->urb_list, + struct oz_urb_link, link); + urb = urbl->urb; + if ((ep->credit + 1) < urb->number_of_packets) + break; + ep->credit -= urb->number_of_packets; + if (ep->credit < 0) + ep->credit = 0; + list_move_tail(&urbl->link, &xfr_list); + } + } + spin_unlock_bh(&ozhcd->hcd_lock); + /* Send to PD and complete URBs. + */ + list_for_each_entry_safe(urbl, n, &xfr_list, link) { + urb = urbl->urb; + list_del_init(&urbl->link); + urb->error_count = 0; + urb->start_frame = oz_usb_get_frame_number(); + oz_usb_send_isoc(port->hpd, urbl->ep_num, urb); + oz_free_urb_link(urbl); + oz_complete_urb(port->ozhcd->hcd, urb, 0); + } + /* Check the IN isoc endpoints to see if any URBs can be completed. 
+ */ + spin_lock_bh(&ozhcd->hcd_lock); + list_for_each_entry(ep, &port->isoc_in_ep, link) { + if (ep->flags & OZ_F_EP_BUFFERING) { + if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) { + ep->flags &= ~OZ_F_EP_BUFFERING; + ep->credit = 0; + ep->timestamp = ts; + ep->start_frame = 0; + } + continue; + } + delta = timespec_sub(ts, ep->timestamp); + ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC); + ep->timestamp = ts; + list_for_each_entry_safe(urbl, n, &ep->urb_list, link) { + struct urb *urb = urbl->urb; + int len = 0; + int copy_len; + int i; + + if (ep->credit < urb->number_of_packets) + break; + if (ep->buffered_units < urb->number_of_packets) + break; + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) { + len = ep->buffer[ep->out_ix]; + if (++ep->out_ix == ep->buffer_size) + ep->out_ix = 0; + copy_len = ep->buffer_size - ep->out_ix; + if (copy_len > len) + copy_len = len; + memcpy(urb->transfer_buffer, + &ep->buffer[ep->out_ix], copy_len); + if (copy_len < len) { + memcpy(urb->transfer_buffer+copy_len, + ep->buffer, len-copy_len); + ep->out_ix = len-copy_len; + } else + ep->out_ix += copy_len; + if (ep->out_ix == ep->buffer_size) + ep->out_ix = 0; + urb->iso_frame_desc[i].offset = + urb->actual_length; + urb->actual_length += len; + urb->iso_frame_desc[i].actual_length = len; + urb->iso_frame_desc[i].status = 0; + } + ep->buffered_units -= urb->number_of_packets; + urb->error_count = 0; + urb->start_frame = ep->start_frame; + ep->start_frame += urb->number_of_packets; + list_move_tail(&urbl->link, &xfr_list); + ep->credit -= urb->number_of_packets; + } + } + if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep)) + rc = 1; + spin_unlock_bh(&ozhcd->hcd_lock); + /* Complete the filled URBs. + */ + list_for_each_entry_safe(urbl, n, &xfr_list, link) { + urb = urbl->urb; + list_del_init(&urbl->link); + oz_free_urb_link(urbl); + oz_complete_urb(port->ozhcd->hcd, urb, 0); + } + /* Check if there are any ep0 requests that have timed out. + * If so resent to PD. 
+ */ + ep = port->out_ep[0]; + if (ep) { + spin_lock_bh(&ozhcd->hcd_lock); + list_for_each_entry_safe(urbl, n, &ep->urb_list, link) { + if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) { + oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb); + list_move_tail(&urbl->link, &xfr_list); + urbl->submit_counter = 0; + } else { + urbl->submit_counter++; + } + } + if (!list_empty(&ep->urb_list)) + rc = 1; + spin_unlock_bh(&ozhcd->hcd_lock); + list_for_each_entry_safe(urbl, n, &xfr_list, link) { + oz_dbg(ON, "Resending request to PD\n"); + oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC); + oz_free_urb_link(urbl); + } + } + return rc; +} + +/* + * Context: softirq + */ +static int oz_build_endpoints_for_interface(struct usb_hcd *hcd, + struct oz_port *port, + struct usb_host_interface *intf, gfp_t mem_flags) +{ + struct oz_hcd *ozhcd = port->ozhcd; + int i; + int if_ix = intf->desc.bInterfaceNumber; + int request_heartbeat = 0; + + oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf); + if (if_ix >= port->num_iface || port->iface == NULL) + return -ENOMEM; + for (i = 0; i < intf->desc.bNumEndpoints; i++) { + struct usb_host_endpoint *hep = &intf->endpoint[i]; + u8 ep_addr = hep->desc.bEndpointAddress; + u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK; + struct oz_endpoint *ep; + int buffer_size = 0; + + oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr); + if (ep_addr & USB_ENDPOINT_DIR_MASK) { + switch (hep->desc.bmAttributes & + USB_ENDPOINT_XFERTYPE_MASK) { + case USB_ENDPOINT_XFER_ISOC: + buffer_size = OZ_EP_BUFFER_SIZE_ISOC; + break; + case USB_ENDPOINT_XFER_INT: + buffer_size = OZ_EP_BUFFER_SIZE_INT; + break; + } + } + + ep = oz_ep_alloc(buffer_size, mem_flags); + if (!ep) { + oz_clean_endpoints_for_interface(hcd, port, if_ix); + return -ENOMEM; + } + ep->attrib = hep->desc.bmAttributes; + ep->ep_num = ep_num; + if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_ISOC) { + oz_dbg(ON, "wMaxPacketSize = %d\n", + usb_endpoint_maxp(&hep->desc)); + ep->credit_ceiling = 200; + if (ep_addr & USB_ENDPOINT_DIR_MASK) { + ep->flags |= OZ_F_EP_BUFFERING; + } else { + ep->flags |= OZ_F_EP_HAVE_STREAM; + if (oz_usb_stream_create(port->hpd, ep_num)) + ep->flags &= ~OZ_F_EP_HAVE_STREAM; + } + } + spin_lock_bh(&ozhcd->hcd_lock); + if (ep_addr & USB_ENDPOINT_DIR_MASK) { + port->in_ep[ep_num] = ep; + port->iface[if_ix].ep_mask |= + (1<<(ep_num+OZ_NB_ENDPOINTS)); + if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_ISOC) { + list_add_tail(&ep->link, &port->isoc_in_ep); + request_heartbeat = 1; + } + } else { + port->out_ep[ep_num] = ep; + port->iface[if_ix].ep_mask |= (1<<ep_num); + if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_ISOC) { + list_add_tail(&ep->link, &port->isoc_out_ep); + request_heartbeat = 1; + } + } + spin_unlock_bh(&ozhcd->hcd_lock); + if (request_heartbeat && port->hpd) + oz_usb_request_heartbeat(port->hpd); + } + return 0; +} + +/* + * Context: softirq + */ +static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd, + struct oz_port *port, int if_ix) +{ + struct oz_hcd *ozhcd = port->ozhcd; + unsigned mask; + int i; + LIST_HEAD(ep_list); + struct oz_endpoint *ep, *n; + + oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix); + if (if_ix >= port->num_iface) + return; + spin_lock_bh(&ozhcd->hcd_lock); + mask = port->iface[if_ix].ep_mask; + port->iface[if_ix].ep_mask = 0; + for (i = 0; i < OZ_NB_ENDPOINTS; i++) { + struct list_head *e; + /* Gather OUT endpoints. 
+ */ + if ((mask & (1<<i)) && port->out_ep[i]) { + e = &port->out_ep[i]->link; + port->out_ep[i] = NULL; + /* Remove from isoc list if present. + */ + list_move_tail(e, &ep_list); + } + /* Gather IN endpoints. + */ + if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) { + e = &port->in_ep[i]->link; + port->in_ep[i] = NULL; + list_move_tail(e, &ep_list); + } + } + spin_unlock_bh(&ozhcd->hcd_lock); + list_for_each_entry_safe(ep, n, &ep_list, link) { + list_del_init(&ep->link); + oz_ep_free(port, ep); + } +} + +/* + * Context: softirq + */ +static int oz_build_endpoints_for_config(struct usb_hcd *hcd, + struct oz_port *port, struct usb_host_config *config, + gfp_t mem_flags) +{ + struct oz_hcd *ozhcd = port->ozhcd; + int i; + int num_iface = config->desc.bNumInterfaces; + + if (num_iface) { + struct oz_interface *iface; + + iface = kmalloc_array(num_iface, sizeof(struct oz_interface), + mem_flags | __GFP_ZERO); + if (!iface) + return -ENOMEM; + spin_lock_bh(&ozhcd->hcd_lock); + port->iface = iface; + port->num_iface = num_iface; + spin_unlock_bh(&ozhcd->hcd_lock); + } + for (i = 0; i < num_iface; i++) { + struct usb_host_interface *intf = + &config->intf_cache[i]->altsetting[0]; + if (oz_build_endpoints_for_interface(hcd, port, intf, + mem_flags)) + goto fail; + } + return 0; +fail: + oz_clean_endpoints_for_config(hcd, port); + return -1; +} + +/* + * Context: softirq + */ +static void oz_clean_endpoints_for_config(struct usb_hcd *hcd, + struct oz_port *port) +{ + struct oz_hcd *ozhcd = port->ozhcd; + int i; + + oz_dbg(ON, "Deleting endpoints for configuration\n"); + for (i = 0; i < port->num_iface; i++) + oz_clean_endpoints_for_interface(hcd, port, i); + spin_lock_bh(&ozhcd->hcd_lock); + if (port->iface) { + oz_dbg(ON, "Freeing interfaces object\n"); + kfree(port->iface); + port->iface = NULL; + } + port->num_iface = 0; + spin_unlock_bh(&ozhcd->hcd_lock); +} + +/* + * Context: tasklet + */ +static void *oz_claim_hpd(struct oz_port *port) +{ + void *hpd; + struct oz_hcd *ozhcd = port->ozhcd; + + spin_lock_bh(&ozhcd->hcd_lock); + hpd = port->hpd; + if (hpd) + oz_usb_get(hpd); + spin_unlock_bh(&ozhcd->hcd_lock); + return hpd; +} + +/* + * Context: tasklet + */ +static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb, + gfp_t mem_flags) +{ + struct usb_ctrlrequest *setup; + unsigned windex; + unsigned wvalue; + unsigned wlength; + void *hpd; + u8 req_id; + int rc = 0; + unsigned complete = 0; + + int port_ix = -1; + struct oz_port *port = NULL; + + oz_dbg(URB, "[%s]:(%p)\n", __func__, urb); + port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum); + if (port_ix < 0) { + rc = -EPIPE; + goto out; + } + port = &ozhcd->ports[port_ix]; + if (((port->flags & OZ_PORT_F_PRESENT) == 0) + || (port->flags & OZ_PORT_F_DYING)) { + oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n", + port_ix, urb->dev->devnum); + rc = -EPIPE; + goto out; + } + /* Store port in private context data. 
+ */ + urb->hcpriv = port; + setup = (struct usb_ctrlrequest *)urb->setup_packet; + windex = le16_to_cpu(setup->wIndex); + wvalue = le16_to_cpu(setup->wValue); + wlength = le16_to_cpu(setup->wLength); + oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType); + oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest); + oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue); + oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex); + oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength); + + req_id = port->next_req_id++; + hpd = oz_claim_hpd(port); + if (hpd == NULL) { + oz_dbg(ON, "Cannot claim port\n"); + rc = -EPIPE; + goto out; + } + + if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + /* Standard requests + */ + switch (setup->bRequest) { + case USB_REQ_GET_DESCRIPTOR: + oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n"); + break; + case USB_REQ_SET_ADDRESS: + oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n"); + oz_dbg(ON, "Port %d address is 0x%x\n", + ozhcd->conn_port, + (u8)le16_to_cpu(setup->wValue)); + spin_lock_bh(&ozhcd->hcd_lock); + if (ozhcd->conn_port >= 0) { + ozhcd->ports[ozhcd->conn_port].bus_addr = + (u8)le16_to_cpu(setup->wValue); + oz_dbg(ON, "Clearing conn_port\n"); + ozhcd->conn_port = -1; + } + spin_unlock_bh(&ozhcd->hcd_lock); + complete = 1; + break; + case USB_REQ_SET_CONFIGURATION: + oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n"); + break; + case USB_REQ_GET_CONFIGURATION: + /* We short circuit this case and reply directly since + * we have the selected configuration number cached. + */ + oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n"); + if (urb->transfer_buffer_length >= 1) { + urb->actual_length = 1; + *((u8 *)urb->transfer_buffer) = + port->config_num; + complete = 1; + } else { + rc = -EPIPE; + } + break; + case USB_REQ_GET_INTERFACE: + /* We short circuit this case and reply directly since + * we have the selected interface alternative cached. + */ + oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n"); + if (urb->transfer_buffer_length >= 1) { + urb->actual_length = 1; + *((u8 *)urb->transfer_buffer) = + port->iface[(u8)windex].alt; + oz_dbg(ON, "interface = %d alt = %d\n", + windex, port->iface[(u8)windex].alt); + complete = 1; + } else { + rc = -EPIPE; + } + break; + case USB_REQ_SET_INTERFACE: + oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n"); + break; + } + } + if (!rc && !complete) { + int data_len = 0; + + if ((setup->bRequestType & USB_DIR_IN) == 0) + data_len = wlength; + urb->actual_length = data_len; + if (oz_usb_control_req(port->hpd, req_id, setup, + urb->transfer_buffer, data_len)) { + rc = -ENOMEM; + } else { + /* Note: we are queuing the request after we have + * submitted it to be transmitted. If the request were + * to complete before we queued it then it would not + * be found in the queue. It seems impossible for + * this to happen but if it did the request would + * be resubmitted so the problem would hopefully + * resolve itself. Putting the request into the + * queue before it has been sent is worse since the + * urb could be cancelled while we are using it + * to build the request. 
+ */ + if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id)) + rc = -ENOMEM; + } + } + oz_usb_put(hpd); +out: + if (rc || complete) { + oz_dbg(ON, "Completing request locally\n"); + oz_complete_urb(ozhcd->hcd, urb, rc); + } else { + oz_usb_request_heartbeat(port->hpd); + } +} + +/* + * Context: tasklet + */ +static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb) +{ + int rc = 0; + struct oz_port *port = urb->hcpriv; + u8 ep_addr; + + /* When we are paranoid we keep a list of urbs which we check against + * before handing one back. This is just for debugging during + * development and should be turned off in the released driver. + */ + oz_remember_urb(urb); + /* Check buffer is valid. + */ + if (!urb->transfer_buffer && urb->transfer_buffer_length) + return -EINVAL; + /* Check if there is a device at the port - refuse if not. + */ + if ((port->flags & OZ_PORT_F_PRESENT) == 0) + return -EPIPE; + ep_addr = usb_pipeendpoint(urb->pipe); + if (ep_addr) { + /* If the request is not for EP0 then queue it. + */ + if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe), + urb, 0)) + rc = -EPIPE; + } else { + oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC); + } + return rc; +} + +/* + * Context: tasklet + */ +static void oz_urb_process_tasklet(unsigned long unused) +{ + unsigned long irq_state; + struct urb *urb; + struct oz_hcd *ozhcd = oz_hcd_claim(); + struct oz_urb_link *urbl, *n; + int rc = 0; + + if (ozhcd == NULL) + return; + /* This is called from a tasklet so is in softirq context but the urb + * list is filled from any context so we need to lock + * appropriately while removing urbs. + */ + spin_lock_irqsave(&g_tasklet_lock, irq_state); + list_for_each_entry_safe(urbl, n, &ozhcd->urb_pending_list, link) { + list_del_init(&urbl->link); + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + urb = urbl->urb; + oz_free_urb_link(urbl); + rc = oz_urb_process(ozhcd, urb); + if (rc) + oz_complete_urb(ozhcd->hcd, urb, rc); + spin_lock_irqsave(&g_tasklet_lock, irq_state); + } + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + oz_hcd_put(ozhcd); +} + +/* + * This function searches for the urb in any of the lists it could be in. + * If it is found it is removed from the list and completed. If the urb is + * being processed then it won't be in a list so won't be found. However, the + * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field + * to a non-zero value. When an attempt is made to put the urb back in a list + * the unlinked field will be checked and the urb will then be completed. + * Context: tasklet + */ +static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb) +{ + struct oz_urb_link *urbl = NULL; + struct list_head *e; + struct oz_hcd *ozhcd; + unsigned long irq_state; + u8 ix; + + if (port == NULL) { + oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb); + return; + } + ozhcd = port->ozhcd; + if (ozhcd == NULL) { + oz_dbg(ON, "%s; ERROR: (%p) ozhcd is null\n", __func__, urb); + return; + } + + /* Look in the tasklet queue. + */ + spin_lock_irqsave(&g_tasklet_lock, irq_state); + list_for_each(e, &ozhcd->urb_cancel_list) { + urbl = list_entry(e, struct oz_urb_link, link); + if (urb == urbl->urb) { + list_del_init(e); + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + goto out2; + } + } + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + urbl = NULL; + + /* Look in the orphanage. 
+ */ + spin_lock_irqsave(&ozhcd->hcd_lock, irq_state); + list_for_each(e, &ozhcd->orphanage) { + urbl = list_entry(e, struct oz_urb_link, link); + if (urbl->urb == urb) { + list_del(e); + oz_dbg(ON, "Found urb in orphanage\n"); + goto out; + } + } + ix = (ep_num & 0xf); + urbl = NULL; + if ((ep_num & USB_DIR_IN) && ix) + urbl = oz_remove_urb(port->in_ep[ix], urb); + else + urbl = oz_remove_urb(port->out_ep[ix], urb); +out: + spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state); +out2: + if (urbl) { + urb->actual_length = 0; + oz_free_urb_link(urbl); + oz_complete_urb(ozhcd->hcd, urb, -EPIPE); + } +} + +/* + * Context: tasklet + */ +static void oz_urb_cancel_tasklet(unsigned long unused) +{ + unsigned long irq_state; + struct urb *urb; + struct oz_urb_link *urbl, *n; + struct oz_hcd *ozhcd = oz_hcd_claim(); + + if (ozhcd == NULL) + return; + spin_lock_irqsave(&g_tasklet_lock, irq_state); + list_for_each_entry_safe(urbl, n, &ozhcd->urb_cancel_list, link) { + list_del_init(&urbl->link); + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + urb = urbl->urb; + if (urb->unlinked) + oz_urb_cancel(urbl->port, urbl->ep_num, urb); + oz_free_urb_link(urbl); + spin_lock_irqsave(&g_tasklet_lock, irq_state); + } + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + oz_hcd_put(ozhcd); +} + +/* + * Context: unknown + */ +static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status) +{ + if (ozhcd) { + struct oz_urb_link *urbl, *n; + + list_for_each_entry_safe(urbl, n, &ozhcd->orphanage, link) { + list_del(&urbl->link); + oz_complete_urb(ozhcd->hcd, urbl->urb, status); + oz_free_urb_link(urbl); + } + } +} + +/* + * Context: unknown + */ +static int oz_hcd_start(struct usb_hcd *hcd) +{ + hcd->power_budget = 200; + hcd->state = HC_STATE_RUNNING; + hcd->uses_new_polling = 1; + return 0; +} + +/* + * Context: unknown + */ +static void oz_hcd_stop(struct usb_hcd *hcd) +{ +} + +/* + * Context: unknown + */ +static void oz_hcd_shutdown(struct usb_hcd *hcd) +{ +} + +/* + * Called to queue an urb for the device. + * This function should return a non-zero error code if it fails the urb but + * should not call usb_hcd_giveback_urb(). + * Context: any + */ +static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + struct oz_hcd *ozhcd = oz_hcd_private(hcd); + int rc; + int port_ix; + struct oz_port *port; + unsigned long irq_state; + struct oz_urb_link *urbl; + + oz_dbg(URB, "%s: (%p)\n", __func__, urb); + if (unlikely(ozhcd == NULL)) { + oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb); + return -EPIPE; + } + if (unlikely(hcd->state != HC_STATE_RUNNING)) { + oz_dbg(URB, "Refused urb(%p) not running\n", urb); + return -EPIPE; + } + port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum); + if (port_ix < 0) + return -EPIPE; + port = &ozhcd->ports[port_ix]; + if (port == NULL) + return -EPIPE; + if (!(port->flags & OZ_PORT_F_PRESENT) || + (port->flags & OZ_PORT_F_CHANGED)) { + oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n", + port_ix, urb->dev->devnum); + return -EPIPE; + } + urb->hcpriv = port; + /* Put request in queue for processing by tasklet. 
+ */ + urbl = oz_alloc_urb_link(); + if (unlikely(urbl == NULL)) + return -ENOMEM; + urbl->urb = urb; + spin_lock_irqsave(&g_tasklet_lock, irq_state); + rc = usb_hcd_link_urb_to_ep(hcd, urb); + if (unlikely(rc)) { + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + oz_free_urb_link(urbl); + return rc; + } + list_add_tail(&urbl->link, &ozhcd->urb_pending_list); + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + tasklet_schedule(&g_urb_process_tasklet); + atomic_inc(&g_pending_urbs); + return 0; +} + +/* + * Context: tasklet + */ +static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep, + struct urb *urb) +{ + struct oz_urb_link *urbl; + + if (unlikely(ep == NULL)) + return NULL; + + list_for_each_entry(urbl, &ep->urb_list, link) { + if (urbl->urb == urb) { + list_del_init(&urbl->link); + if (usb_pipeisoc(urb->pipe)) { + ep->credit -= urb->number_of_packets; + if (ep->credit < 0) + ep->credit = 0; + } + return urbl; + } + } + return NULL; +} + +/* + * Called to dequeue a previously submitted urb for the device. + * Context: any + */ +static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct oz_hcd *ozhcd = oz_hcd_private(hcd); + struct oz_urb_link *urbl; + int rc; + unsigned long irq_state; + + oz_dbg(URB, "%s: (%p)\n", __func__, urb); + urbl = oz_alloc_urb_link(); + if (unlikely(urbl == NULL)) + return -ENOMEM; + spin_lock_irqsave(&g_tasklet_lock, irq_state); + /* The following function checks the urb is still in the queue + * maintained by the core and that the unlinked field is zero. + * If both are true the function sets the unlinked field and returns + * zero. Otherwise it returns an error. + */ + rc = usb_hcd_check_unlink_urb(hcd, urb, status); + /* We have to check we haven't completed the urb or are about + * to complete it. When we do we set hcpriv to 0 so if this has + * already happened we don't put the urb in the cancel queue. + */ + if ((rc == 0) && urb->hcpriv) { + urbl->urb = urb; + urbl->port = (struct oz_port *)urb->hcpriv; + urbl->ep_num = usb_pipeendpoint(urb->pipe); + if (usb_pipein(urb->pipe)) + urbl->ep_num |= USB_DIR_IN; + list_add_tail(&urbl->link, &ozhcd->urb_cancel_list); + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + tasklet_schedule(&g_urb_cancel_tasklet); + } else { + spin_unlock_irqrestore(&g_tasklet_lock, irq_state); + oz_free_urb_link(urbl); + } + return rc; +} + +/* + * Context: unknown + */ +static void oz_hcd_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ +} + +/* + * Context: unknown + */ +static void oz_hcd_endpoint_reset(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ +} + +/* + * Context: unknown + */ +static int oz_hcd_get_frame_number(struct usb_hcd *hcd) +{ + oz_dbg(ON, "oz_hcd_get_frame_number\n"); + return oz_usb_get_frame_number(); +} + +/* + * Context: softirq + * This is called as a consquence of us calling usb_hcd_poll_rh_status() and we + * always do that in softirq context. 
+ */ +static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct oz_hcd *ozhcd = oz_hcd_private(hcd); + int i; + + buf[0] = 0; + buf[1] = 0; + + spin_lock_bh(&ozhcd->hcd_lock); + for (i = 0; i < OZ_NB_PORTS; i++) { + if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) { + oz_dbg(HUB, "Port %d changed\n", i); + ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED; + if (i < 7) + buf[0] |= 1 << (i + 1); + else + buf[1] |= 1 << (i - 7); + } + } + spin_unlock_bh(&ozhcd->hcd_lock); + if (buf[0] != 0 || buf[1] != 0) + return 2; + return 0; +} + +/* + * Context: process + */ +static void oz_get_hub_descriptor(struct usb_hcd *hcd, + struct usb_hub_descriptor *desc) +{ + memset(desc, 0, sizeof(*desc)); + desc->bDescriptorType = 0x29; + desc->bDescLength = 9; + desc->wHubCharacteristics = cpu_to_le16(0x0001); + desc->bNbrPorts = OZ_NB_PORTS; +} + +/* + * Context: process + */ +static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex) +{ + struct oz_port *port; + u8 port_id = (u8)windex; + struct oz_hcd *ozhcd = oz_hcd_private(hcd); + unsigned set_bits = 0; + unsigned clear_bits = 0; + + if ((port_id < 1) || (port_id > OZ_NB_PORTS)) + return -EPIPE; + port = &ozhcd->ports[port_id-1]; + switch (wvalue) { + case USB_PORT_FEAT_CONNECTION: + oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n"); + break; + case USB_PORT_FEAT_ENABLE: + oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n"); + break; + case USB_PORT_FEAT_SUSPEND: + oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n"); + break; + case USB_PORT_FEAT_OVER_CURRENT: + oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n"); + break; + case USB_PORT_FEAT_RESET: + oz_dbg(HUB, "USB_PORT_FEAT_RESET\n"); + set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16); + clear_bits = USB_PORT_STAT_RESET; + ozhcd->ports[port_id-1].bus_addr = 0; + break; + case USB_PORT_FEAT_POWER: + oz_dbg(HUB, "USB_PORT_FEAT_POWER\n"); + set_bits |= USB_PORT_STAT_POWER; + break; + case USB_PORT_FEAT_LOWSPEED: + oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n"); + break; + case USB_PORT_FEAT_C_CONNECTION: + oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n"); + break; + case USB_PORT_FEAT_C_ENABLE: + oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n"); + break; + case USB_PORT_FEAT_C_SUSPEND: + oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n"); + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n"); + break; + case USB_PORT_FEAT_C_RESET: + oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n"); + break; + case USB_PORT_FEAT_TEST: + oz_dbg(HUB, "USB_PORT_FEAT_TEST\n"); + break; + case USB_PORT_FEAT_INDICATOR: + oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n"); + break; + default: + oz_dbg(HUB, "Other %d\n", wvalue); + break; + } + if (set_bits || clear_bits) { + spin_lock_bh(&port->port_lock); + port->status &= ~clear_bits; + port->status |= set_bits; + spin_unlock_bh(&port->port_lock); + } + oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status); + return 0; +} + +/* + * Context: process + */ +static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex) +{ + struct oz_port *port; + u8 port_id = (u8)windex; + struct oz_hcd *ozhcd = oz_hcd_private(hcd); + unsigned clear_bits = 0; + + if ((port_id < 1) || (port_id > OZ_NB_PORTS)) + return -EPIPE; + port = &ozhcd->ports[port_id-1]; + switch (wvalue) { + case USB_PORT_FEAT_CONNECTION: + oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n"); + break; + case USB_PORT_FEAT_ENABLE: + oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n"); + clear_bits = USB_PORT_STAT_ENABLE; + break; + case USB_PORT_FEAT_SUSPEND: + oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n"); + break; + case 
USB_PORT_FEAT_OVER_CURRENT: + oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n"); + break; + case USB_PORT_FEAT_RESET: + oz_dbg(HUB, "USB_PORT_FEAT_RESET\n"); + break; + case USB_PORT_FEAT_POWER: + oz_dbg(HUB, "USB_PORT_FEAT_POWER\n"); + clear_bits |= USB_PORT_STAT_POWER; + break; + case USB_PORT_FEAT_LOWSPEED: + oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n"); + break; + case USB_PORT_FEAT_C_CONNECTION: + oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n"); + clear_bits = USB_PORT_STAT_C_CONNECTION << 16; + break; + case USB_PORT_FEAT_C_ENABLE: + oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n"); + clear_bits = USB_PORT_STAT_C_ENABLE << 16; + break; + case USB_PORT_FEAT_C_SUSPEND: + oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n"); + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n"); + break; + case USB_PORT_FEAT_C_RESET: + oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n"); + clear_bits = USB_PORT_FEAT_C_RESET << 16; + break; + case USB_PORT_FEAT_TEST: + oz_dbg(HUB, "USB_PORT_FEAT_TEST\n"); + break; + case USB_PORT_FEAT_INDICATOR: + oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n"); + break; + default: + oz_dbg(HUB, "Other %d\n", wvalue); + break; + } + if (clear_bits) { + spin_lock_bh(&port->port_lock); + port->status &= ~clear_bits; + spin_unlock_bh(&port->port_lock); + } + oz_dbg(HUB, "Port[%d] status = 0x%x\n", + port_id, ozhcd->ports[port_id-1].status); + return 0; +} + +/* + * Context: process + */ +static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf) +{ + struct oz_hcd *ozhcd; + u32 status; + + if ((windex < 1) || (windex > OZ_NB_PORTS)) + return -EPIPE; + ozhcd = oz_hcd_private(hcd); + oz_dbg(HUB, "GetPortStatus windex = %d\n", windex); + status = ozhcd->ports[windex-1].status; + put_unaligned(cpu_to_le32(status), (__le32 *)buf); + oz_dbg(HUB, "Port[%d] status = %x\n", windex, status); + return 0; +} + +/* + * Context: process + */ +static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue, + u16 windex, char *buf, u16 wlength) +{ + int err = 0; + + switch (req_type) { + case ClearHubFeature: + oz_dbg(HUB, "ClearHubFeature: %d\n", req_type); + break; + case ClearPortFeature: + err = oz_clear_port_feature(hcd, wvalue, windex); + break; + case GetHubDescriptor: + oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf); + break; + case GetHubStatus: + oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type); + put_unaligned(cpu_to_le32(0), (__le32 *)buf); + break; + case GetPortStatus: + err = oz_get_port_status(hcd, windex, buf); + break; + case SetHubFeature: + oz_dbg(HUB, "SetHubFeature: %d\n", req_type); + break; + case SetPortFeature: + err = oz_set_port_feature(hcd, wvalue, windex); + break; + default: + oz_dbg(HUB, "Other: %d\n", req_type); + break; + } + return err; +} + +/* + * Context: process + */ +static int oz_hcd_bus_suspend(struct usb_hcd *hcd) +{ + struct oz_hcd *ozhcd; + + ozhcd = oz_hcd_private(hcd); + spin_lock_bh(&ozhcd->hcd_lock); + hcd->state = HC_STATE_SUSPENDED; + ozhcd->flags |= OZ_HDC_F_SUSPENDED; + spin_unlock_bh(&ozhcd->hcd_lock); + return 0; +} + +/* + * Context: process + */ +static int oz_hcd_bus_resume(struct usb_hcd *hcd) +{ + struct oz_hcd *ozhcd; + + ozhcd = oz_hcd_private(hcd); + spin_lock_bh(&ozhcd->hcd_lock); + ozhcd->flags &= ~OZ_HDC_F_SUSPENDED; + hcd->state = HC_STATE_RUNNING; + spin_unlock_bh(&ozhcd->hcd_lock); + return 0; +} + +static void oz_plat_shutdown(struct platform_device *dev) +{ +} + +/* + * Context: process + */ +static int oz_plat_probe(struct platform_device *dev) +{ + int i; + int err; + struct usb_hcd 
*hcd; + struct oz_hcd *ozhcd; + + hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev)); + if (hcd == NULL) { + oz_dbg(ON, "Failed to created hcd object OK\n"); + return -ENOMEM; + } + ozhcd = oz_hcd_private(hcd); + memset(ozhcd, 0, sizeof(*ozhcd)); + INIT_LIST_HEAD(&ozhcd->urb_pending_list); + INIT_LIST_HEAD(&ozhcd->urb_cancel_list); + INIT_LIST_HEAD(&ozhcd->orphanage); + ozhcd->hcd = hcd; + ozhcd->conn_port = -1; + spin_lock_init(&ozhcd->hcd_lock); + for (i = 0; i < OZ_NB_PORTS; i++) { + struct oz_port *port = &ozhcd->ports[i]; + + port->ozhcd = ozhcd; + port->flags = 0; + port->status = 0; + port->bus_addr = 0xff; + spin_lock_init(&port->port_lock); + } + err = usb_add_hcd(hcd, 0, 0); + if (err) { + oz_dbg(ON, "Failed to add hcd object OK\n"); + usb_put_hcd(hcd); + return -1; + } + device_wakeup_enable(hcd->self.controller); + + spin_lock_bh(&g_hcdlock); + g_ozhcd = ozhcd; + spin_unlock_bh(&g_hcdlock); + return 0; +} + +/* + * Context: unknown + */ +static int oz_plat_remove(struct platform_device *dev) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct oz_hcd *ozhcd; + + if (hcd == NULL) + return -1; + ozhcd = oz_hcd_private(hcd); + spin_lock_bh(&g_hcdlock); + if (ozhcd == g_ozhcd) + g_ozhcd = NULL; + spin_unlock_bh(&g_hcdlock); + oz_dbg(ON, "Clearing orphanage\n"); + oz_hcd_clear_orphanage(ozhcd, -EPIPE); + oz_dbg(ON, "Removing hcd\n"); + usb_remove_hcd(hcd); + usb_put_hcd(hcd); + return 0; +} + +/* + * Context: unknown + */ +static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg) +{ + return 0; +} + + +/* + * Context: unknown + */ +static int oz_plat_resume(struct platform_device *dev) +{ + return 0; +} + +/* + * Context: process + */ +int oz_hcd_init(void) +{ + int err; + + if (usb_disabled()) + return -ENODEV; + + oz_urb_link_cache = KMEM_CACHE(oz_urb_link, 0); + if (!oz_urb_link_cache) + return -ENOMEM; + + tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0); + tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0); + err = platform_driver_register(&g_oz_plat_drv); + oz_dbg(ON, "platform_driver_register() returned %d\n", err); + if (err) + goto error; + g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1); + if (g_plat_dev == NULL) { + err = -ENOMEM; + goto error1; + } + oz_dbg(ON, "platform_device_alloc() succeeded\n"); + err = platform_device_add(g_plat_dev); + if (err) + goto error2; + oz_dbg(ON, "platform_device_add() succeeded\n"); + return 0; +error2: + platform_device_put(g_plat_dev); +error1: + platform_driver_unregister(&g_oz_plat_drv); +error: + tasklet_disable(&g_urb_process_tasklet); + tasklet_disable(&g_urb_cancel_tasklet); + oz_dbg(ON, "oz_hcd_init() failed %d\n", err); + return err; +} + +/* + * Context: process + */ +void oz_hcd_term(void) +{ + msleep(OZ_HUB_DEBOUNCE_TIMEOUT); + tasklet_kill(&g_urb_process_tasklet); + tasklet_kill(&g_urb_cancel_tasklet); + platform_device_unregister(g_plat_dev); + platform_driver_unregister(&g_oz_plat_drv); + oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs)); + kmem_cache_destroy(oz_urb_link_cache); +} diff --git a/kernel/drivers/staging/ozwpan/ozhcd.h b/kernel/drivers/staging/ozwpan/ozhcd.h new file mode 100644 index 000000000..55e97b1c7 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozhcd.h @@ -0,0 +1,15 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). 
+ * ---------------------------------------------------------------------------*/ +#ifndef _OZHCD_H +#define _OZHCD_H + +int oz_hcd_init(void); +void oz_hcd_term(void); +struct oz_port *oz_hcd_pd_arrived(void *ctx); +void oz_hcd_pd_departed(struct oz_port *hport); +void oz_hcd_pd_reset(void *hpd, void *hport); + +#endif /* _OZHCD_H */ + diff --git a/kernel/drivers/staging/ozwpan/ozmain.c b/kernel/drivers/staging/ozwpan/ozmain.c new file mode 100644 index 000000000..74ef34815 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozmain.c @@ -0,0 +1,71 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ozdbg.h" +#include "ozpd.h" +#include "ozproto.h" +#include "ozcdev.h" + +unsigned int oz_dbg_mask = OZ_DEFAULT_DBG_MASK; + +/* + * The name of the 802.11 mac device. Empty string is the default value but a + * value can be supplied as a parameter to the module. An empty string means + * bind to nothing. '*' means bind to all netcards - this includes non-802.11 + * netcards. Bindings can be added later using an IOCTL. + */ +static char *g_net_dev = ""; +module_param(g_net_dev, charp, S_IRUGO); +MODULE_PARM_DESC(g_net_dev, "The device(s) to bind to; " + "'*' means all, '' (empty string; default) means none."); + +/* + * Context: process + */ +static int __init ozwpan_init(void) +{ + int err; + + err = oz_cdev_register(); + if (err) + return err; + err = oz_protocol_init(g_net_dev); + if (err) + goto err_protocol; + oz_app_enable(OZ_APPID_USB, 1); + oz_apps_init(); + return 0; + +err_protocol: + oz_cdev_deregister(); + return err; +} + +/* + * Context: process + */ +static void __exit ozwpan_exit(void) +{ + oz_protocol_term(); + oz_apps_term(); + oz_cdev_deregister(); +} + +module_init(ozwpan_init); +module_exit(ozwpan_exit); + +MODULE_AUTHOR("Chris Kelly"); +MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver"); +MODULE_VERSION("1.0.13"); +MODULE_LICENSE("GPL"); + diff --git a/kernel/drivers/staging/ozwpan/ozpd.c b/kernel/drivers/staging/ozwpan/ozpd.c new file mode 100644 index 000000000..021d74a13 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozpd.c @@ -0,0 +1,886 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). 
+ * ----------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include +#include "ozdbg.h" +#include "ozprotocol.h" +#include "ozeltbuf.h" +#include "ozpd.h" +#include "ozproto.h" +#include "ozcdev.h" +#include "ozusbsvc.h" +#include +#include +#include + +static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd); +static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f); +static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f); +static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f); +static int oz_send_isoc_frame(struct oz_pd *pd); +static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f); +static void oz_isoc_stream_free(struct oz_isoc_stream *st); +static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data); +static void oz_isoc_destructor(struct sk_buff *skb); + +/* + * Counts the uncompleted isoc frames submitted to netcard. + */ +static atomic_t g_submitted_isoc = ATOMIC_INIT(0); + +/* Application handler functions. + */ +static const struct oz_app_if g_app_if[OZ_NB_APPS] = { + [OZ_APPID_USB] = { + .init = oz_usb_init, + .term = oz_usb_term, + .start = oz_usb_start, + .stop = oz_usb_stop, + .rx = oz_usb_rx, + .heartbeat = oz_usb_heartbeat, + .farewell = oz_usb_farewell, + }, + [OZ_APPID_SERIAL] = { + .init = oz_cdev_init, + .term = oz_cdev_term, + .start = oz_cdev_start, + .stop = oz_cdev_stop, + .rx = oz_cdev_rx, + }, +}; + + +/* + * Context: softirq or process + */ +void oz_pd_set_state(struct oz_pd *pd, unsigned state) +{ + pd->state = state; + switch (state) { + case OZ_PD_S_IDLE: + oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n"); + break; + case OZ_PD_S_CONNECTED: + oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n"); + break; + case OZ_PD_S_STOPPED: + oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n"); + break; + case OZ_PD_S_SLEEP: + oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n"); + break; + } +} + +/* + * Context: softirq or process + */ +void oz_pd_get(struct oz_pd *pd) +{ + atomic_inc(&pd->ref_count); +} + +/* + * Context: softirq or process + */ +void oz_pd_put(struct oz_pd *pd) +{ + if (atomic_dec_and_test(&pd->ref_count)) + oz_pd_destroy(pd); +} + +/* + * Context: softirq-serialized + */ +struct oz_pd *oz_pd_alloc(const u8 *mac_addr) +{ + struct oz_pd *pd; + int i; + + pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC); + if (!pd) + return NULL; + + atomic_set(&pd->ref_count, 2); + for (i = 0; i < OZ_NB_APPS; i++) + spin_lock_init(&pd->app_lock[i]); + pd->last_rx_pkt_num = 0xffffffff; + oz_pd_set_state(pd, OZ_PD_S_IDLE); + pd->max_tx_size = OZ_MAX_TX_SIZE; + ether_addr_copy(pd->mac_addr, mac_addr); + oz_elt_buf_init(&pd->elt_buff); + spin_lock_init(&pd->tx_frame_lock); + INIT_LIST_HEAD(&pd->tx_queue); + INIT_LIST_HEAD(&pd->farewell_list); + pd->last_sent_frame = &pd->tx_queue; + spin_lock_init(&pd->stream_lock); + INIT_LIST_HEAD(&pd->stream_list); + tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler, + (unsigned long)pd); + tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler, + (unsigned long)pd); + hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + pd->heartbeat.function = oz_pd_heartbeat_event; + pd->timeout.function = oz_pd_timeout_event; + + return pd; +} + +/* + * Context: softirq or process + */ +static void oz_pd_free(struct work_struct *work) +{ + struct list_head *e, *n; + struct oz_pd *pd; + + oz_pd_dbg(pd, ON, "Destroying 
PD\n"); + pd = container_of(work, struct oz_pd, workitem); + /*Disable timer tasklets*/ + tasklet_kill(&pd->heartbeat_tasklet); + tasklet_kill(&pd->timeout_tasklet); + + /* Free streams, queued tx frames and farewells. */ + + list_for_each_safe(e, n, &pd->stream_list) + oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link)); + + list_for_each_safe(e, n, &pd->tx_queue) { + struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link); + + if (f->skb != NULL) + kfree_skb(f->skb); + oz_retire_frame(pd, f); + } + + oz_elt_buf_term(&pd->elt_buff); + + list_for_each_safe(e, n, &pd->farewell_list) + kfree(list_entry(e, struct oz_farewell, link)); + + if (pd->net_dev) + dev_put(pd->net_dev); + kfree(pd); +} + +/* + * Context: softirq or Process + */ +void oz_pd_destroy(struct oz_pd *pd) +{ + if (hrtimer_active(&pd->timeout)) + hrtimer_cancel(&pd->timeout); + if (hrtimer_active(&pd->heartbeat)) + hrtimer_cancel(&pd->heartbeat); + + INIT_WORK(&pd->workitem, oz_pd_free); + if (!schedule_work(&pd->workitem)) + oz_pd_dbg(pd, ON, "failed to schedule workitem\n"); +} + +/* + * Context: softirq-serialized + */ +int oz_services_start(struct oz_pd *pd, u16 apps, int resume) +{ + int i, rc = 0; + + oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume); + for (i = 0; i < OZ_NB_APPS; i++) { + if (g_app_if[i].start && (apps & (1 << i))) { + if (g_app_if[i].start(pd, resume)) { + rc = -1; + oz_pd_dbg(pd, ON, + "Unable to start service %d\n", i); + break; + } + spin_lock_bh(&g_polling_lock); + pd->total_apps |= (1 << i); + if (resume) + pd->paused_apps &= ~(1 << i); + spin_unlock_bh(&g_polling_lock); + } + } + return rc; +} + +/* + * Context: softirq or process + */ +void oz_services_stop(struct oz_pd *pd, u16 apps, int pause) +{ + int i; + + oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause); + for (i = 0; i < OZ_NB_APPS; i++) { + if (g_app_if[i].stop && (apps & (1 << i))) { + spin_lock_bh(&g_polling_lock); + if (pause) { + pd->paused_apps |= (1 << i); + } else { + pd->total_apps &= ~(1 << i); + pd->paused_apps &= ~(1 << i); + } + spin_unlock_bh(&g_polling_lock); + g_app_if[i].stop(pd, pause); + } + } +} + +/* + * Context: softirq + */ +void oz_pd_heartbeat(struct oz_pd *pd, u16 apps) +{ + int i, more = 0; + + for (i = 0; i < OZ_NB_APPS; i++) { + if (g_app_if[i].heartbeat && (apps & (1 << i))) { + if (g_app_if[i].heartbeat(pd)) + more = 1; + } + } + if ((!more) && (hrtimer_active(&pd->heartbeat))) + hrtimer_cancel(&pd->heartbeat); + if (pd->mode & OZ_F_ISOC_ANYTIME) { + int count = 8; + + while (count-- && (oz_send_isoc_frame(pd) >= 0)) + ; + } +} + +/* + * Context: softirq or process + */ +void oz_pd_stop(struct oz_pd *pd) +{ + u16 stop_apps; + + oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state); + oz_pd_indicate_farewells(pd); + spin_lock_bh(&g_polling_lock); + stop_apps = pd->total_apps; + pd->total_apps = 0; + pd->paused_apps = 0; + spin_unlock_bh(&g_polling_lock); + oz_services_stop(pd, stop_apps, 0); + spin_lock_bh(&g_polling_lock); + oz_pd_set_state(pd, OZ_PD_S_STOPPED); + /* Remove from PD list.*/ + list_del(&pd->link); + spin_unlock_bh(&g_polling_lock); + oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count)); + oz_pd_put(pd); +} + +/* + * Context: softirq + */ +int oz_pd_sleep(struct oz_pd *pd) +{ + int do_stop = 0; + u16 stop_apps; + + spin_lock_bh(&g_polling_lock); + if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) { + spin_unlock_bh(&g_polling_lock); + return 0; + } + if (pd->keep_alive && pd->session_id) + oz_pd_set_state(pd, OZ_PD_S_SLEEP); + 
else + do_stop = 1; + + stop_apps = pd->total_apps; + spin_unlock_bh(&g_polling_lock); + if (do_stop) { + oz_pd_stop(pd); + } else { + oz_services_stop(pd, stop_apps, 1); + oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive); + } + return do_stop; +} + +/* + * Context: softirq + */ +static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd) +{ + struct oz_tx_frame *f; + + f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC); + if (f) { + f->total_size = sizeof(struct oz_hdr); + INIT_LIST_HEAD(&f->link); + INIT_LIST_HEAD(&f->elt_list); + } + return f; +} + +/* + * Context: softirq or process + */ +static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f) +{ + pd->nb_queued_isoc_frames--; + list_del_init(&f->link); + + kmem_cache_free(oz_tx_frame_cache, f); + + oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n", + pd->nb_queued_isoc_frames); +} + +/* + * Context: softirq or process + */ +static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f) +{ + kmem_cache_free(oz_tx_frame_cache, f); +} + +/* + * Context: softirq-serialized + */ +static void oz_set_more_bit(struct sk_buff *skb) +{ + struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb); + + oz_hdr->control |= OZ_F_MORE_DATA; +} + +/* + * Context: softirq-serialized + */ +static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb) +{ + struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb); + + oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; +} + +/* + * Context: softirq + */ +int oz_prepare_frame(struct oz_pd *pd, int empty) +{ + struct oz_tx_frame *f; + + if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED) + return -1; + if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES) + return -1; + if (!empty && !oz_are_elts_available(&pd->elt_buff)) + return -1; + f = oz_tx_frame_alloc(pd); + if (f == NULL) + return -1; + f->skb = NULL; + f->hdr.control = + (OZ_PROTOCOL_VERSION<last_tx_pkt_num; + put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num); + if (empty == 0) { + oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size, + pd->max_tx_size, &f->elt_list); + } + spin_lock(&pd->tx_frame_lock); + list_add_tail(&f->link, &pd->tx_queue); + pd->nb_queued_frames++; + spin_unlock(&pd->tx_frame_lock); + return 0; +} + +/* + * Context: softirq-serialized + */ +static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f) +{ + struct sk_buff *skb; + struct net_device *dev = pd->net_dev; + struct oz_hdr *oz_hdr; + struct oz_elt *elt; + struct oz_elt_info *ei; + + /* Allocate skb with enough space for the lower layers as well + * as the space we need. + */ + skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC); + if (skb == NULL) + return NULL; + /* Reserve the head room for lower layers. + */ + skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reset_network_header(skb); + skb->dev = dev; + skb->protocol = htons(OZ_ETHERTYPE); + if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, + dev->dev_addr, skb->len) < 0) + goto fail; + /* Push the tail to the end of the area we are going to copy to. + */ + oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size); + f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; + memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr)); + /* Copy the elements into the frame body. 
+ */ + elt = (struct oz_elt *)(oz_hdr+1); + list_for_each_entry(ei, &f->elt_list, link) { + memcpy(elt, ei->data, ei->length); + elt = oz_next_elt(elt); + } + return skb; +fail: + kfree_skb(skb); + return NULL; +} + +/* + * Context: softirq or process + */ +static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f) +{ + struct oz_elt_info *ei, *n; + + list_for_each_entry_safe(ei, n, &f->elt_list, link) { + list_del_init(&ei->link); + if (ei->callback) + ei->callback(pd, ei->context); + spin_lock_bh(&pd->elt_buff.lock); + oz_elt_info_free(&pd->elt_buff, ei); + spin_unlock_bh(&pd->elt_buff.lock); + } + oz_tx_frame_free(pd, f); +} + +/* + * Context: softirq-serialized + */ +static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data) +{ + struct sk_buff *skb; + struct oz_tx_frame *f; + struct list_head *e; + + spin_lock(&pd->tx_frame_lock); + e = pd->last_sent_frame->next; + if (e == &pd->tx_queue) { + spin_unlock(&pd->tx_frame_lock); + return -1; + } + f = list_entry(e, struct oz_tx_frame, link); + + if (f->skb != NULL) { + skb = f->skb; + oz_tx_isoc_free(pd, f); + spin_unlock(&pd->tx_frame_lock); + if (more_data) + oz_set_more_bit(skb); + oz_set_last_pkt_nb(pd, skb); + if ((int)atomic_read(&g_submitted_isoc) < + OZ_MAX_SUBMITTED_ISOC) { + if (dev_queue_xmit(skb) < 0) { + oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n"); + return -1; + } + atomic_inc(&g_submitted_isoc); + oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n", + pd->nb_queued_isoc_frames); + return 0; + } + kfree_skb(skb); + oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n"); + return -1; + } + + pd->last_sent_frame = e; + skb = oz_build_frame(pd, f); + spin_unlock(&pd->tx_frame_lock); + if (!skb) + return -1; + if (more_data) + oz_set_more_bit(skb); + oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num); + if (dev_queue_xmit(skb) < 0) + return -1; + + return 0; +} + +/* + * Context: softirq-serialized + */ +void oz_send_queued_frames(struct oz_pd *pd, int backlog) +{ + while (oz_prepare_frame(pd, 0) >= 0) + backlog++; + + switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) { + + case OZ_F_ISOC_NO_ELTS: { + backlog += pd->nb_queued_isoc_frames; + if (backlog <= 0) + goto out; + if (backlog > OZ_MAX_SUBMITTED_ISOC) + backlog = OZ_MAX_SUBMITTED_ISOC; + break; + } + case OZ_NO_ELTS_ANYTIME: { + if ((backlog <= 0) && (pd->isoc_sent == 0)) + goto out; + break; + } + default: { + if (backlog <= 0) + goto out; + break; + } + } + while (backlog--) { + if (oz_send_next_queued_frame(pd, backlog) < 0) + break; + } + return; + +out: oz_prepare_frame(pd, 1); + oz_send_next_queued_frame(pd, 0); +} + +/* + * Context: softirq + */ +static int oz_send_isoc_frame(struct oz_pd *pd) +{ + struct sk_buff *skb; + struct net_device *dev = pd->net_dev; + struct oz_hdr *oz_hdr; + struct oz_elt *elt; + struct oz_elt_info *ei; + LIST_HEAD(list); + int total_size = sizeof(struct oz_hdr); + + oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size, + pd->max_tx_size, &list); + if (list_empty(&list)) + return 0; + skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC); + if (skb == NULL) { + oz_dbg(ON, "Cannot alloc skb\n"); + oz_elt_info_free_chain(&pd->elt_buff, &list); + return -1; + } + skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reset_network_header(skb); + skb->dev = dev; + skb->protocol = htons(OZ_ETHERTYPE); + if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, + dev->dev_addr, skb->len) < 0) { + kfree_skb(skb); + return -1; + } + oz_hdr = (struct oz_hdr *)skb_put(skb, total_size); + oz_hdr->control = 
(OZ_PROTOCOL_VERSION<last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; + elt = (struct oz_elt *)(oz_hdr+1); + + list_for_each_entry(ei, &list, link) { + memcpy(elt, ei->data, ei->length); + elt = oz_next_elt(elt); + } + dev_queue_xmit(skb); + oz_elt_info_free_chain(&pd->elt_buff, &list); + return 0; +} + +/* + * Context: softirq-serialized + */ +void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn) +{ + struct oz_tx_frame *f, *tmp = NULL; + u8 diff; + u32 pkt_num; + + LIST_HEAD(list); + + spin_lock(&pd->tx_frame_lock); + list_for_each_entry(f, &pd->tx_queue, link) { + pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num)); + diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK; + if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0)) + break; + oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n", + pkt_num, pd->nb_queued_frames); + tmp = f; + pd->nb_queued_frames--; + } + if (tmp) + list_cut_position(&list, &pd->tx_queue, &tmp->link); + pd->last_sent_frame = &pd->tx_queue; + spin_unlock(&pd->tx_frame_lock); + + list_for_each_entry_safe(f, tmp, &list, link) + oz_retire_frame(pd, f); +} + +/* + * Precondition: stream_lock must be held. + * Context: softirq + */ +static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num) +{ + struct oz_isoc_stream *st; + + list_for_each_entry(st, &pd->stream_list, link) { + if (st->ep_num == ep_num) + return st; + } + return NULL; +} + +/* + * Context: softirq + */ +int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num) +{ + struct oz_isoc_stream *st; + + st = kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC); + if (!st) + return -ENOMEM; + st->ep_num = ep_num; + spin_lock_bh(&pd->stream_lock); + if (!pd_stream_find(pd, ep_num)) { + list_add(&st->link, &pd->stream_list); + st = NULL; + } + spin_unlock_bh(&pd->stream_lock); + kfree(st); + return 0; +} + +/* + * Context: softirq or process + */ +static void oz_isoc_stream_free(struct oz_isoc_stream *st) +{ + kfree_skb(st->skb); + kfree(st); +} + +/* + * Context: softirq + */ +int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num) +{ + struct oz_isoc_stream *st; + + spin_lock_bh(&pd->stream_lock); + st = pd_stream_find(pd, ep_num); + if (st) + list_del(&st->link); + spin_unlock_bh(&pd->stream_lock); + if (st) + oz_isoc_stream_free(st); + return 0; +} + +/* + * Context: any + */ +static void oz_isoc_destructor(struct sk_buff *skb) +{ + atomic_dec(&g_submitted_isoc); +} + +/* + * Context: softirq + */ +int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len) +{ + struct net_device *dev = pd->net_dev; + struct oz_isoc_stream *st; + u8 nb_units = 0; + struct sk_buff *skb = NULL; + struct oz_hdr *oz_hdr = NULL; + int size = 0; + + spin_lock_bh(&pd->stream_lock); + st = pd_stream_find(pd, ep_num); + if (st) { + skb = st->skb; + st->skb = NULL; + nb_units = st->nb_units; + st->nb_units = 0; + oz_hdr = st->oz_hdr; + size = st->size; + } + spin_unlock_bh(&pd->stream_lock); + if (!st) + return 0; + if (!skb) { + /* Allocate enough space for max size frame. */ + skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev), + GFP_ATOMIC); + if (skb == NULL) + return 0; + /* Reserve the head room for lower layers. 
*/ + skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reset_network_header(skb); + skb->dev = dev; + skb->protocol = htons(OZ_ETHERTYPE); + /* For audio packet set priority to AC_VO */ + skb->priority = 0x7; + size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large); + oz_hdr = (struct oz_hdr *)skb_put(skb, size); + } + memcpy(skb_put(skb, len), data, len); + size += len; + if (++nb_units < pd->ms_per_isoc) { + spin_lock_bh(&pd->stream_lock); + st->skb = skb; + st->nb_units = nb_units; + st->oz_hdr = oz_hdr; + st->size = size; + spin_unlock_bh(&pd->stream_lock); + } else { + struct oz_hdr oz; + struct oz_isoc_large iso; + + spin_lock_bh(&pd->stream_lock); + iso.frame_number = st->frame_num; + st->frame_num += nb_units; + spin_unlock_bh(&pd->stream_lock); + oz.control = + (OZ_PROTOCOL_VERSION<trigger_pkt_num & OZ_LAST_PN_MASK; + oz.pkt_num = 0; + iso.endpoint = ep_num; + iso.format = OZ_DATA_F_ISOC_LARGE; + iso.ms_data = nb_units; + memcpy(oz_hdr, &oz, sizeof(oz)); + memcpy(oz_hdr+1, &iso, sizeof(iso)); + if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, + dev->dev_addr, skb->len) < 0) + goto out; + + skb->destructor = oz_isoc_destructor; + /*Queue for Xmit if mode is not ANYTIME*/ + if (!(pd->mode & OZ_F_ISOC_ANYTIME)) { + struct oz_tx_frame *isoc_unit = NULL; + int nb = pd->nb_queued_isoc_frames; + + if (nb >= pd->isoc_latency) { + struct oz_tx_frame *f; + + oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n", + nb); + spin_lock(&pd->tx_frame_lock); + list_for_each_entry(f, &pd->tx_queue, link) { + if (f->skb != NULL) { + oz_tx_isoc_free(pd, f); + break; + } + } + spin_unlock(&pd->tx_frame_lock); + } + isoc_unit = oz_tx_frame_alloc(pd); + if (isoc_unit == NULL) + goto out; + isoc_unit->hdr = oz; + isoc_unit->skb = skb; + spin_lock_bh(&pd->tx_frame_lock); + list_add_tail(&isoc_unit->link, &pd->tx_queue); + pd->nb_queued_isoc_frames++; + spin_unlock_bh(&pd->tx_frame_lock); + oz_dbg(TX_FRAMES, + "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n", + pd->nb_queued_isoc_frames, pd->nb_queued_frames); + return 0; + } + + /*In ANYTIME mode Xmit unit immediately*/ + if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) { + atomic_inc(&g_submitted_isoc); + if (dev_queue_xmit(skb) < 0) + return -1; + return 0; + } + +out: kfree_skb(skb); + return -1; + + } + return 0; +} + +/* + * Context: process + */ +void oz_apps_init(void) +{ + int i; + + for (i = 0; i < OZ_NB_APPS; i++) { + if (g_app_if[i].init) + g_app_if[i].init(); + } +} + +/* + * Context: process + */ +void oz_apps_term(void) +{ + int i; + + /* Terminate all the apps. 
*/ + for (i = 0; i < OZ_NB_APPS; i++) { + if (g_app_if[i].term) + g_app_if[i].term(); + } +} + +/* + * Context: softirq-serialized + */ +void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt) +{ + if (app_id < OZ_NB_APPS && g_app_if[app_id].rx) + g_app_if[app_id].rx(pd, elt); +} + +/* + * Context: softirq or process + */ +void oz_pd_indicate_farewells(struct oz_pd *pd) +{ + struct oz_farewell *f; + const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB]; + + while (1) { + spin_lock_bh(&g_polling_lock); + if (list_empty(&pd->farewell_list)) { + spin_unlock_bh(&g_polling_lock); + break; + } + f = list_first_entry(&pd->farewell_list, + struct oz_farewell, link); + list_del(&f->link); + spin_unlock_bh(&g_polling_lock); + if (ai->farewell) + ai->farewell(pd, f->ep_num, f->report, f->len); + kfree(f); + } +} diff --git a/kernel/drivers/staging/ozwpan/ozpd.h b/kernel/drivers/staging/ozwpan/ozpd.h new file mode 100644 index 000000000..212fab0d8 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozpd.h @@ -0,0 +1,134 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZPD_H_ +#define _OZPD_H_ + +#include +#include "ozeltbuf.h" + +/* PD state + */ +#define OZ_PD_S_IDLE 0x1 +#define OZ_PD_S_CONNECTED 0x2 +#define OZ_PD_S_SLEEP 0x4 +#define OZ_PD_S_STOPPED 0x8 + +/* Timer event types. + */ +#define OZ_TIMER_TOUT 1 +#define OZ_TIMER_HEARTBEAT 2 +#define OZ_TIMER_STOP 3 + +/* + *External spinlock variable + */ +extern spinlock_t g_polling_lock; + +/* Data structure that hold information on a frame for transmisson. This is + * built when the frame is first transmitted and is used to rebuild the frame + * if a re-transmission is required. + */ +struct oz_tx_frame { + struct list_head link; + struct list_head elt_list; + struct oz_hdr hdr; + struct sk_buff *skb; + int total_size; +}; + +struct oz_isoc_stream { + struct list_head link; + u8 ep_num; + u8 frame_num; + u8 nb_units; + int size; + struct sk_buff *skb; + struct oz_hdr *oz_hdr; +}; + +struct oz_farewell { + struct list_head link; + u8 ep_num; + u8 index; + u8 len; + u8 report[0]; +}; + +/* Data structure that holds information on a specific peripheral device (PD). 
+ */ +struct oz_pd { + struct list_head link; + atomic_t ref_count; + u8 mac_addr[ETH_ALEN]; + unsigned state; + unsigned state_flags; + unsigned send_flags; + u16 total_apps; + u16 paused_apps; + u8 session_id; + u8 param_rsp_status; + u8 pd_info; + u8 isoc_sent; + u32 last_rx_pkt_num; + u32 last_tx_pkt_num; + struct timespec last_rx_timestamp; + u32 trigger_pkt_num; + unsigned long pulse_time; + unsigned long pulse_period; + unsigned long presleep; + unsigned long keep_alive; + struct oz_elt_buf elt_buff; + void *app_ctx[OZ_NB_APPS]; + spinlock_t app_lock[OZ_NB_APPS]; + int max_tx_size; + u8 mode; + u8 ms_per_isoc; + unsigned isoc_latency; + unsigned max_stream_buffering; + int nb_queued_frames; + int nb_queued_isoc_frames; + spinlock_t tx_frame_lock; + struct list_head *last_sent_frame; + struct list_head tx_queue; + struct list_head farewell_list; + spinlock_t stream_lock; + struct list_head stream_list; + struct net_device *net_dev; + struct hrtimer heartbeat; + struct hrtimer timeout; + u8 timeout_type; + struct tasklet_struct heartbeat_tasklet; + struct tasklet_struct timeout_tasklet; + struct work_struct workitem; +}; + +#define OZ_MAX_QUEUED_FRAMES 4 + +struct oz_pd *oz_pd_alloc(const u8 *mac_addr); +void oz_pd_destroy(struct oz_pd *pd); +void oz_pd_get(struct oz_pd *pd); +void oz_pd_put(struct oz_pd *pd); +void oz_pd_set_state(struct oz_pd *pd, unsigned state); +void oz_pd_indicate_farewells(struct oz_pd *pd); +int oz_pd_sleep(struct oz_pd *pd); +void oz_pd_stop(struct oz_pd *pd); +void oz_pd_heartbeat(struct oz_pd *pd, u16 apps); +int oz_services_start(struct oz_pd *pd, u16 apps, int resume); +void oz_services_stop(struct oz_pd *pd, u16 apps, int pause); +int oz_prepare_frame(struct oz_pd *pd, int empty); +void oz_send_queued_frames(struct oz_pd *pd, int backlog); +void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn); +int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num); +int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num); +int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len); +void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt); +void oz_apps_init(void); +void oz_apps_term(void); + +extern struct kmem_cache *oz_elt_info_cache; +extern struct kmem_cache *oz_tx_frame_cache; + +#endif /* Sentry */ diff --git a/kernel/drivers/staging/ozwpan/ozproto.c b/kernel/drivers/staging/ozwpan/ozproto.c new file mode 100644 index 000000000..1ba24a2ae --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozproto.c @@ -0,0 +1,813 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ozdbg.h" +#include "ozprotocol.h" +#include "ozeltbuf.h" +#include "ozpd.h" +#include "ozproto.h" +#include "ozusbsvc.h" + +#include "ozappif.h" +#include +#include +#include + +#define OZ_CF_CONN_SUCCESS 1 +#define OZ_CF_CONN_FAILURE 2 + +#define OZ_DO_STOP 1 +#define OZ_DO_SLEEP 2 + +struct oz_binding { + struct packet_type ptype; + char name[OZ_MAX_BINDING_LEN]; + struct list_head link; +}; + +/* + * External variable + */ + +DEFINE_SPINLOCK(g_polling_lock); +/* + * Static external variables. 
+ */ +static LIST_HEAD(g_pd_list); +static LIST_HEAD(g_binding); +static DEFINE_SPINLOCK(g_binding_lock); +static struct sk_buff_head g_rx_queue; +static u8 g_session_id; +static u16 g_apps = 0x1; +static int g_processing_rx; + +struct kmem_cache *oz_elt_info_cache; +struct kmem_cache *oz_tx_frame_cache; + +/* + * Context: softirq-serialized + */ +static u8 oz_get_new_session_id(u8 exclude) +{ + if (++g_session_id == 0) + g_session_id = 1; + if (g_session_id == exclude) { + if (++g_session_id == 0) + g_session_id = 1; + } + return g_session_id; +} + +/* + * Context: softirq-serialized + */ +static void oz_send_conn_rsp(struct oz_pd *pd, u8 status) +{ + struct sk_buff *skb; + struct net_device *dev = pd->net_dev; + struct oz_hdr *oz_hdr; + struct oz_elt *elt; + struct oz_elt_connect_rsp *body; + + int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) + + sizeof(struct oz_elt_connect_rsp); + skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC); + if (skb == NULL) + return; + skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reset_network_header(skb); + oz_hdr = (struct oz_hdr *)skb_put(skb, sz); + elt = (struct oz_elt *)(oz_hdr+1); + body = (struct oz_elt_connect_rsp *)(elt+1); + skb->dev = dev; + skb->protocol = htons(OZ_ETHERTYPE); + /* Fill in device header */ + if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, + dev->dev_addr, skb->len) < 0) { + kfree_skb(skb); + return; + } + oz_hdr->control = OZ_PROTOCOL_VERSION<last_pkt_num = 0; + put_unaligned(0, &oz_hdr->pkt_num); + elt->type = OZ_ELT_CONNECT_RSP; + elt->length = sizeof(struct oz_elt_connect_rsp); + memset(body, 0, sizeof(struct oz_elt_connect_rsp)); + body->status = status; + if (status == 0) { + body->mode = pd->mode; + body->session_id = pd->session_id; + put_unaligned(cpu_to_le16(pd->total_apps), &body->apps); + } + oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d", status); + dev_queue_xmit(skb); +} + +/* + * Context: softirq-serialized + */ +static void pd_set_keepalive(struct oz_pd *pd, u8 kalive) +{ + unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK; + + switch (kalive & OZ_KALIVE_TYPE_MASK) { + case OZ_KALIVE_SPECIAL: + pd->keep_alive = keep_alive * 1000*60*60*24*20; + break; + case OZ_KALIVE_SECS: + pd->keep_alive = keep_alive*1000; + break; + case OZ_KALIVE_MINS: + pd->keep_alive = keep_alive*1000*60; + break; + case OZ_KALIVE_HOURS: + pd->keep_alive = keep_alive*1000*60*60; + break; + default: + pd->keep_alive = 0; + } + oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive); +} + +/* + * Context: softirq-serialized + */ +static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer) +{ + if (presleep) + pd->presleep = presleep*100; + else + pd->presleep = OZ_PRESLEEP_TOUT; + if (start_timer) { + spin_unlock(&g_polling_lock); + oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep); + spin_lock(&g_polling_lock); + } + oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep); +} + +/* + * Context: softirq-serialized + */ +static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt, + const u8 *pd_addr, struct net_device *net_dev) +{ + struct oz_pd *pd; + struct oz_elt_connect_req *body = + (struct oz_elt_connect_req *)(elt+1); + u8 rsp_status = OZ_STATUS_SUCCESS; + u8 stop_needed = 0; + u16 new_apps = g_apps; + struct net_device *old_net_dev = NULL; + struct oz_pd *free_pd = NULL; + + if (cur_pd) { + pd = cur_pd; + spin_lock_bh(&g_polling_lock); + } else { + struct oz_pd *pd2 = NULL; + struct list_head *e; + + pd = oz_pd_alloc(pd_addr); + if (pd == NULL) + return NULL; + 
getnstimeofday(&pd->last_rx_timestamp); + spin_lock_bh(&g_polling_lock); + list_for_each(e, &g_pd_list) { + pd2 = list_entry(e, struct oz_pd, link); + if (ether_addr_equal(pd2->mac_addr, pd_addr)) { + free_pd = pd; + pd = pd2; + break; + } + } + if (pd != pd2) + list_add_tail(&pd->link, &g_pd_list); + } + if (pd == NULL) { + spin_unlock_bh(&g_polling_lock); + return NULL; + } + if (pd->net_dev != net_dev) { + old_net_dev = pd->net_dev; + dev_hold(net_dev); + pd->net_dev = net_dev; + } + oz_dbg(ON, "Host vendor: %d\n", body->host_vendor); + pd->max_tx_size = OZ_MAX_TX_SIZE; + pd->mode = body->mode; + pd->pd_info = body->pd_info; + if (pd->mode & OZ_F_ISOC_NO_ELTS) { + pd->ms_per_isoc = body->ms_per_isoc; + if (!pd->ms_per_isoc) + pd->ms_per_isoc = 4; + + switch (body->ms_isoc_latency & OZ_LATENCY_MASK) { + case OZ_ONE_MS_LATENCY: + pd->isoc_latency = (body->ms_isoc_latency & + ~OZ_LATENCY_MASK) / pd->ms_per_isoc; + break; + case OZ_TEN_MS_LATENCY: + pd->isoc_latency = ((body->ms_isoc_latency & + ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc; + break; + default: + pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC; + } + } + if (body->max_len_div16) + pd->max_tx_size = ((u16)body->max_len_div16)<<4; + oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n", + pd->max_tx_size, pd->ms_per_isoc); + pd->max_stream_buffering = 3*1024; + pd->pulse_period = OZ_QUANTUM; + pd_set_presleep(pd, body->presleep, 0); + pd_set_keepalive(pd, body->keep_alive); + + new_apps &= le16_to_cpu(get_unaligned(&body->apps)); + if ((new_apps & 0x1) && (body->session_id)) { + if (pd->session_id) { + if (pd->session_id != body->session_id) { + rsp_status = OZ_STATUS_SESSION_MISMATCH; + goto done; + } + } else { + new_apps &= ~0x1; /* Resume not permitted */ + pd->session_id = + oz_get_new_session_id(body->session_id); + } + } else { + if (pd->session_id && !body->session_id) { + rsp_status = OZ_STATUS_SESSION_TEARDOWN; + stop_needed = 1; + } else { + new_apps &= ~0x1; /* Resume not permitted */ + pd->session_id = + oz_get_new_session_id(body->session_id); + } + } +done: + if (rsp_status == OZ_STATUS_SUCCESS) { + u16 start_apps = new_apps & ~pd->total_apps & ~0x1; + u16 stop_apps = pd->total_apps & ~new_apps & ~0x1; + u16 resume_apps = new_apps & pd->paused_apps & ~0x1; + + spin_unlock_bh(&g_polling_lock); + oz_pd_set_state(pd, OZ_PD_S_CONNECTED); + oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n", + new_apps, pd->total_apps, pd->paused_apps); + if (start_apps) { + if (oz_services_start(pd, start_apps, 0)) + rsp_status = OZ_STATUS_TOO_MANY_PDS; + } + if (resume_apps) + if (oz_services_start(pd, resume_apps, 1)) + rsp_status = OZ_STATUS_TOO_MANY_PDS; + if (stop_apps) + oz_services_stop(pd, stop_apps, 0); + oz_pd_request_heartbeat(pd); + } else { + spin_unlock_bh(&g_polling_lock); + } + oz_send_conn_rsp(pd, rsp_status); + if (rsp_status != OZ_STATUS_SUCCESS) { + if (stop_needed) + oz_pd_stop(pd); + oz_pd_put(pd); + pd = NULL; + } + if (old_net_dev) + dev_put(old_net_dev); + if (free_pd) + oz_pd_destroy(free_pd); + return pd; +} + +/* + * Context: softirq-serialized + */ +static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index, + const u8 *report, u8 len) +{ + struct oz_farewell *f; + struct oz_farewell *f2; + int found = 0; + + f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC); + if (!f) + return; + f->ep_num = ep_num; + f->index = index; + f->len = len; + memcpy(f->report, report, len); + oz_dbg(ON, "RX: Adding farewell report\n"); + spin_lock(&g_polling_lock); + list_for_each_entry(f2, &pd->farewell_list, link) { + 
if ((f2->ep_num == ep_num) && (f2->index == index)) { + found = 1; + list_del(&f2->link); + break; + } + } + list_add_tail(&f->link, &pd->farewell_list); + spin_unlock(&g_polling_lock); + if (found) + kfree(f2); +} + +/* + * Context: softirq-serialized + */ +static void oz_rx_frame(struct sk_buff *skb) +{ + u8 *mac_hdr; + u8 *src_addr; + struct oz_elt *elt; + int length; + struct oz_pd *pd = NULL; + struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb); + struct timespec current_time; + int dup = 0; + u32 pkt_num; + + oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n", + oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control); + mac_hdr = skb_mac_header(skb); + src_addr = &mac_hdr[ETH_ALEN]; + length = skb->len; + + /* Check the version field */ + if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) { + oz_dbg(ON, "Incorrect protocol version: %d\n", + oz_get_prot_ver(oz_hdr->control)); + goto done; + } + + pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num)); + + pd = oz_pd_find(src_addr); + if (pd) { + if (!(pd->state & OZ_PD_S_CONNECTED)) + oz_pd_set_state(pd, OZ_PD_S_CONNECTED); + getnstimeofday(¤t_time); + if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) || + (pd->presleep < MSEC_PER_SEC)) { + oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep); + pd->last_rx_timestamp = current_time; + } + if (pkt_num != pd->last_rx_pkt_num) { + pd->last_rx_pkt_num = pkt_num; + } else { + dup = 1; + oz_dbg(ON, "Duplicate frame\n"); + } + } + + if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) { + oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n"); + pd->last_sent_frame = &pd->tx_queue; + if (oz_hdr->control & OZ_F_ACK) { + /* Retire completed frames */ + oz_retire_tx_frames(pd, oz_hdr->last_pkt_num); + } + if ((oz_hdr->control & OZ_F_ACK_REQUESTED) && + (pd->state == OZ_PD_S_CONNECTED)) { + int backlog = pd->nb_queued_frames; + + pd->trigger_pkt_num = pkt_num; + /* Send queued frames */ + oz_send_queued_frames(pd, backlog); + } + } + + length -= sizeof(struct oz_hdr); + elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr)); + + while (length >= sizeof(struct oz_elt)) { + length -= sizeof(struct oz_elt) + elt->length; + if (length < 0) + break; + switch (elt->type) { + case OZ_ELT_CONNECT_REQ: + oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n"); + pd = oz_connect_req(pd, elt, src_addr, skb->dev); + break; + case OZ_ELT_DISCONNECT: + oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n"); + if (pd) + oz_pd_sleep(pd); + break; + case OZ_ELT_UPDATE_PARAM_REQ: { + struct oz_elt_update_param *body = + (struct oz_elt_update_param *)(elt + 1); + oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n"); + if (pd && (pd->state & OZ_PD_S_CONNECTED)) { + spin_lock(&g_polling_lock); + pd_set_keepalive(pd, body->keepalive); + pd_set_presleep(pd, body->presleep, 1); + spin_unlock(&g_polling_lock); + } + } + break; + case OZ_ELT_FAREWELL_REQ: { + struct oz_elt_farewell *body = + (struct oz_elt_farewell *)(elt + 1); + oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n"); + oz_add_farewell(pd, body->ep_num, + body->index, body->report, + elt->length + 1 - sizeof(*body)); + } + break; + case OZ_ELT_APP_DATA: + if (pd && (pd->state & OZ_PD_S_CONNECTED)) { + struct oz_app_hdr *app_hdr = + (struct oz_app_hdr *)(elt+1); + if (dup) + break; + oz_handle_app_elt(pd, app_hdr->app_id, elt); + } + break; + default: + oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type); + } + elt = oz_next_elt(elt); + } +done: + if (pd) + oz_pd_put(pd); + consume_skb(skb); +} + +/* + * Context: process + */ +void oz_protocol_term(void) +{ + struct 
oz_binding *b, *t; + + /* Walk the list of bindings and remove each one. + */ + spin_lock_bh(&g_binding_lock); + list_for_each_entry_safe(b, t, &g_binding, link) { + list_del(&b->link); + spin_unlock_bh(&g_binding_lock); + dev_remove_pack(&b->ptype); + if (b->ptype.dev) + dev_put(b->ptype.dev); + kfree(b); + spin_lock_bh(&g_binding_lock); + } + spin_unlock_bh(&g_binding_lock); + /* Walk the list of PDs and stop each one. This causes the PD to be + * removed from the list so we can just pull each one from the head + * of the list. + */ + spin_lock_bh(&g_polling_lock); + while (!list_empty(&g_pd_list)) { + struct oz_pd *pd = + list_first_entry(&g_pd_list, struct oz_pd, link); + oz_pd_get(pd); + spin_unlock_bh(&g_polling_lock); + oz_pd_stop(pd); + oz_pd_put(pd); + spin_lock_bh(&g_polling_lock); + } + spin_unlock_bh(&g_polling_lock); + oz_dbg(ON, "Protocol stopped\n"); + + kmem_cache_destroy(oz_tx_frame_cache); + kmem_cache_destroy(oz_elt_info_cache); +} + +/* + * Context: softirq + */ +void oz_pd_heartbeat_handler(unsigned long data) +{ + struct oz_pd *pd = (struct oz_pd *)data; + u16 apps = 0; + + spin_lock_bh(&g_polling_lock); + if (pd->state & OZ_PD_S_CONNECTED) + apps = pd->total_apps; + spin_unlock_bh(&g_polling_lock); + if (apps) + oz_pd_heartbeat(pd, apps); + oz_pd_put(pd); +} + +/* + * Context: softirq + */ +void oz_pd_timeout_handler(unsigned long data) +{ + int type; + struct oz_pd *pd = (struct oz_pd *)data; + + spin_lock_bh(&g_polling_lock); + type = pd->timeout_type; + spin_unlock_bh(&g_polling_lock); + switch (type) { + case OZ_TIMER_TOUT: + oz_pd_sleep(pd); + break; + case OZ_TIMER_STOP: + oz_pd_stop(pd); + break; + } + oz_pd_put(pd); +} + +/* + * Context: Interrupt + */ +enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer) +{ + struct oz_pd *pd; + + pd = container_of(timer, struct oz_pd, heartbeat); + hrtimer_forward_now(timer, ktime_set(pd->pulse_period / + MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC)); + oz_pd_get(pd); + tasklet_schedule(&pd->heartbeat_tasklet); + return HRTIMER_RESTART; +} + +/* + * Context: Interrupt + */ +enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer) +{ + struct oz_pd *pd; + + pd = container_of(timer, struct oz_pd, timeout); + oz_pd_get(pd); + tasklet_schedule(&pd->timeout_tasklet); + return HRTIMER_NORESTART; +} + +/* + * Context: softirq or process + */ +void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time) +{ + spin_lock_bh(&g_polling_lock); + switch (type) { + case OZ_TIMER_TOUT: + case OZ_TIMER_STOP: + if (hrtimer_active(&pd->timeout)) { + hrtimer_set_expires(&pd->timeout, ktime_set(due_time / + MSEC_PER_SEC, (due_time % MSEC_PER_SEC) * + NSEC_PER_MSEC)); + hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL); + } else { + hrtimer_start(&pd->timeout, ktime_set(due_time / + MSEC_PER_SEC, (due_time % MSEC_PER_SEC) * + NSEC_PER_MSEC), HRTIMER_MODE_REL); + } + pd->timeout_type = type; + break; + case OZ_TIMER_HEARTBEAT: + if (!hrtimer_active(&pd->heartbeat)) + hrtimer_start(&pd->heartbeat, ktime_set(due_time / + MSEC_PER_SEC, (due_time % MSEC_PER_SEC) * + NSEC_PER_MSEC), HRTIMER_MODE_REL); + break; + } + spin_unlock_bh(&g_polling_lock); +} + +/* + * Context: softirq or process + */ +void oz_pd_request_heartbeat(struct oz_pd *pd) +{ + oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ? 
+ pd->pulse_period : OZ_QUANTUM); +} + +/* + * Context: softirq or process + */ +struct oz_pd *oz_pd_find(const u8 *mac_addr) +{ + struct oz_pd *pd; + + spin_lock_bh(&g_polling_lock); + list_for_each_entry(pd, &g_pd_list, link) { + if (ether_addr_equal(pd->mac_addr, mac_addr)) { + oz_pd_get(pd); + spin_unlock_bh(&g_polling_lock); + return pd; + } + } + spin_unlock_bh(&g_polling_lock); + return NULL; +} + +/* + * Context: process + */ +void oz_app_enable(int app_id, int enable) +{ + if (app_id < OZ_NB_APPS) { + spin_lock_bh(&g_polling_lock); + if (enable) + g_apps |= (1<ptype.type = htons(OZ_ETHERTYPE); + binding->ptype.func = oz_pkt_recv; + if (net_dev && *net_dev) { + memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN); + oz_dbg(ON, "Adding binding: %s\n", net_dev); + binding->ptype.dev = dev_get_by_name(&init_net, net_dev); + if (binding->ptype.dev == NULL) { + oz_dbg(ON, "Netdev %s not found\n", net_dev); + kfree(binding); + return; + } + } + dev_add_pack(&binding->ptype); + spin_lock_bh(&g_binding_lock); + list_add_tail(&binding->link, &g_binding); + spin_unlock_bh(&g_binding_lock); +} + +/* + * Context: process + */ +static void pd_stop_all_for_device(struct net_device *net_dev) +{ + LIST_HEAD(h); + struct oz_pd *pd; + struct oz_pd *n; + + spin_lock_bh(&g_polling_lock); + list_for_each_entry_safe(pd, n, &g_pd_list, link) { + if (pd->net_dev == net_dev) { + list_move(&pd->link, &h); + oz_pd_get(pd); + } + } + spin_unlock_bh(&g_polling_lock); + while (!list_empty(&h)) { + pd = list_first_entry(&h, struct oz_pd, link); + oz_pd_stop(pd); + oz_pd_put(pd); + } +} + +/* + * Context: process + */ +void oz_binding_remove(const char *net_dev) +{ + struct oz_binding *binding; + int found = 0; + + oz_dbg(ON, "Removing binding: %s\n", net_dev); + spin_lock_bh(&g_binding_lock); + list_for_each_entry(binding, &g_binding, link) { + if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) { + oz_dbg(ON, "Binding '%s' found\n", net_dev); + found = 1; + break; + } + } + spin_unlock_bh(&g_binding_lock); + if (found) { + dev_remove_pack(&binding->ptype); + if (binding->ptype.dev) { + dev_put(binding->ptype.dev); + pd_stop_all_for_device(binding->ptype.dev); + } + list_del(&binding->link); + kfree(binding); + } +} + +/* + * Context: process + */ +static char *oz_get_next_device_name(char *s, char *dname, int max_size) +{ + while (*s == ',') + s++; + while (*s && (*s != ',') && max_size > 1) { + *dname++ = *s++; + max_size--; + } + *dname = 0; + return s; +} + +/* + * Context: process + */ +int oz_protocol_init(char *devs) +{ + oz_elt_info_cache = KMEM_CACHE(oz_elt_info, 0); + if (!oz_elt_info_cache) + return -ENOMEM; + + oz_tx_frame_cache = KMEM_CACHE(oz_tx_frame, 0); + if (!oz_tx_frame_cache) { + kmem_cache_destroy(oz_elt_info_cache); + return -ENOMEM; + } + + skb_queue_head_init(&g_rx_queue); + if (devs[0] == '*') { + oz_binding_add(NULL); + } else { + char d[32]; + + while (*devs) { + devs = oz_get_next_device_name(devs, d, sizeof(d)); + if (d[0]) + oz_binding_add(d); + } + } + return 0; +} + +/* + * Context: process + */ +int oz_get_pd_list(struct oz_mac_addr *addr, int max_count) +{ + struct oz_pd *pd; + int count = 0; + + spin_lock_bh(&g_polling_lock); + list_for_each_entry(pd, &g_pd_list, link) { + if (count >= max_count) + break; + ether_addr_copy((u8 *)&addr[count++], pd->mac_addr); + } + spin_unlock_bh(&g_polling_lock); + return count; +} + diff --git a/kernel/drivers/staging/ozwpan/ozproto.h b/kernel/drivers/staging/ozwpan/ozproto.h new file mode 100644 index 000000000..30c2db91c --- 
/dev/null +++ b/kernel/drivers/staging/ozwpan/ozproto.h @@ -0,0 +1,62 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZPROTO_H +#define _OZPROTO_H + +#include +#include "ozdbg.h" +#include "ozappif.h" + +#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom) + +/* Quantum in MS */ +#define OZ_QUANTUM 8 +/* Default timeouts. + */ +#define OZ_PRESLEEP_TOUT 11 + +/* Maximun sizes of tx frames. */ +#define OZ_MAX_TX_SIZE 760 + +/* Maximum number of uncompleted isoc frames that can be pending in network. */ +#define OZ_MAX_SUBMITTED_ISOC 16 + +/* Maximum number of uncompleted isoc frames that can be pending in Tx Queue. */ +#define OZ_MAX_TX_QUEUE_ISOC 32 + +/* Application handler functions. + */ +struct oz_app_if { + int (*init)(void); + void (*term)(void); + int (*start)(struct oz_pd *pd, int resume); + void (*stop)(struct oz_pd *pd, int pause); + void (*rx)(struct oz_pd *pd, struct oz_elt *elt); + int (*heartbeat)(struct oz_pd *pd); + void (*farewell)(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len); +}; + +int oz_protocol_init(char *devs); +void oz_protocol_term(void); +int oz_get_pd_list(struct oz_mac_addr *addr, int max_count); +void oz_app_enable(int app_id, int enable); +struct oz_pd *oz_pd_find(const u8 *mac_addr); +void oz_binding_add(const char *net_dev); +void oz_binding_remove(const char *net_dev); +void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time); +void oz_timer_delete(struct oz_pd *pd, int type); +void oz_pd_request_heartbeat(struct oz_pd *pd); +void oz_pd_heartbeat_handler(unsigned long data); +void oz_pd_timeout_handler(unsigned long data); +enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer); +enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer); +int oz_get_pd_status_list(char *pd_list, int max_count); +int oz_get_binding_list(char *buf, int max_if); + +extern struct kmem_cache *oz_elt_info_cache; +extern struct kmem_cache *oz_tx_frame_cache; + +#endif /* _OZPROTO_H */ diff --git a/kernel/drivers/staging/ozwpan/ozprotocol.h b/kernel/drivers/staging/ozwpan/ozprotocol.h new file mode 100644 index 000000000..464207259 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozprotocol.h @@ -0,0 +1,375 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZPROTOCOL_H +#define _OZPROTOCOL_H + +#define PACKED __packed + +#define OZ_ETHERTYPE 0x892e + +/* Status codes + */ +#define OZ_STATUS_SUCCESS 0 +#define OZ_STATUS_INVALID_PARAM 1 +#define OZ_STATUS_TOO_MANY_PDS 2 +#define OZ_STATUS_NOT_ALLOWED 4 +#define OZ_STATUS_SESSION_MISMATCH 5 +#define OZ_STATUS_SESSION_TEARDOWN 6 + +/* This is the generic element header. + Every element starts with this. + */ +struct oz_elt { + u8 type; + u8 length; +} PACKED; + +#define oz_next_elt(__elt) \ + (struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length) + +/* Protocol element IDs. 
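Everything the receive side needs in order to iterate the element area of a frame is the two-byte oz_elt header above plus the oz_next_elt() macro; oz_rx_frame() in ozproto.c walks elements exactly this way, subtracting each header-plus-body from the remaining length and stopping on truncation. A stand-alone sketch of that walk (walk() and the sample buffer are illustrative):

#include <stdint.h>
#include <stdio.h>

struct elt {                   /* mirrors struct oz_elt */
    uint8_t type;
    uint8_t length;            /* length of the body that follows */
} __attribute__((packed));

#define next_elt(e) ((struct elt *)((uint8_t *)((e) + 1) + (e)->length))

static void walk(uint8_t *buf, int len)
{
    struct elt *e = (struct elt *)buf;

    while (len >= (int)sizeof(*e)) {
        len -= (int)sizeof(*e) + e->length;
        if (len < 0)           /* truncated element: stop, as oz_rx_frame() does */
            break;
        printf("elt type 0x%02x, %u byte body\n", e->type, e->length);
        e = next_elt(e);
    }
}

int main(void)
{
    /* two elements: type 0x06 with a 2-byte body, type 0x31 with a 1-byte body */
    uint8_t buf[] = { 0x06, 2, 0xaa, 0xbb, 0x31, 1, 0xcc };

    walk(buf, (int)sizeof(buf));
    return 0;
}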
+ */ +#define OZ_ELT_CONNECT_REQ 0x06 +#define OZ_ELT_CONNECT_RSP 0x07 +#define OZ_ELT_DISCONNECT 0x08 +#define OZ_ELT_UPDATE_PARAM_REQ 0x11 +#define OZ_ELT_FAREWELL_REQ 0x12 +#define OZ_ELT_APP_DATA 0x31 + +/* This is the Ozmo header which is the first Ozmo specific part + * of a frame and comes after the MAC header. + */ +struct oz_hdr { + u8 control; + u8 last_pkt_num; + u32 pkt_num; +} PACKED; + +#define OZ_PROTOCOL_VERSION 0x1 +/* Bits in the control field. */ +#define OZ_VERSION_MASK 0xc +#define OZ_VERSION_SHIFT 2 +#define OZ_F_ACK 0x10 +#define OZ_F_ISOC 0x20 +#define OZ_F_MORE_DATA 0x40 +#define OZ_F_ACK_REQUESTED 0x80 + +#define oz_get_prot_ver(__x) (((__x) & OZ_VERSION_MASK) >> OZ_VERSION_SHIFT) + +/* Used to select the bits of packet number to put in the last_pkt_num. + */ +#define OZ_LAST_PN_MASK 0x00ff + +#define OZ_LAST_PN_HALF_CYCLE 127 + +#define OZ_LATENCY_MASK 0xc0 +#define OZ_ONE_MS_LATENCY 0x40 +#define OZ_TEN_MS_LATENCY 0x80 + +/* Connect request data structure. + */ +struct oz_elt_connect_req { + u8 mode; + u8 resv1[16]; + u8 pd_info; + u8 session_id; + u8 presleep; + u8 ms_isoc_latency; + u8 host_vendor; + u8 keep_alive; + u16 apps; + u8 max_len_div16; + u8 ms_per_isoc; + u8 resv3[2]; +} PACKED; + +/* mode field bits. + */ +#define OZ_MODE_POLLED 0x0 +#define OZ_MODE_TRIGGERED 0x1 +#define OZ_MODE_MASK 0xf +#define OZ_F_ISOC_NO_ELTS 0x40 +#define OZ_F_ISOC_ANYTIME 0x80 +#define OZ_NO_ELTS_ANYTIME 0xc0 + +/* Keep alive field. + */ +#define OZ_KALIVE_TYPE_MASK 0xc0 +#define OZ_KALIVE_VALUE_MASK 0x3f +#define OZ_KALIVE_SPECIAL 0x00 +#define OZ_KALIVE_SECS 0x40 +#define OZ_KALIVE_MINS 0x80 +#define OZ_KALIVE_HOURS 0xc0 + +/* Connect response data structure. + */ +struct oz_elt_connect_rsp { + u8 mode; + u8 status; + u8 resv1[3]; + u8 session_id; + u16 apps; + u32 resv2; +} PACKED; + +struct oz_elt_farewell { + u8 ep_num; + u8 index; + u8 report[1]; +} PACKED; + +struct oz_elt_update_param { + u8 resv1[16]; + u8 presleep; + u8 resv2; + u8 host_vendor; + u8 keepalive; +} PACKED; + +/* Header common to all application elements. + */ +struct oz_app_hdr { + u8 app_id; + u8 elt_seq_num; +} PACKED; + +/* Values for app_id. + */ +#define OZ_APPID_USB 0x1 +#define OZ_APPID_SERIAL 0x4 +#define OZ_APPID_MAX OZ_APPID_SERIAL +#define OZ_NB_APPS (OZ_APPID_MAX+1) + +/* USB header common to all elements for the USB application. + * This header extends the oz_app_hdr and comes directly after + * the element header in a USB application. + */ +struct oz_usb_hdr { + u8 app_id; + u8 elt_seq_num; + u8 type; +} PACKED; + + + +/* USB requests element subtypes (type field of hs_usb_hdr). + */ +#define OZ_GET_DESC_REQ 1 +#define OZ_GET_DESC_RSP 2 +#define OZ_SET_CONFIG_REQ 3 +#define OZ_SET_CONFIG_RSP 4 +#define OZ_SET_INTERFACE_REQ 5 +#define OZ_SET_INTERFACE_RSP 6 +#define OZ_VENDOR_CLASS_REQ 7 +#define OZ_VENDOR_CLASS_RSP 8 +#define OZ_GET_STATUS_REQ 9 +#define OZ_GET_STATUS_RSP 10 +#define OZ_CLEAR_FEATURE_REQ 11 +#define OZ_CLEAR_FEATURE_RSP 12 +#define OZ_SET_FEATURE_REQ 13 +#define OZ_SET_FEATURE_RSP 14 +#define OZ_GET_CONFIGURATION_REQ 15 +#define OZ_GET_CONFIGURATION_RSP 16 +#define OZ_GET_INTERFACE_REQ 17 +#define OZ_GET_INTERFACE_RSP 18 +#define OZ_SYNCH_FRAME_REQ 19 +#define OZ_SYNCH_FRAME_RSP 20 +#define OZ_USB_ENDPOINT_DATA 23 + +#define OZ_REQD_D2H 0x80 + +struct oz_get_desc_req { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u16 offset; + u16 size; + u8 req_type; + u8 desc_type; + __le16 w_index; + u8 index; +} PACKED; + +/* Values for desc_type field. 
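The transmit paths (the connect response and the ISOC frame header, for example) compose the control byte above as (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) combined with the flag bits, and the receiver recovers the version with oz_get_prot_ver(). A small stand-alone sketch of composing and checking that byte (make_control() is illustrative; the constants mirror the ones defined above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VERSION_MASK     0x0c  /* OZ_VERSION_MASK */
#define VERSION_SHIFT    2     /* OZ_VERSION_SHIFT */
#define F_ACK            0x10
#define F_ACK_REQUESTED  0x80
#define PROTOCOL_VERSION 0x1

static uint8_t make_control(uint8_t flags)
{
    return (uint8_t)((PROTOCOL_VERSION << VERSION_SHIFT) | flags);
}

static uint8_t prot_ver(uint8_t control)   /* same test as oz_get_prot_ver() */
{
    return (control & VERSION_MASK) >> VERSION_SHIFT;
}

int main(void)
{
    uint8_t c = make_control(F_ACK_REQUESTED);

    assert(prot_ver(c) == PROTOCOL_VERSION);
    assert(c & F_ACK_REQUESTED);
    assert(!(c & F_ACK));
    printf("control 0x%02x carries protocol version %u\n", c, prot_ver(c));
    return 0;
}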
+*/ +#define OZ_DESC_DEVICE 0x01 +#define OZ_DESC_CONFIG 0x02 +#define OZ_DESC_STRING 0x03 + +/* Values for req_type field. + */ +#define OZ_RECP_MASK 0x1F +#define OZ_RECP_DEVICE 0x00 +#define OZ_RECP_INTERFACE 0x01 +#define OZ_RECP_ENDPOINT 0x02 + +#define OZ_REQT_MASK 0x60 +#define OZ_REQT_STD 0x00 +#define OZ_REQT_CLASS 0x20 +#define OZ_REQT_VENDOR 0x40 + +struct oz_get_desc_rsp { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + __le16 offset; + __le16 total_size; + u8 rcode; + u8 data[1]; +} PACKED; + +struct oz_feature_req { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 recipient; + u8 index; + u16 feature; +} PACKED; + +struct oz_feature_rsp { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 rcode; +} PACKED; + +struct oz_set_config_req { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 index; +} PACKED; + +struct oz_set_config_rsp { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 rcode; +} PACKED; + +struct oz_set_interface_req { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 index; + u8 alternative; +} PACKED; + +struct oz_set_interface_rsp { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 rcode; +} PACKED; + +struct oz_get_interface_req { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 index; +} PACKED; + +struct oz_get_interface_rsp { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 rcode; + u8 alternative; +} PACKED; + +struct oz_vendor_class_req { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 req_type; + u8 request; + u16 value; + u16 index; + u8 data[1]; +} PACKED; + +struct oz_vendor_class_rsp { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 req_id; + u8 rcode; + u8 data[1]; +} PACKED; + +struct oz_data { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 endpoint; + u8 format; +} PACKED; + +struct oz_isoc_fixed { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 endpoint; + u8 format; + u8 unit_size; + u8 frame_number; + u8 data[1]; +} PACKED; + +struct oz_multiple_fixed { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 endpoint; + u8 format; + u8 unit_size; + u8 data[1]; +} PACKED; + +struct oz_fragmented { + u8 app_id; + u8 elt_seq_num; + u8 type; + u8 endpoint; + u8 format; + u16 total_size; + u16 offset; + u8 data[1]; +} PACKED; + +/* Note: the following does not get packaged in an element in the same way + * that other data formats are packaged. Instead the data is put in a frame + * directly after the oz_header and is the only permitted data in such a + * frame. The length of the data is directly determined from the frame size. + */ +struct oz_isoc_large { + u8 endpoint; + u8 format; + u8 ms_data; + u8 frame_number; +} PACKED; + +#define OZ_DATA_F_TYPE_MASK 0xF +#define OZ_DATA_F_MULTIPLE_FIXED 0x1 +#define OZ_DATA_F_MULTIPLE_VAR 0x2 +#define OZ_DATA_F_ISOC_FIXED 0x3 +#define OZ_DATA_F_ISOC_VAR 0x4 +#define OZ_DATA_F_FRAGMENTED 0x5 +#define OZ_DATA_F_ISOC_LARGE 0x7 + +#endif /* _OZPROTOCOL_H */ diff --git a/kernel/drivers/staging/ozwpan/ozurbparanoia.c b/kernel/drivers/staging/ozwpan/ozurbparanoia.c new file mode 100644 index 000000000..cf6278a19 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozurbparanoia.c @@ -0,0 +1,54 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). 
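For the fixed-size data formats defined above (OZ_DATA_F_MULTIPLE_FIXED and OZ_DATA_F_ISOC_FIXED) the element body is a short header followed by N equal-sized units, so the receiver derives N from the element length and unit_size while guarding against a zero unit_size; that is what oz_usb_handle_ep_data() in ozusbsvc1.c does further down. A stand-alone sketch of that calculation (unit_count() is illustrative, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* How many fixed-size units fit in an element body of elt_body_len bytes,
 * where hdr_size is the fixed header size (the data[1] stub excluded).
 */
static unsigned unit_count(int elt_body_len, int hdr_size, uint8_t unit_size)
{
    if (!unit_size || elt_body_len < hdr_size)
        return 0;              /* malformed element: deliver nothing */
    return (unsigned)(elt_body_len - hdr_size) / unit_size;
}

int main(void)
{
    /* e.g. a 6-byte header followed by three 4-byte audio units */
    printf("%u\n", unit_count(6 + 12, 6, 4));   /* prints 3 */
    printf("%u\n", unit_count(18, 6, 0));       /* prints 0 (guarded) */
    return 0;
}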
+ * ----------------------------------------------------------------------------- + */ +#include +#include "ozdbg.h" + +#ifdef WANT_URB_PARANOIA + +#include "ozurbparanoia.h" + +#define OZ_MAX_URBS 1000 +struct urb *g_urb_memory[OZ_MAX_URBS]; +int g_nb_urbs; +DEFINE_SPINLOCK(g_urb_mem_lock); + +void oz_remember_urb(struct urb *urb) +{ + unsigned long irq_state; + + spin_lock_irqsave(&g_urb_mem_lock, irq_state); + if (g_nb_urbs < OZ_MAX_URBS) { + g_urb_memory[g_nb_urbs++] = urb; + oz_dbg(ON, "urb up = %d %p\n", g_nb_urbs, urb); + } else { + oz_dbg(ON, "ERROR urb buffer full\n"); + } + spin_unlock_irqrestore(&g_urb_mem_lock, irq_state); +} + +/* + */ +int oz_forget_urb(struct urb *urb) +{ + unsigned long irq_state; + int i; + int rc = -1; + + spin_lock_irqsave(&g_urb_mem_lock, irq_state); + for (i = 0; i < g_nb_urbs; i++) { + if (g_urb_memory[i] == urb) { + rc = 0; + if (--g_nb_urbs > i) + memcpy(&g_urb_memory[i], &g_urb_memory[i+1], + (g_nb_urbs - i) * sizeof(struct urb *)); + oz_dbg(ON, "urb down = %d %p\n", g_nb_urbs, urb); + } + } + spin_unlock_irqrestore(&g_urb_mem_lock, irq_state); + return rc; +} +#endif /* #ifdef WANT_URB_PARANOIA */ + diff --git a/kernel/drivers/staging/ozwpan/ozurbparanoia.h b/kernel/drivers/staging/ozwpan/ozurbparanoia.h new file mode 100644 index 000000000..5080ea76f --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozurbparanoia.h @@ -0,0 +1,19 @@ +#ifndef _OZURBPARANOIA_H +#define _OZURBPARANOIA_H +/* ----------------------------------------------------------------------------- + * Released under the GNU General Public License Version 2 (GPLv2). + * Copyright (c) 2011 Ozmo Inc + * ----------------------------------------------------------------------------- + */ + +#ifdef WANT_URB_PARANOIA +void oz_remember_urb(struct urb *urb); +int oz_forget_urb(struct urb *urb); +#else +static inline void oz_remember_urb(struct urb *urb) {} +static inline int oz_forget_urb(struct urb *urb) { return 0; } +#endif /* WANT_URB_PARANOIA */ + + +#endif /* _OZURBPARANOIA_H */ + diff --git a/kernel/drivers/staging/ozwpan/ozusbif.h b/kernel/drivers/staging/ozwpan/ozusbif.h new file mode 100644 index 000000000..d2a608534 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozusbif.h @@ -0,0 +1,43 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZUSBIF_H +#define _OZUSBIF_H + +#include + +/* Reference counting functions. + */ +void oz_usb_get(void *hpd); +void oz_usb_put(void *hpd); + +/* Stream functions. + */ +int oz_usb_stream_create(void *hpd, u8 ep_num); +int oz_usb_stream_delete(void *hpd, u8 ep_num); + +/* Request functions. + */ +int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup, + const u8 *data, int data_len); +int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type, + u8 index, __le16 windex, int offset, int len); +int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb); +void oz_usb_request_heartbeat(void *hpd); + +/* Confirmation functions. + */ +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, + const u8 *desc, u8 length, u16 offset, u16 total_size); +void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, + const u8 *data, int data_len); + +/* Indication functions. 
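The oz_forget_urb() compaction above copies the entries that follow the removed slot down by one position; because source and destination overlap whenever more than one entry follows, memmove() is the strictly correct primitive for that copy. A simplified user-space model of the table, using memmove and returning -1 for an unknown pointer like the original (forget() and table are illustrative names):

#include <stdio.h>
#include <string.h>

#define MAX_URBS 1000

static void *table[MAX_URBS];
static int nb;

static int forget(void *p)
{
    int i;

    for (i = 0; i < nb; i++) {
        if (table[i] != p)
            continue;
        if (--nb > i)
            memmove(&table[i], &table[i + 1],
                    (size_t)(nb - i) * sizeof(table[0]));
        return 0;
    }
    return -1;
}

int main(void)
{
    int a, b, c;

    table[nb++] = &a;
    table[nb++] = &b;
    table[nb++] = &c;
    printf("%d %d\n", forget(&a), nb);   /* 0 2  : removed, two remain   */
    printf("%d %d\n", forget(&a), nb);   /* -1 2 : pointer not recorded  */
    return 0;
}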
+ */ +void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len); + +int oz_hcd_heartbeat(void *hport); + +#endif /* _OZUSBIF_H */ diff --git a/kernel/drivers/staging/ozwpan/ozusbsvc.c b/kernel/drivers/staging/ozwpan/ozusbsvc.c new file mode 100644 index 000000000..bf15dc301 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozusbsvc.c @@ -0,0 +1,263 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * + * This file provides protocol independent part of the implementation of the USB + * service for a PD. + * The implementation of this service is split into two parts the first of which + * is protocol independent and the second contains protocol specific details. + * This split is to allow alternative protocols to be defined. + * The implementation of this service uses ozhcd.c to implement a USB HCD. + * ----------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ozdbg.h" +#include "ozprotocol.h" +#include "ozeltbuf.h" +#include "ozpd.h" +#include "ozproto.h" +#include "ozusbif.h" +#include "ozhcd.h" +#include "ozusbsvc.h" + +/* + * This is called once when the driver is loaded to initialise the USB service. + * Context: process + */ +int oz_usb_init(void) +{ + return oz_hcd_init(); +} + +/* + * This is called once when the driver is unloaded to terminate the USB service. + * Context: process + */ +void oz_usb_term(void) +{ + oz_hcd_term(); +} + +/* + * This is called when the USB service is started or resumed for a PD. + * Context: softirq + */ +int oz_usb_start(struct oz_pd *pd, int resume) +{ + int rc = 0; + struct oz_usb_ctx *usb_ctx; + struct oz_usb_ctx *old_ctx; + + if (resume) { + oz_dbg(ON, "USB service resumed\n"); + return 0; + } + oz_dbg(ON, "USB service started\n"); + /* Create a USB context in case we need one. If we find the PD already + * has a USB context then we will destroy it. + */ + usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC); + if (usb_ctx == NULL) + return -ENOMEM; + atomic_set(&usb_ctx->ref_count, 1); + usb_ctx->pd = pd; + usb_ctx->stopped = 0; + /* Install the USB context if the PD doesn't already have one. + * If it does already have one then destroy the one we have just + * created. + */ + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + old_ctx = pd->app_ctx[OZ_APPID_USB]; + if (old_ctx == NULL) + pd->app_ctx[OZ_APPID_USB] = usb_ctx; + oz_usb_get(pd->app_ctx[OZ_APPID_USB]); + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); + if (old_ctx) { + oz_dbg(ON, "Already have USB context\n"); + kfree(usb_ctx); + usb_ctx = old_ctx; + } else if (usb_ctx) { + /* Take a reference to the PD. This will be released when + * the USB context is destroyed. + */ + oz_pd_get(pd); + } + /* If we already had a USB context and had obtained a port from + * the USB HCD then just reset the port. If we didn't have a port + * then report the arrival to the USB HCD so we get one. 
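oz_usb_start() above allocates a context unconditionally and then, under the per-application lock, installs it only if the PD does not already have one; the loser of that race frees its copy and continues with the existing context. A stand-alone model of the same install-or-discard pattern, with a C11 compare-and-swap standing in for app_lock (get_or_create() and struct ctx are illustrative names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int dummy; };

static _Atomic(struct ctx *) slot;     /* models pd->app_ctx[OZ_APPID_USB] */

static struct ctx *get_or_create(void)
{
    struct ctx *fresh = calloc(1, sizeof(*fresh));
    struct ctx *expected = NULL;

    if (!fresh)
        return NULL;
    if (atomic_compare_exchange_strong(&slot, &expected, fresh))
        return fresh;                  /* we installed our context */
    free(fresh);                       /* someone beat us to it */
    return expected;                   /* use the already-installed context */
}

int main(void)
{
    struct ctx *a = get_or_create();
    struct ctx *b = get_or_create();

    printf("%s\n", (a && a == b) ? "same context" : "unexpected");
    return 0;
}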
+ */ + if (usb_ctx->hport) { + oz_hcd_pd_reset(usb_ctx, usb_ctx->hport); + } else { + usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx); + if (usb_ctx->hport == NULL) { + oz_dbg(ON, "USB hub returned null port\n"); + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + pd->app_ctx[OZ_APPID_USB] = NULL; + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); + oz_usb_put(usb_ctx); + rc = -1; + } + } + oz_usb_put(usb_ctx); + return rc; +} + +/* + * This is called when the USB service is stopped or paused for a PD. + * Context: softirq or process + */ +void oz_usb_stop(struct oz_pd *pd, int pause) +{ + struct oz_usb_ctx *usb_ctx; + + if (pause) { + oz_dbg(ON, "USB service paused\n"); + return; + } + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB]; + pd->app_ctx[OZ_APPID_USB] = NULL; + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); + if (usb_ctx) { + struct timespec ts, now; + + getnstimeofday(&ts); + oz_dbg(ON, "USB service stopping...\n"); + usb_ctx->stopped = 1; + /* At this point the reference count on the usb context should + * be 2 - one from when we created it and one from the hcd + * which claims a reference. Since stopped = 1 no one else + * should get in but someone may already be in. So wait + * until they leave but timeout after 1 second. + */ + while ((atomic_read(&usb_ctx->ref_count) > 2)) { + getnstimeofday(&now); + /*Approx 1 Sec. this is not perfect calculation*/ + if (now.tv_sec != ts.tv_sec) + break; + } + oz_dbg(ON, "USB service stopped\n"); + oz_hcd_pd_departed(usb_ctx->hport); + /* Release the reference taken in oz_usb_start. + */ + oz_usb_put(usb_ctx); + } +} + +/* + * This increments the reference count of the context area for a specific PD. + * This ensures this context area does not disappear while still in use. + * Context: softirq + */ +void oz_usb_get(void *hpd) +{ + struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd; + + atomic_inc(&usb_ctx->ref_count); +} + +/* + * This decrements the reference count of the context area for a specific PD + * and destroys the context area if the reference count becomes zero. 
+ * Context: irq or process + */ +void oz_usb_put(void *hpd) +{ + struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd; + + if (atomic_dec_and_test(&usb_ctx->ref_count)) { + oz_dbg(ON, "Dealloc USB context\n"); + oz_pd_put(usb_ctx->pd); + kfree(usb_ctx); + } +} + +/* + * Context: softirq + */ +int oz_usb_heartbeat(struct oz_pd *pd) +{ + struct oz_usb_ctx *usb_ctx; + int rc = 0; + + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB]; + if (usb_ctx) + oz_usb_get(usb_ctx); + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); + if (usb_ctx == NULL) + return rc; + if (usb_ctx->stopped) + goto done; + if (usb_ctx->hport) + if (oz_hcd_heartbeat(usb_ctx->hport)) + rc = 1; +done: + oz_usb_put(usb_ctx); + return rc; +} + +/* + * Context: softirq + */ +int oz_usb_stream_create(void *hpd, u8 ep_num) +{ + struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd; + struct oz_pd *pd = usb_ctx->pd; + + oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num); + if (pd->mode & OZ_F_ISOC_NO_ELTS) { + oz_isoc_stream_create(pd, ep_num); + } else { + oz_pd_get(pd); + if (oz_elt_stream_create(&pd->elt_buff, ep_num, + 4*pd->max_tx_size)) { + oz_pd_put(pd); + return -1; + } + } + return 0; +} + +/* + * Context: softirq + */ +int oz_usb_stream_delete(void *hpd, u8 ep_num) +{ + struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd; + + if (usb_ctx) { + struct oz_pd *pd = usb_ctx->pd; + + if (pd) { + oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num); + if (pd->mode & OZ_F_ISOC_NO_ELTS) { + oz_isoc_stream_delete(pd, ep_num); + } else { + if (oz_elt_stream_delete(&pd->elt_buff, ep_num)) + return -1; + oz_pd_put(pd); + } + } + } + return 0; +} + +/* + * Context: softirq or process + */ +void oz_usb_request_heartbeat(void *hpd) +{ + struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd; + + if (usb_ctx && usb_ctx->pd) + oz_pd_request_heartbeat(usb_ctx->pd); +} diff --git a/kernel/drivers/staging/ozwpan/ozusbsvc.h b/kernel/drivers/staging/ozwpan/ozusbsvc.h new file mode 100644 index 000000000..58e05a59b --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozusbsvc.h @@ -0,0 +1,32 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * ----------------------------------------------------------------------------- + */ +#ifndef _OZUSBSVC_H +#define _OZUSBSVC_H + +/*------------------------------------------------------------------------------ + * Per PD context info stored in application context area of PD. + * This object is reference counted to ensure it doesn't disappear while + * still in use. 
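Callers such as oz_usb_heartbeat() and oz_usb_rx() always follow the same discipline with this context: take app_lock, read pd->app_ctx, bump the reference count before dropping the lock, and balance it with oz_usb_put() when done, so the context described in the comment above cannot be freed underneath them. A single-threaded user-space model of that discipline (a mutex stands in for app_lock and a plain counter for the atomic ref_count; ctx_get()/ctx_put() are illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct usb_ctx {
    int ref_count;             /* atomic_t in the driver */
    int stopped;
};

static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;
static struct usb_ctx *app_ctx;

static struct usb_ctx *ctx_get(void)          /* lookup + oz_usb_get() */
{
    struct usb_ctx *c;

    pthread_mutex_lock(&app_lock);
    c = app_ctx;
    if (c)
        c->ref_count++;
    pthread_mutex_unlock(&app_lock);
    return c;
}

static void ctx_put(struct usb_ctx *c)        /* oz_usb_put() */
{
    if (c && --c->ref_count == 0) {
        printf("context destroyed\n");
        free(c);
    }
}

int main(void)
{
    struct usb_ctx *c;

    app_ctx = calloc(1, sizeof(*app_ctx));
    if (!app_ctx)
        return 1;
    app_ctx->ref_count = 1;                   /* reference held by the PD */

    c = ctx_get();
    if (c && !c->stopped)
        printf("using context, refs=%d\n", c->ref_count);
    ctx_put(c);

    pthread_mutex_lock(&app_lock);            /* teardown, as in oz_usb_stop() */
    c = app_ctx;
    app_ctx = NULL;
    pthread_mutex_unlock(&app_lock);
    ctx_put(c);
    return 0;
}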
+ */ +struct oz_usb_ctx { + atomic_t ref_count; + u8 tx_seq_num; + u8 rx_seq_num; + struct oz_pd *pd; + void *hport; + int stopped; +}; + +int oz_usb_init(void); +void oz_usb_term(void); +int oz_usb_start(struct oz_pd *pd, int resume); +void oz_usb_stop(struct oz_pd *pd, int pause); +void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt); +int oz_usb_heartbeat(struct oz_pd *pd); +void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len); + +#endif /* _OZUSBSVC_H */ + diff --git a/kernel/drivers/staging/ozwpan/ozusbsvc1.c b/kernel/drivers/staging/ozwpan/ozusbsvc1.c new file mode 100644 index 000000000..f660bb198 --- /dev/null +++ b/kernel/drivers/staging/ozwpan/ozusbsvc1.c @@ -0,0 +1,462 @@ +/* ----------------------------------------------------------------------------- + * Copyright (c) 2011 Ozmo Inc + * Released under the GNU General Public License Version 2 (GPLv2). + * + * This file implements the protocol specific parts of the USB service for a PD. + * ----------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include "ozdbg.h" +#include "ozprotocol.h" +#include "ozeltbuf.h" +#include "ozpd.h" +#include "ozproto.h" +#include "ozusbif.h" +#include "ozhcd.h" +#include "ozusbsvc.h" + +#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed)) + +/* + * Context: softirq + */ +static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei, + struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc) +{ + int ret; + struct oz_elt *elt = (struct oz_elt *)ei->data; + struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1); + + elt->type = OZ_ELT_APP_DATA; + ei->app_id = OZ_APPID_USB; + ei->length = elt->length + sizeof(struct oz_elt); + app_hdr->app_id = OZ_APPID_USB; + spin_lock_bh(&eb->lock); + if (isoc == 0) { + app_hdr->elt_seq_num = usb_ctx->tx_seq_num++; + if (usb_ctx->tx_seq_num == 0) + usb_ctx->tx_seq_num = 1; + } + ret = oz_queue_elt_info(eb, isoc, strid, ei); + if (ret) + oz_elt_info_free(eb, ei); + spin_unlock_bh(&eb->lock); + return ret; +} + +/* + * Context: softirq + */ +int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type, + u8 index, __le16 windex, int offset, int len) +{ + struct oz_usb_ctx *usb_ctx = hpd; + struct oz_pd *pd = usb_ctx->pd; + struct oz_elt *elt; + struct oz_get_desc_req *body; + struct oz_elt_buf *eb = &pd->elt_buff; + struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); + + oz_dbg(ON, " req_type = 0x%x\n", req_type); + oz_dbg(ON, " desc_type = 0x%x\n", desc_type); + oz_dbg(ON, " index = 0x%x\n", index); + oz_dbg(ON, " windex = 0x%x\n", windex); + oz_dbg(ON, " offset = 0x%x\n", offset); + oz_dbg(ON, " len = 0x%x\n", len); + if (len > 200) + len = 200; + if (ei == NULL) + return -1; + elt = (struct oz_elt *)ei->data; + elt->length = sizeof(struct oz_get_desc_req); + body = (struct oz_get_desc_req *)(elt+1); + body->type = OZ_GET_DESC_REQ; + body->req_id = req_id; + put_unaligned(cpu_to_le16(offset), &body->offset); + put_unaligned(cpu_to_le16(len), &body->size); + body->req_type = req_type; + body->desc_type = desc_type; + body->w_index = windex; + body->index = index; + return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); +} + +/* + * Context: tasklet + */ +static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index) +{ + struct oz_usb_ctx *usb_ctx = hpd; + struct oz_pd *pd = usb_ctx->pd; + struct oz_elt *elt; + struct oz_elt_buf *eb = &pd->elt_buff; + struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); + struct 
oz_set_config_req *body; + + if (ei == NULL) + return -1; + elt = (struct oz_elt *)ei->data; + elt->length = sizeof(struct oz_set_config_req); + body = (struct oz_set_config_req *)(elt+1); + body->type = OZ_SET_CONFIG_REQ; + body->req_id = req_id; + body->index = index; + return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); +} + +/* + * Context: tasklet + */ +static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt) +{ + struct oz_usb_ctx *usb_ctx = hpd; + struct oz_pd *pd = usb_ctx->pd; + struct oz_elt *elt; + struct oz_elt_buf *eb = &pd->elt_buff; + struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); + struct oz_set_interface_req *body; + + if (ei == NULL) + return -1; + elt = (struct oz_elt *)ei->data; + elt->length = sizeof(struct oz_set_interface_req); + body = (struct oz_set_interface_req *)(elt+1); + body->type = OZ_SET_INTERFACE_REQ; + body->req_id = req_id; + body->index = index; + body->alternative = alt; + return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); +} + +/* + * Context: tasklet + */ +static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type, + u8 recipient, u8 index, __le16 feature) +{ + struct oz_usb_ctx *usb_ctx = hpd; + struct oz_pd *pd = usb_ctx->pd; + struct oz_elt *elt; + struct oz_elt_buf *eb = &pd->elt_buff; + struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); + struct oz_feature_req *body; + + if (ei == NULL) + return -1; + elt = (struct oz_elt *)ei->data; + elt->length = sizeof(struct oz_feature_req); + body = (struct oz_feature_req *)(elt+1); + body->type = type; + body->req_id = req_id; + body->recipient = recipient; + body->index = index; + put_unaligned(feature, &body->feature); + return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); +} + +/* + * Context: tasklet + */ +static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type, + u8 request, __le16 value, __le16 index, const u8 *data, int data_len) +{ + struct oz_usb_ctx *usb_ctx = hpd; + struct oz_pd *pd = usb_ctx->pd; + struct oz_elt *elt; + struct oz_elt_buf *eb = &pd->elt_buff; + struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); + struct oz_vendor_class_req *body; + + if (ei == NULL) + return -1; + elt = (struct oz_elt *)ei->data; + elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len; + body = (struct oz_vendor_class_req *)(elt+1); + body->type = OZ_VENDOR_CLASS_REQ; + body->req_id = req_id; + body->req_type = req_type; + body->request = request; + put_unaligned(value, &body->value); + put_unaligned(index, &body->index); + if (data_len) + memcpy(body->data, data, data_len); + return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); +} + +/* + * Context: tasklet + */ +int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup, + const u8 *data, int data_len) +{ + unsigned wvalue = le16_to_cpu(setup->wValue); + unsigned windex = le16_to_cpu(setup->wIndex); + unsigned wlength = le16_to_cpu(setup->wLength); + int rc = 0; + + if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + switch (setup->bRequest) { + case USB_REQ_GET_DESCRIPTOR: + rc = oz_usb_get_desc_req(hpd, req_id, + setup->bRequestType, (u8)(wvalue>>8), + (u8)wvalue, setup->wIndex, 0, wlength); + break; + case USB_REQ_SET_CONFIGURATION: + rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue); + break; + case USB_REQ_SET_INTERFACE: { + u8 if_num = (u8)windex; + u8 alt = (u8)wvalue; + + rc = oz_usb_set_interface_req(hpd, req_id, + if_num, alt); + } + break; + case USB_REQ_SET_FEATURE: + rc = oz_usb_set_clear_feature_req(hpd, req_id, + OZ_SET_FEATURE_REQ, + 
setup->bRequestType & 0xf, (u8)windex, + setup->wValue); + break; + case USB_REQ_CLEAR_FEATURE: + rc = oz_usb_set_clear_feature_req(hpd, req_id, + OZ_CLEAR_FEATURE_REQ, + setup->bRequestType & 0xf, + (u8)windex, setup->wValue); + break; + } + } else { + rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType, + setup->bRequest, setup->wValue, setup->wIndex, + data, data_len); + } + return rc; +} + +/* + * Context: softirq + */ +int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb) +{ + struct oz_usb_ctx *usb_ctx = hpd; + struct oz_pd *pd = usb_ctx->pd; + struct oz_elt_buf *eb; + int i; + int hdr_size; + u8 *data; + struct usb_iso_packet_descriptor *desc; + + if (pd->mode & OZ_F_ISOC_NO_ELTS) { + for (i = 0; i < urb->number_of_packets; i++) { + u8 *data; + + desc = &urb->iso_frame_desc[i]; + data = ((u8 *)urb->transfer_buffer)+desc->offset; + oz_send_isoc_unit(pd, ep_num, data, desc->length); + } + return 0; + } + + hdr_size = sizeof(struct oz_isoc_fixed) - 1; + eb = &pd->elt_buff; + i = 0; + while (i < urb->number_of_packets) { + struct oz_elt_info *ei = oz_elt_info_alloc(eb); + struct oz_elt *elt; + struct oz_isoc_fixed *body; + int unit_count; + int unit_size; + int rem; + + if (ei == NULL) + return -1; + rem = MAX_ISOC_FIXED_DATA; + elt = (struct oz_elt *)ei->data; + body = (struct oz_isoc_fixed *)(elt + 1); + body->type = OZ_USB_ENDPOINT_DATA; + body->endpoint = ep_num; + body->format = OZ_DATA_F_ISOC_FIXED; + unit_size = urb->iso_frame_desc[i].length; + body->unit_size = (u8)unit_size; + data = ((u8 *)(elt+1)) + hdr_size; + unit_count = 0; + while (i < urb->number_of_packets) { + desc = &urb->iso_frame_desc[i]; + if ((unit_size == desc->length) && + (desc->length <= rem)) { + memcpy(data, ((u8 *)urb->transfer_buffer) + + desc->offset, unit_size); + data += unit_size; + rem -= unit_size; + unit_count++; + desc->status = 0; + desc->actual_length = desc->length; + i++; + } else { + break; + } + } + elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem; + /* Store the number of units in body->frame_number for the + * moment. This field will be correctly determined before + * the element is sent. */ + body->frame_number = (u8)unit_count; + oz_usb_submit_elt(eb, ei, usb_ctx, ep_num, + pd->mode & OZ_F_ISOC_ANYTIME); + } + return 0; +} + +/* + * Context: softirq-serialized + */ +static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, + struct oz_usb_hdr *usb_hdr, int len) +{ + struct oz_data *data_hdr = (struct oz_data *)usb_hdr; + + switch (data_hdr->format) { + case OZ_DATA_F_MULTIPLE_FIXED: { + struct oz_multiple_fixed *body = + (struct oz_multiple_fixed *)data_hdr; + u8 *data = body->data; + unsigned int n; + if (!body->unit_size || + len < sizeof(struct oz_multiple_fixed) - 1) + break; + n = (len - (sizeof(struct oz_multiple_fixed) - 1)) + / body->unit_size; + while (n--) { + oz_hcd_data_ind(usb_ctx->hport, body->endpoint, + data, body->unit_size); + data += body->unit_size; + } + } + break; + case OZ_DATA_F_ISOC_FIXED: { + struct oz_isoc_fixed *body = + (struct oz_isoc_fixed *)data_hdr; + int data_len = len-sizeof(struct oz_isoc_fixed)+1; + int unit_size = body->unit_size; + u8 *data = body->data; + int count; + int i; + + if (!unit_size) + break; + count = data_len/unit_size; + for (i = 0; i < count; i++) { + oz_hcd_data_ind(usb_ctx->hport, + body->endpoint, data, unit_size); + data += unit_size; + } + } + break; + } + +} + +/* + * This is called when the PD has received a USB element. 
The type of element + * is determined and is then passed to an appropriate handler function. + * Context: softirq-serialized + */ +void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) +{ + struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1); + struct oz_usb_ctx *usb_ctx; + + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; + if (usb_ctx) + oz_usb_get(usb_ctx); + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); + if (usb_ctx == NULL) + return; /* Context has gone so nothing to do. */ + if (usb_ctx->stopped) + goto done; + /* If sequence number is non-zero then check it is not a duplicate. + * Zero sequence numbers are always accepted. + */ + if (usb_hdr->elt_seq_num != 0) { + if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0) + /* Reject duplicate element. */ + goto done; + } + usb_ctx->rx_seq_num = usb_hdr->elt_seq_num; + switch (usb_hdr->type) { + case OZ_GET_DESC_RSP: { + struct oz_get_desc_rsp *body = + (struct oz_get_desc_rsp *)usb_hdr; + u16 offs, total_size; + u8 data_len; + + if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) + break; + data_len = elt->length - + (sizeof(struct oz_get_desc_rsp) - 1); + offs = le16_to_cpu(get_unaligned(&body->offset)); + total_size = + le16_to_cpu(get_unaligned(&body->total_size)); + oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); + oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, + body->rcode, body->data, + data_len, offs, total_size); + } + break; + case OZ_SET_CONFIG_RSP: { + struct oz_set_config_rsp *body = + (struct oz_set_config_rsp *)usb_hdr; + oz_hcd_control_cnf(usb_ctx->hport, body->req_id, + body->rcode, NULL, 0); + } + break; + case OZ_SET_INTERFACE_RSP: { + struct oz_set_interface_rsp *body = + (struct oz_set_interface_rsp *)usb_hdr; + oz_hcd_control_cnf(usb_ctx->hport, + body->req_id, body->rcode, NULL, 0); + } + break; + case OZ_VENDOR_CLASS_RSP: { + struct oz_vendor_class_rsp *body = + (struct oz_vendor_class_rsp *)usb_hdr; + oz_hcd_control_cnf(usb_ctx->hport, body->req_id, + body->rcode, body->data, elt->length- + sizeof(struct oz_vendor_class_rsp)+1); + } + break; + case OZ_USB_ENDPOINT_DATA: + oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length); + break; + } +done: + oz_usb_put(usb_ctx); +} + +/* + * Context: softirq, process + */ +void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len) +{ + struct oz_usb_ctx *usb_ctx; + + spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); + usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; + if (usb_ctx) + oz_usb_get(usb_ctx); + spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); + if (usb_ctx == NULL) + return; /* Context has gone so nothing to do. */ + if (!usb_ctx->stopped) { + oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num); + oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len); + } + oz_usb_put(usb_ctx); +} -- cgit 1.2.3-korg
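One detail of oz_usb_rx() worth spelling out is the duplicate check: sequence number zero is always accepted, and any other element is accepted only if its 8-bit sequence number is ahead of the last one seen by less than half a cycle, which is what the (rx_seq_num - elt_seq_num) & 0x80 test encodes. A stand-alone model of that acceptance rule (accept() is an illustrative name):

#include <stdint.h>
#include <stdio.h>

static int accept(uint8_t last_seen, uint8_t incoming)
{
    if (incoming == 0)
        return 1;              /* zero sequence numbers are always accepted */
    /* rejected when (last_seen - incoming) has bit 7 clear, i.e. the
     * incoming number is equal to or behind the last one seen */
    return ((uint8_t)(last_seen - incoming) & 0x80) != 0;
}

int main(void)
{
    printf("%d\n", accept(5, 6));    /* 1: next in order      */
    printf("%d\n", accept(5, 5));    /* 0: duplicate          */
    printf("%d\n", accept(250, 3));  /* 1: counter wrapped    */
    printf("%d\n", accept(5, 3));    /* 0: stale element      */
    return 0;
}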