author    | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 12:17:53 -0700
committer | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 15:44:42 -0700
commit    | 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      | 1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/firewire
parent    | 98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the RT Linux 4.1.3-rt3 tree as the OPNFV KVM base.
It is taken from the linux-4.1.y-rt branch of git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git,
and the base commit is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Importing this way loses all of the git history, which is not ideal; we should
move this work into a separate OPNFV project repository in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/firewire')
-rw-r--r-- | kernel/drivers/firewire/Kconfig             |   83
-rw-r--r-- | kernel/drivers/firewire/Makefile            |   16
-rw-r--r-- | kernel/drivers/firewire/core-card.c         |  706
-rw-r--r-- | kernel/drivers/firewire/core-cdev.c         | 1815
-rw-r--r-- | kernel/drivers/firewire/core-device.c       | 1328
-rw-r--r-- | kernel/drivers/firewire/core-iso.c          |  403
-rw-r--r-- | kernel/drivers/firewire/core-topology.c     |  569
-rw-r--r-- | kernel/drivers/firewire/core-transaction.c  | 1300
-rw-r--r-- | kernel/drivers/firewire/core.h              |  258
-rw-r--r-- | kernel/drivers/firewire/init_ohci1394_dma.c |  309
-rw-r--r-- | kernel/drivers/firewire/net.c               | 1707
-rw-r--r-- | kernel/drivers/firewire/nosy-user.h         |   25
-rw-r--r-- | kernel/drivers/firewire/nosy.c              |  709
-rw-r--r-- | kernel/drivers/firewire/nosy.h              |  237
-rw-r--r-- | kernel/drivers/firewire/ohci.c              | 3892
-rw-r--r-- | kernel/drivers/firewire/ohci.h              |  158
-rw-r--r-- | kernel/drivers/firewire/sbp2.c              | 1638
17 files changed, 15153 insertions, 0 deletions
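Before the diff itself, one detail worth orienting on: the imported core-card.c builds each card's configuration ROM and protects every block with an ITU-T CRC-16 (fw_compute_block_crc(), backed by the kernel's crc_itu_t() helper, polynomial 0x1021). The standalone C sketch below mirrors that computation for reference only; it is an editor's illustration under those assumptions, not part of the import, and the names crc16_itu_t() and rom_block_crc() are made up.

/*
 * Editor's illustration: standalone sketch of the configuration-ROM block
 * CRC scheme used by fw_compute_block_crc() in core-card.c below.  Not
 * kernel code; the kernel uses crc_itu_t() from lib/crc-itu-t.c.
 */
#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC-16 with the ITU-T polynomial x^16 + x^12 + x^5 + 1 (0x1021). */
static uint16_t crc16_itu_t(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

/*
 * block[0] is the block header: bits 16..23 hold the number of quadlets
 * covered by the CRC, and the low 16 bits receive the CRC.  Quadlets are
 * kept in host order here and serialized big-endian for the CRC, matching
 * the byte stream the kernel runs crc_itu_t() over in its __be32 ROM image.
 */
static int rom_block_crc(uint32_t *block)
{
	int length = (block[0] >> 16) & 0xff;
	uint16_t crc = 0;

	for (int q = 1; q <= length; q++) {
		uint8_t bytes[4] = {
			(uint8_t)(block[q] >> 24), (uint8_t)(block[q] >> 16),
			(uint8_t)(block[q] >> 8),  (uint8_t)block[q],
		};
		crc = crc16_itu_t(crc, bytes, sizeof(bytes));
	}
	block[0] = (block[0] & 0xffff0000u) | crc; /* kernel ORs the CRC in */
	return length;
}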
diff --git a/kernel/drivers/firewire/Kconfig b/kernel/drivers/firewire/Kconfig
new file mode 100644
index 000000000..145974f96
--- /dev/null
+++ b/kernel/drivers/firewire/Kconfig
@@ -0,0 +1,83 @@
+menu "IEEE 1394 (FireWire) support"
+	depends on HAS_DMA
+	depends on PCI || COMPILE_TEST
+	# firewire-core does not depend on PCI but is
+	# not useful without PCI controller driver
+
+config FIREWIRE
+	tristate "FireWire driver stack"
+	select CRC_ITU_T
+	help
+	  This is the new-generation IEEE 1394 (FireWire) driver stack
+	  a.k.a. Juju, a new implementation designed for robustness and
+	  simplicity.
+	  See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+	  for information about migration from the older Linux 1394 stack
+	  to the new driver stack.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called firewire-core.
+
+config FIREWIRE_OHCI
+	tristate "OHCI-1394 controllers"
+	depends on PCI && FIREWIRE && MMU
+	help
+	  Enable this driver if you have a FireWire controller based
+	  on the OHCI specification. For all practical purposes, this
+	  is the only chipset in use, so say Y here.
+
+	  To compile this driver as a module, say M here: The module will be
+	  called firewire-ohci.
+
+config FIREWIRE_SBP2
+	tristate "Storage devices (SBP-2 protocol)"
+	depends on FIREWIRE && SCSI
+	help
+	  This option enables you to use SBP-2 devices connected to a
+	  FireWire bus. SBP-2 devices include storage devices like
+	  harddisks and DVD drives, also some other FireWire devices
+	  like scanners.
+
+	  To compile this driver as a module, say M here: The module will be
+	  called firewire-sbp2.
+
+	  You should also enable support for disks, CD-ROMs, etc. in the SCSI
+	  configuration section.
+
+config FIREWIRE_NET
+	tristate "IP networking over 1394"
+	depends on FIREWIRE && INET
+	help
+	  This enables IPv4/IPv6 over IEEE 1394, providing IP connectivity
+	  with other implementations of RFC 2734/3146 as found on several
+	  operating systems. Multicast support is currently limited.
+
+	  To compile this driver as a module, say M here: The module will be
+	  called firewire-net.
+
+config FIREWIRE_NOSY
+	tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
+	depends on PCI
+	help
+	  Nosy is an IEEE 1394 packet sniffer that is used for protocol
+	  analysis and in development of IEEE 1394 drivers, applications,
+	  or firmwares.
+
+	  This driver lets you use a Texas Instruments PCILynx 1394 to PCI
+	  link layer controller TSB12LV21/A/B as a low-budget bus analyzer.
+	  PCILynx is a nowadays very rare IEEE 1394 controller which is
+	  not OHCI 1394 compliant.
+
+	  The following cards are known to be based on PCILynx or PCILynx-2:
+	  IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
+	  (PCI card), Newer Technology FireWire 2 Go (CardBus card),
+	  Apple Power Mac G3 blue & white and G4 with PCI graphics
+	  (onboard controller).
+
+	  To compile this driver as a module, say M here: The module will be
+	  called nosy. Source code of a userspace interface to nosy, called
+	  nosy-dump, can be found in tools/firewire/ of the kernel sources.
+
+	  If unsure, say N.
+ +endmenu diff --git a/kernel/drivers/firewire/Makefile b/kernel/drivers/firewire/Makefile new file mode 100644 index 000000000..e3870d5c4 --- /dev/null +++ b/kernel/drivers/firewire/Makefile @@ -0,0 +1,16 @@ +# +# Makefile for the Linux IEEE 1394 implementation +# + +firewire-core-y += core-card.o core-cdev.o core-device.o \ + core-iso.o core-topology.o core-transaction.o +firewire-ohci-y += ohci.o +firewire-sbp2-y += sbp2.o +firewire-net-y += net.o + +obj-$(CONFIG_FIREWIRE) += firewire-core.o +obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o +obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o +obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o +obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o +obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o diff --git a/kernel/drivers/firewire/core-card.c b/kernel/drivers/firewire/core-card.c new file mode 100644 index 000000000..57ea7f464 --- /dev/null +++ b/kernel/drivers/firewire/core-card.c @@ -0,0 +1,706 @@ +/* + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/bug.h> +#include <linux/completion.h> +#include <linux/crc-itu-t.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include <linux/atomic.h> +#include <asm/byteorder.h> + +#include "core.h" + +#define define_fw_printk_level(func, kern_level) \ +void func(const struct fw_card *card, const char *fmt, ...) 
\ +{ \ + struct va_format vaf; \ + va_list args; \ + \ + va_start(args, fmt); \ + vaf.fmt = fmt; \ + vaf.va = &args; \ + printk(kern_level KBUILD_MODNAME " %s: %pV", \ + dev_name(card->device), &vaf); \ + va_end(args); \ +} +define_fw_printk_level(fw_err, KERN_ERR); +define_fw_printk_level(fw_notice, KERN_NOTICE); + +int fw_compute_block_crc(__be32 *block) +{ + int length; + u16 crc; + + length = (be32_to_cpu(block[0]) >> 16) & 0xff; + crc = crc_itu_t(0, (u8 *)&block[1], length * 4); + *block |= cpu_to_be32(crc); + + return length; +} + +static DEFINE_MUTEX(card_mutex); +static LIST_HEAD(card_list); + +static LIST_HEAD(descriptor_list); +static int descriptor_count; + +static __be32 tmp_config_rom[256]; +/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ +static size_t config_rom_length = 1 + 4 + 1 + 1; + +#define BIB_CRC(v) ((v) << 0) +#define BIB_CRC_LENGTH(v) ((v) << 16) +#define BIB_INFO_LENGTH(v) ((v) << 24) +#define BIB_BUS_NAME 0x31333934 /* "1394" */ +#define BIB_LINK_SPEED(v) ((v) << 0) +#define BIB_GENERATION(v) ((v) << 4) +#define BIB_MAX_ROM(v) ((v) << 8) +#define BIB_MAX_RECEIVE(v) ((v) << 12) +#define BIB_CYC_CLK_ACC(v) ((v) << 16) +#define BIB_PMC ((1) << 27) +#define BIB_BMC ((1) << 28) +#define BIB_ISC ((1) << 29) +#define BIB_CMC ((1) << 30) +#define BIB_IRMC ((1) << 31) +#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */ + +/* + * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms), + * but we have to make it longer because there are many devices whose firmware + * is just too slow for that. + */ +#define DEFAULT_SPLIT_TIMEOUT (2 * 8000) + +#define CANON_OUI 0x000085 + +static void generate_config_rom(struct fw_card *card, __be32 *config_rom) +{ + struct fw_descriptor *desc; + int i, j, k, length; + + /* + * Initialize contents of config rom buffer. On the OHCI + * controller, block reads to the config rom accesses the host + * memory, but quadlet read access the hardware bus info block + * registers. That's just crack, but it means we should make + * sure the contents of bus info block in host memory matches + * the version stored in the OHCI registers. + */ + + config_rom[0] = cpu_to_be32( + BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); + config_rom[1] = cpu_to_be32(BIB_BUS_NAME); + config_rom[2] = cpu_to_be32( + BIB_LINK_SPEED(card->link_speed) | + BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | + BIB_MAX_ROM(2) | + BIB_MAX_RECEIVE(card->max_receive) | + BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC); + config_rom[3] = cpu_to_be32(card->guid >> 32); + config_rom[4] = cpu_to_be32(card->guid); + + /* Generate root directory. */ + config_rom[6] = cpu_to_be32(NODE_CAPABILITIES); + i = 7; + j = 7 + descriptor_count; + + /* Generate root directory entries for descriptors. */ + list_for_each_entry (desc, &descriptor_list, link) { + if (desc->immediate > 0) + config_rom[i++] = cpu_to_be32(desc->immediate); + config_rom[i] = cpu_to_be32(desc->key | (j - i)); + i++; + j += desc->length; + } + + /* Update root directory length. */ + config_rom[5] = cpu_to_be32((i - 5 - 1) << 16); + + /* End of root directory, now copy in descriptors. */ + list_for_each_entry (desc, &descriptor_list, link) { + for (k = 0; k < desc->length; k++) + config_rom[i + k] = cpu_to_be32(desc->data[k]); + i += desc->length; + } + + /* Calculate CRCs for all blocks in the config rom. This + * assumes that CRC length and info length are identical for + * the bus info block, which is always the case for this + * implementation. 
*/ + for (i = 0; i < j; i += length + 1) + length = fw_compute_block_crc(config_rom + i); + + WARN_ON(j != config_rom_length); +} + +static void update_config_roms(void) +{ + struct fw_card *card; + + list_for_each_entry (card, &card_list, link) { + generate_config_rom(card, tmp_config_rom); + card->driver->set_config_rom(card, tmp_config_rom, + config_rom_length); + } +} + +static size_t required_space(struct fw_descriptor *desc) +{ + /* descriptor + entry into root dir + optional immediate entry */ + return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); +} + +int fw_core_add_descriptor(struct fw_descriptor *desc) +{ + size_t i; + int ret; + + /* + * Check descriptor is valid; the length of all blocks in the + * descriptor has to add up to exactly the length of the + * block. + */ + i = 0; + while (i < desc->length) + i += (desc->data[i] >> 16) + 1; + + if (i != desc->length) + return -EINVAL; + + mutex_lock(&card_mutex); + + if (config_rom_length + required_space(desc) > 256) { + ret = -EBUSY; + } else { + list_add_tail(&desc->link, &descriptor_list); + config_rom_length += required_space(desc); + descriptor_count++; + if (desc->immediate > 0) + descriptor_count++; + update_config_roms(); + ret = 0; + } + + mutex_unlock(&card_mutex); + + return ret; +} +EXPORT_SYMBOL(fw_core_add_descriptor); + +void fw_core_remove_descriptor(struct fw_descriptor *desc) +{ + mutex_lock(&card_mutex); + + list_del(&desc->link); + config_rom_length -= required_space(desc); + descriptor_count--; + if (desc->immediate > 0) + descriptor_count--; + update_config_roms(); + + mutex_unlock(&card_mutex); +} +EXPORT_SYMBOL(fw_core_remove_descriptor); + +static int reset_bus(struct fw_card *card, bool short_reset) +{ + int reg = short_reset ? 5 : 1; + int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; + + return card->driver->update_phy_reg(card, reg, 0, bit); +} + +void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) +{ + /* We don't try hard to sort out requests of long vs. short resets. */ + card->br_short = short_reset; + + /* Use an arbitrary short delay to combine multiple reset requests. */ + fw_card_get(card); + if (!queue_delayed_work(fw_workqueue, &card->br_work, + delayed ? DIV_ROUND_UP(HZ, 100) : 0)) + fw_card_put(card); +} +EXPORT_SYMBOL(fw_schedule_bus_reset); + +static void br_work(struct work_struct *work) +{ + struct fw_card *card = container_of(work, struct fw_card, br_work.work); + + /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. 
*/ + if (card->reset_jiffies != 0 && + time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) { + if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ)) + fw_card_put(card); + return; + } + + fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, + FW_PHY_CONFIG_CURRENT_GAP_COUNT); + reset_bus(card, card->br_short); + fw_card_put(card); +} + +static void allocate_broadcast_channel(struct fw_card *card, int generation) +{ + int channel, bandwidth = 0; + + if (!card->broadcast_channel_allocated) { + fw_iso_resource_manage(card, generation, 1ULL << 31, + &channel, &bandwidth, true); + if (channel != 31) { + fw_notice(card, "failed to allocate broadcast channel\n"); + return; + } + card->broadcast_channel_allocated = true; + } + + device_for_each_child(card->device, (void *)(long)generation, + fw_device_set_broadcast_channel); +} + +static const char gap_count_table[] = { + 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 +}; + +void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) +{ + fw_card_get(card); + if (!schedule_delayed_work(&card->bm_work, delay)) + fw_card_put(card); +} + +static void bm_work(struct work_struct *work) +{ + struct fw_card *card = container_of(work, struct fw_card, bm_work.work); + struct fw_device *root_device, *irm_device; + struct fw_node *root_node; + int root_id, new_root_id, irm_id, bm_id, local_id; + int gap_count, generation, grace, rcode; + bool do_reset = false; + bool root_device_is_running; + bool root_device_is_cmc; + bool irm_is_1394_1995_only; + bool keep_this_irm; + __be32 transaction_data[2]; + + spin_lock_irq(&card->lock); + + if (card->local_node == NULL) { + spin_unlock_irq(&card->lock); + goto out_put_card; + } + + generation = card->generation; + + root_node = card->root_node; + fw_node_get(root_node); + root_device = root_node->data; + root_device_is_running = root_device && + atomic_read(&root_device->state) == FW_DEVICE_RUNNING; + root_device_is_cmc = root_device && root_device->cmc; + + irm_device = card->irm_node->data; + irm_is_1394_1995_only = irm_device && irm_device->config_rom && + (irm_device->config_rom[2] & 0x000000f0) == 0; + + /* Canon MV5i works unreliably if it is not root node. */ + keep_this_irm = irm_device && irm_device->config_rom && + irm_device->config_rom[3] >> 8 == CANON_OUI; + + root_id = root_node->node_id; + irm_id = card->irm_node->node_id; + local_id = card->local_node->node_id; + + grace = time_after64(get_jiffies_64(), + card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); + + if ((is_next_generation(generation, card->bm_generation) && + !card->bm_abdicate) || + (card->bm_generation != generation && grace)) { + /* + * This first step is to figure out who is IRM and + * then try to become bus manager. If the IRM is not + * well defined (e.g. does not have an active link + * layer or does not responds to our lock request, we + * will have to do a little vigilante bus management. + * In that case, we do a goto into the gap count logic + * so that when we do the reset, we still optimize the + * gap count. That could well save a reset in the + * next generation. 
+ */ + + if (!card->irm_node->link_on) { + new_root_id = local_id; + fw_notice(card, "%s, making local node (%02x) root\n", + "IRM has link off", new_root_id); + goto pick_me; + } + + if (irm_is_1394_1995_only && !keep_this_irm) { + new_root_id = local_id; + fw_notice(card, "%s, making local node (%02x) root\n", + "IRM is not 1394a compliant", new_root_id); + goto pick_me; + } + + transaction_data[0] = cpu_to_be32(0x3f); + transaction_data[1] = cpu_to_be32(local_id); + + spin_unlock_irq(&card->lock); + + rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, + transaction_data, 8); + + if (rcode == RCODE_GENERATION) + /* Another bus reset, BM work has been rescheduled. */ + goto out; + + bm_id = be32_to_cpu(transaction_data[0]); + + spin_lock_irq(&card->lock); + if (rcode == RCODE_COMPLETE && generation == card->generation) + card->bm_node_id = + bm_id == 0x3f ? local_id : 0xffc0 | bm_id; + spin_unlock_irq(&card->lock); + + if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { + /* Somebody else is BM. Only act as IRM. */ + if (local_id == irm_id) + allocate_broadcast_channel(card, generation); + + goto out; + } + + if (rcode == RCODE_SEND_ERROR) { + /* + * We have been unable to send the lock request due to + * some local problem. Let's try again later and hope + * that the problem has gone away by then. + */ + fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); + goto out; + } + + spin_lock_irq(&card->lock); + + if (rcode != RCODE_COMPLETE && !keep_this_irm) { + /* + * The lock request failed, maybe the IRM + * isn't really IRM capable after all. Let's + * do a bus reset and pick the local node as + * root, and thus, IRM. + */ + new_root_id = local_id; + fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n", + fw_rcode_string(rcode), new_root_id); + goto pick_me; + } + } else if (card->bm_generation != generation) { + /* + * We weren't BM in the last generation, and the last + * bus reset is less than 125ms ago. Reschedule this job. + */ + spin_unlock_irq(&card->lock); + fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); + goto out; + } + + /* + * We're bus manager for this generation, so next step is to + * make sure we have an active cycle master and do gap count + * optimization. + */ + card->bm_generation = generation; + + if (root_device == NULL) { + /* + * Either link_on is false, or we failed to read the + * config rom. In either case, pick another root. + */ + new_root_id = local_id; + } else if (!root_device_is_running) { + /* + * If we haven't probed this device yet, bail out now + * and let's try again once that's done. + */ + spin_unlock_irq(&card->lock); + goto out; + } else if (root_device_is_cmc) { + /* + * We will send out a force root packet for this + * node as part of the gap count optimization. + */ + new_root_id = root_id; + } else { + /* + * Current root has an active link layer and we + * successfully read the config rom, but it's not + * cycle master capable. + */ + new_root_id = local_id; + } + + pick_me: + /* + * Pick a gap count from 1394a table E-1. The table doesn't cover + * the typically much larger 1394b beta repeater delays though. + */ + if (!card->beta_repeaters_present && + root_node->max_hops < ARRAY_SIZE(gap_count_table)) + gap_count = gap_count_table[root_node->max_hops]; + else + gap_count = 63; + + /* + * Finally, figure out if we should do a reset or not. 
If we have + * done less than 5 resets with the same physical topology and we + * have either a new root or a new gap count setting, let's do it. + */ + + if (card->bm_retries++ < 5 && + (card->gap_count != gap_count || new_root_id != root_id)) + do_reset = true; + + spin_unlock_irq(&card->lock); + + if (do_reset) { + fw_notice(card, "phy config: new root=%x, gap_count=%d\n", + new_root_id, gap_count); + fw_send_phy_config(card, new_root_id, generation, gap_count); + reset_bus(card, true); + /* Will allocate broadcast channel after the reset. */ + goto out; + } + + if (root_device_is_cmc) { + /* + * Make sure that the cycle master sends cycle start packets. + */ + transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); + rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, + root_id, generation, SCODE_100, + CSR_REGISTER_BASE + CSR_STATE_SET, + transaction_data, 4); + if (rcode == RCODE_GENERATION) + goto out; + } + + if (local_id == irm_id) + allocate_broadcast_channel(card, generation); + + out: + fw_node_put(root_node); + out_put_card: + fw_card_put(card); +} + +void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, + struct device *device) +{ + static atomic_t index = ATOMIC_INIT(-1); + + card->index = atomic_inc_return(&index); + card->driver = driver; + card->device = device; + card->current_tlabel = 0; + card->tlabel_mask = 0; + card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000; + card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19; + card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT; + card->split_timeout_jiffies = + DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000); + card->color = 0; + card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; + + kref_init(&card->kref); + init_completion(&card->done); + INIT_LIST_HEAD(&card->transaction_list); + INIT_LIST_HEAD(&card->phy_receiver_list); + spin_lock_init(&card->lock); + + card->local_node = NULL; + + INIT_DELAYED_WORK(&card->br_work, br_work); + INIT_DELAYED_WORK(&card->bm_work, bm_work); +} +EXPORT_SYMBOL(fw_card_initialize); + +int fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid) +{ + int ret; + + card->max_receive = max_receive; + card->link_speed = link_speed; + card->guid = guid; + + mutex_lock(&card_mutex); + + generate_config_rom(card, tmp_config_rom); + ret = card->driver->enable(card, tmp_config_rom, config_rom_length); + if (ret == 0) + list_add_tail(&card->link, &card_list); + + mutex_unlock(&card_mutex); + + return ret; +} +EXPORT_SYMBOL(fw_card_add); + +/* + * The next few functions implement a dummy driver that is used once a card + * driver shuts down an fw_card. This allows the driver to cleanly unload, + * as all IO to the card will be handled (and failed) by the dummy driver + * instead of calling into the module. Only functions for iso context + * shutdown still need to be provided by the card driver. + * + * .read/write_csr() should never be called anymore after the dummy driver + * was bound since they are only used within request handler context. + * .set_config_rom() is never called since the card is taken out of card_list + * before switching to the dummy driver. 
+ */ + +static int dummy_read_phy_reg(struct fw_card *card, int address) +{ + return -ENODEV; +} + +static int dummy_update_phy_reg(struct fw_card *card, int address, + int clear_bits, int set_bits) +{ + return -ENODEV; +} + +static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) +{ + packet->callback(packet, card, RCODE_CANCELLED); +} + +static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) +{ + packet->callback(packet, card, RCODE_CANCELLED); +} + +static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) +{ + return -ENOENT; +} + +static int dummy_enable_phys_dma(struct fw_card *card, + int node_id, int generation) +{ + return -ENODEV; +} + +static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, + int type, int channel, size_t header_size) +{ + return ERR_PTR(-ENODEV); +} + +static int dummy_start_iso(struct fw_iso_context *ctx, + s32 cycle, u32 sync, u32 tags) +{ + return -ENODEV; +} + +static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) +{ + return -ENODEV; +} + +static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, + struct fw_iso_buffer *buffer, unsigned long payload) +{ + return -ENODEV; +} + +static void dummy_flush_queue_iso(struct fw_iso_context *ctx) +{ +} + +static int dummy_flush_iso_completions(struct fw_iso_context *ctx) +{ + return -ENODEV; +} + +static const struct fw_card_driver dummy_driver_template = { + .read_phy_reg = dummy_read_phy_reg, + .update_phy_reg = dummy_update_phy_reg, + .send_request = dummy_send_request, + .send_response = dummy_send_response, + .cancel_packet = dummy_cancel_packet, + .enable_phys_dma = dummy_enable_phys_dma, + .allocate_iso_context = dummy_allocate_iso_context, + .start_iso = dummy_start_iso, + .set_iso_channels = dummy_set_iso_channels, + .queue_iso = dummy_queue_iso, + .flush_queue_iso = dummy_flush_queue_iso, + .flush_iso_completions = dummy_flush_iso_completions, +}; + +void fw_card_release(struct kref *kref) +{ + struct fw_card *card = container_of(kref, struct fw_card, kref); + + complete(&card->done); +} +EXPORT_SYMBOL_GPL(fw_card_release); + +void fw_core_remove_card(struct fw_card *card) +{ + struct fw_card_driver dummy_driver = dummy_driver_template; + + card->driver->update_phy_reg(card, 4, + PHY_LINK_ACTIVE | PHY_CONTENDER, 0); + fw_schedule_bus_reset(card, false, true); + + mutex_lock(&card_mutex); + list_del_init(&card->link); + mutex_unlock(&card_mutex); + + /* Switch off most of the card driver interface. */ + dummy_driver.free_iso_context = card->driver->free_iso_context; + dummy_driver.stop_iso = card->driver->stop_iso; + card->driver = &dummy_driver; + + fw_destroy_nodes(card); + + /* Wait for all users, especially device workqueue jobs, to finish. */ + fw_card_put(card); + wait_for_completion(&card->done); + + WARN_ON(!list_empty(&card->transaction_list)); +} +EXPORT_SYMBOL(fw_core_remove_card); diff --git a/kernel/drivers/firewire/core-cdev.c b/kernel/drivers/firewire/core-cdev.c new file mode 100644 index 000000000..2a3973a7c --- /dev/null +++ b/kernel/drivers/firewire/core-cdev.c @@ -0,0 +1,1815 @@ +/* + * Char device for device raw access + * + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/bug.h> +#include <linux/compat.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/firewire.h> +#include <linux/firewire-cdev.h> +#include <linux/idr.h> +#include <linux/irqflags.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/poll.h> +#include <linux/sched.h> /* required for linux/wait.h */ +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/time.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + + +#include "core.h" + +/* + * ABI version history is documented in linux/firewire-cdev.h. + */ +#define FW_CDEV_KERNEL_VERSION 5 +#define FW_CDEV_VERSION_EVENT_REQUEST2 4 +#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 +#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 + +struct client { + u32 version; + struct fw_device *device; + + spinlock_t lock; + bool in_shutdown; + struct idr resource_idr; + struct list_head event_list; + wait_queue_head_t wait; + wait_queue_head_t tx_flush_wait; + u64 bus_reset_closure; + + struct fw_iso_context *iso_context; + u64 iso_closure; + struct fw_iso_buffer buffer; + unsigned long vm_start; + bool buffer_is_mapped; + + struct list_head phy_receiver_link; + u64 phy_receiver_closure; + + struct list_head link; + struct kref kref; +}; + +static inline void client_get(struct client *client) +{ + kref_get(&client->kref); +} + +static void client_release(struct kref *kref) +{ + struct client *client = container_of(kref, struct client, kref); + + fw_device_put(client->device); + kfree(client); +} + +static void client_put(struct client *client) +{ + kref_put(&client->kref, client_release); +} + +struct client_resource; +typedef void (*client_resource_release_fn_t)(struct client *, + struct client_resource *); +struct client_resource { + client_resource_release_fn_t release; + int handle; +}; + +struct address_handler_resource { + struct client_resource resource; + struct fw_address_handler handler; + __u64 closure; + struct client *client; +}; + +struct outbound_transaction_resource { + struct client_resource resource; + struct fw_transaction transaction; +}; + +struct inbound_transaction_resource { + struct client_resource resource; + struct fw_card *card; + struct fw_request *request; + void *data; + size_t length; +}; + +struct descriptor_resource { + struct client_resource resource; + struct fw_descriptor descriptor; + u32 data[0]; +}; + +struct iso_resource { + struct client_resource resource; + struct client *client; + /* Schedule work and access todo only with client->lock held. 
*/ + struct delayed_work work; + enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, + ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; + int generation; + u64 channels; + s32 bandwidth; + struct iso_resource_event *e_alloc, *e_dealloc; +}; + +static void release_iso_resource(struct client *, struct client_resource *); + +static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) +{ + client_get(r->client); + if (!queue_delayed_work(fw_workqueue, &r->work, delay)) + client_put(r->client); +} + +static void schedule_if_iso_resource(struct client_resource *resource) +{ + if (resource->release == release_iso_resource) + schedule_iso_resource(container_of(resource, + struct iso_resource, resource), 0); +} + +/* + * dequeue_event() just kfree()'s the event, so the event has to be + * the first field in a struct XYZ_event. + */ +struct event { + struct { void *data; size_t size; } v[2]; + struct list_head link; +}; + +struct bus_reset_event { + struct event event; + struct fw_cdev_event_bus_reset reset; +}; + +struct outbound_transaction_event { + struct event event; + struct client *client; + struct outbound_transaction_resource r; + struct fw_cdev_event_response response; +}; + +struct inbound_transaction_event { + struct event event; + union { + struct fw_cdev_event_request request; + struct fw_cdev_event_request2 request2; + } req; +}; + +struct iso_interrupt_event { + struct event event; + struct fw_cdev_event_iso_interrupt interrupt; +}; + +struct iso_interrupt_mc_event { + struct event event; + struct fw_cdev_event_iso_interrupt_mc interrupt; +}; + +struct iso_resource_event { + struct event event; + struct fw_cdev_event_iso_resource iso_resource; +}; + +struct outbound_phy_packet_event { + struct event event; + struct client *client; + struct fw_packet p; + struct fw_cdev_event_phy_packet phy_packet; +}; + +struct inbound_phy_packet_event { + struct event event; + struct fw_cdev_event_phy_packet phy_packet; +}; + +#ifdef CONFIG_COMPAT +static void __user *u64_to_uptr(u64 value) +{ + if (is_compat_task()) + return compat_ptr(value); + else + return (void __user *)(unsigned long)value; +} + +static u64 uptr_to_u64(void __user *ptr) +{ + if (is_compat_task()) + return ptr_to_compat(ptr); + else + return (u64)(unsigned long)ptr; +} +#else +static inline void __user *u64_to_uptr(u64 value) +{ + return (void __user *)(unsigned long)value; +} + +static inline u64 uptr_to_u64(void __user *ptr) +{ + return (u64)(unsigned long)ptr; +} +#endif /* CONFIG_COMPAT */ + +static int fw_device_op_open(struct inode *inode, struct file *file) +{ + struct fw_device *device; + struct client *client; + + device = fw_device_get_by_devt(inode->i_rdev); + if (device == NULL) + return -ENODEV; + + if (fw_device_is_shutdown(device)) { + fw_device_put(device); + return -ENODEV; + } + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (client == NULL) { + fw_device_put(device); + return -ENOMEM; + } + + client->device = device; + spin_lock_init(&client->lock); + idr_init(&client->resource_idr); + INIT_LIST_HEAD(&client->event_list); + init_waitqueue_head(&client->wait); + init_waitqueue_head(&client->tx_flush_wait); + INIT_LIST_HEAD(&client->phy_receiver_link); + INIT_LIST_HEAD(&client->link); + kref_init(&client->kref); + + file->private_data = client; + + return nonseekable_open(inode, file); +} + +static void queue_event(struct client *client, struct event *event, + void *data0, size_t size0, void *data1, size_t size1) +{ + unsigned long flags; + + event->v[0].data = data0; + 
event->v[0].size = size0; + event->v[1].data = data1; + event->v[1].size = size1; + + spin_lock_irqsave(&client->lock, flags); + if (client->in_shutdown) + kfree(event); + else + list_add_tail(&event->link, &client->event_list); + spin_unlock_irqrestore(&client->lock, flags); + + wake_up_interruptible(&client->wait); +} + +static int dequeue_event(struct client *client, + char __user *buffer, size_t count) +{ + struct event *event; + size_t size, total; + int i, ret; + + ret = wait_event_interruptible(client->wait, + !list_empty(&client->event_list) || + fw_device_is_shutdown(client->device)); + if (ret < 0) + return ret; + + if (list_empty(&client->event_list) && + fw_device_is_shutdown(client->device)) + return -ENODEV; + + spin_lock_irq(&client->lock); + event = list_first_entry(&client->event_list, struct event, link); + list_del(&event->link); + spin_unlock_irq(&client->lock); + + total = 0; + for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { + size = min(event->v[i].size, count - total); + if (copy_to_user(buffer + total, event->v[i].data, size)) { + ret = -EFAULT; + goto out; + } + total += size; + } + ret = total; + + out: + kfree(event); + + return ret; +} + +static ssize_t fw_device_op_read(struct file *file, char __user *buffer, + size_t count, loff_t *offset) +{ + struct client *client = file->private_data; + + return dequeue_event(client, buffer, count); +} + +static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, + struct client *client) +{ + struct fw_card *card = client->device->card; + + spin_lock_irq(&card->lock); + + event->closure = client->bus_reset_closure; + event->type = FW_CDEV_EVENT_BUS_RESET; + event->generation = client->device->generation; + event->node_id = client->device->node_id; + event->local_node_id = card->local_node->node_id; + event->bm_node_id = card->bm_node_id; + event->irm_node_id = card->irm_node->node_id; + event->root_node_id = card->root_node->node_id; + + spin_unlock_irq(&card->lock); +} + +static void for_each_client(struct fw_device *device, + void (*callback)(struct client *client)) +{ + struct client *c; + + mutex_lock(&device->client_list_mutex); + list_for_each_entry(c, &device->client_list, link) + callback(c); + mutex_unlock(&device->client_list_mutex); +} + +static int schedule_reallocations(int id, void *p, void *data) +{ + schedule_if_iso_resource(p); + + return 0; +} + +static void queue_bus_reset_event(struct client *client) +{ + struct bus_reset_event *e; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (e == NULL) + return; + + fill_bus_reset_event(&e->reset, client); + + queue_event(client, &e->event, + &e->reset, sizeof(e->reset), NULL, 0); + + spin_lock_irq(&client->lock); + idr_for_each(&client->resource_idr, schedule_reallocations, client); + spin_unlock_irq(&client->lock); +} + +void fw_device_cdev_update(struct fw_device *device) +{ + for_each_client(device, queue_bus_reset_event); +} + +static void wake_up_client(struct client *client) +{ + wake_up_interruptible(&client->wait); +} + +void fw_device_cdev_remove(struct fw_device *device) +{ + for_each_client(device, wake_up_client); +} + +union ioctl_arg { + struct fw_cdev_get_info get_info; + struct fw_cdev_send_request send_request; + struct fw_cdev_allocate allocate; + struct fw_cdev_deallocate deallocate; + struct fw_cdev_send_response send_response; + struct fw_cdev_initiate_bus_reset initiate_bus_reset; + struct fw_cdev_add_descriptor add_descriptor; + struct fw_cdev_remove_descriptor remove_descriptor; + struct fw_cdev_create_iso_context 
create_iso_context; + struct fw_cdev_queue_iso queue_iso; + struct fw_cdev_start_iso start_iso; + struct fw_cdev_stop_iso stop_iso; + struct fw_cdev_get_cycle_timer get_cycle_timer; + struct fw_cdev_allocate_iso_resource allocate_iso_resource; + struct fw_cdev_send_stream_packet send_stream_packet; + struct fw_cdev_get_cycle_timer2 get_cycle_timer2; + struct fw_cdev_send_phy_packet send_phy_packet; + struct fw_cdev_receive_phy_packets receive_phy_packets; + struct fw_cdev_set_iso_channels set_iso_channels; + struct fw_cdev_flush_iso flush_iso; +}; + +static int ioctl_get_info(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_get_info *a = &arg->get_info; + struct fw_cdev_event_bus_reset bus_reset; + unsigned long ret = 0; + + client->version = a->version; + a->version = FW_CDEV_KERNEL_VERSION; + a->card = client->device->card->index; + + down_read(&fw_device_rwsem); + + if (a->rom != 0) { + size_t want = a->rom_length; + size_t have = client->device->config_rom_length * 4; + + ret = copy_to_user(u64_to_uptr(a->rom), + client->device->config_rom, min(want, have)); + } + a->rom_length = client->device->config_rom_length * 4; + + up_read(&fw_device_rwsem); + + if (ret != 0) + return -EFAULT; + + mutex_lock(&client->device->client_list_mutex); + + client->bus_reset_closure = a->bus_reset_closure; + if (a->bus_reset != 0) { + fill_bus_reset_event(&bus_reset, client); + /* unaligned size of bus_reset is 36 bytes */ + ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36); + } + if (ret == 0 && list_empty(&client->link)) + list_add_tail(&client->link, &client->device->client_list); + + mutex_unlock(&client->device->client_list_mutex); + + return ret ? -EFAULT : 0; +} + +static int add_client_resource(struct client *client, + struct client_resource *resource, gfp_t gfp_mask) +{ + bool preload = !!(gfp_mask & __GFP_WAIT); + unsigned long flags; + int ret; + + if (preload) + idr_preload(gfp_mask); + spin_lock_irqsave(&client->lock, flags); + + if (client->in_shutdown) + ret = -ECANCELED; + else + ret = idr_alloc(&client->resource_idr, resource, 0, 0, + GFP_NOWAIT); + if (ret >= 0) { + resource->handle = ret; + client_get(client); + schedule_if_iso_resource(resource); + } + + spin_unlock_irqrestore(&client->lock, flags); + if (preload) + idr_preload_end(); + + return ret < 0 ? 
ret : 0; +} + +static int release_client_resource(struct client *client, u32 handle, + client_resource_release_fn_t release, + struct client_resource **return_resource) +{ + struct client_resource *resource; + + spin_lock_irq(&client->lock); + if (client->in_shutdown) + resource = NULL; + else + resource = idr_find(&client->resource_idr, handle); + if (resource && resource->release == release) + idr_remove(&client->resource_idr, handle); + spin_unlock_irq(&client->lock); + + if (!(resource && resource->release == release)) + return -EINVAL; + + if (return_resource) + *return_resource = resource; + else + resource->release(client, resource); + + client_put(client); + + return 0; +} + +static void release_transaction(struct client *client, + struct client_resource *resource) +{ +} + +static void complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct outbound_transaction_event *e = data; + struct fw_cdev_event_response *rsp = &e->response; + struct client *client = e->client; + unsigned long flags; + + if (length < rsp->length) + rsp->length = length; + if (rcode == RCODE_COMPLETE) + memcpy(rsp->data, payload, rsp->length); + + spin_lock_irqsave(&client->lock, flags); + idr_remove(&client->resource_idr, e->r.resource.handle); + if (client->in_shutdown) + wake_up(&client->tx_flush_wait); + spin_unlock_irqrestore(&client->lock, flags); + + rsp->type = FW_CDEV_EVENT_RESPONSE; + rsp->rcode = rcode; + + /* + * In the case that sizeof(*rsp) doesn't align with the position of the + * data, and the read is short, preserve an extra copy of the data + * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless + * for short reads and some apps depended on it, this is both safe + * and prudent for compatibility. 
+ */ + if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) + queue_event(client, &e->event, rsp, sizeof(*rsp), + rsp->data, rsp->length); + else + queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, + NULL, 0); + + /* Drop the idr's reference */ + client_put(client); +} + +static int init_request(struct client *client, + struct fw_cdev_send_request *request, + int destination_id, int speed) +{ + struct outbound_transaction_event *e; + int ret; + + if (request->tcode != TCODE_STREAM_DATA && + (request->length > 4096 || request->length > 512 << speed)) + return -EIO; + + if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && + request->length < 4) + return -EINVAL; + + e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); + if (e == NULL) + return -ENOMEM; + + e->client = client; + e->response.length = request->length; + e->response.closure = request->closure; + + if (request->data && + copy_from_user(e->response.data, + u64_to_uptr(request->data), request->length)) { + ret = -EFAULT; + goto failed; + } + + e->r.resource.release = release_transaction; + ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); + if (ret < 0) + goto failed; + + fw_send_request(client->device->card, &e->r.transaction, + request->tcode, destination_id, request->generation, + speed, request->offset, e->response.data, + request->length, complete_transaction, e); + return 0; + + failed: + kfree(e); + + return ret; +} + +static int ioctl_send_request(struct client *client, union ioctl_arg *arg) +{ + switch (arg->send_request.tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_QUADLET_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_MASK_SWAP: + case TCODE_LOCK_COMPARE_SWAP: + case TCODE_LOCK_FETCH_ADD: + case TCODE_LOCK_LITTLE_ADD: + case TCODE_LOCK_BOUNDED_ADD: + case TCODE_LOCK_WRAP_ADD: + case TCODE_LOCK_VENDOR_DEPENDENT: + break; + default: + return -EINVAL; + } + + return init_request(client, &arg->send_request, client->device->node_id, + client->device->max_speed); +} + +static inline bool is_fcp_request(struct fw_request *request) +{ + return request == NULL; +} + +static void release_request(struct client *client, + struct client_resource *resource) +{ + struct inbound_transaction_resource *r = container_of(resource, + struct inbound_transaction_resource, resource); + + if (is_fcp_request(r->request)) + kfree(r->data); + else + fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); + + fw_card_put(r->card); + kfree(r); +} + +static void handle_request(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, + int generation, unsigned long long offset, + void *payload, size_t length, void *callback_data) +{ + struct address_handler_resource *handler = callback_data; + struct inbound_transaction_resource *r; + struct inbound_transaction_event *e; + size_t event_size0; + void *fcp_frame = NULL; + int ret; + + /* card may be different from handler->client->device->card */ + fw_card_get(card); + + r = kmalloc(sizeof(*r), GFP_ATOMIC); + e = kmalloc(sizeof(*e), GFP_ATOMIC); + if (r == NULL || e == NULL) + goto failed; + + r->card = card; + r->request = request; + r->data = payload; + r->length = length; + + if (is_fcp_request(request)) { + /* + * FIXME: Let core-transaction.c manage a + * single reference-counted copy? 
+ */ + fcp_frame = kmemdup(payload, length, GFP_ATOMIC); + if (fcp_frame == NULL) + goto failed; + + r->data = fcp_frame; + } + + r->resource.release = release_request; + ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); + if (ret < 0) + goto failed; + + if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { + struct fw_cdev_event_request *req = &e->req.request; + + if (tcode & 0x10) + tcode = TCODE_LOCK_REQUEST; + + req->type = FW_CDEV_EVENT_REQUEST; + req->tcode = tcode; + req->offset = offset; + req->length = length; + req->handle = r->resource.handle; + req->closure = handler->closure; + event_size0 = sizeof(*req); + } else { + struct fw_cdev_event_request2 *req = &e->req.request2; + + req->type = FW_CDEV_EVENT_REQUEST2; + req->tcode = tcode; + req->offset = offset; + req->source_node_id = source; + req->destination_node_id = destination; + req->card = card->index; + req->generation = generation; + req->length = length; + req->handle = r->resource.handle; + req->closure = handler->closure; + event_size0 = sizeof(*req); + } + + queue_event(handler->client, &e->event, + &e->req, event_size0, r->data, length); + return; + + failed: + kfree(r); + kfree(e); + kfree(fcp_frame); + + if (!is_fcp_request(request)) + fw_send_response(card, request, RCODE_CONFLICT_ERROR); + + fw_card_put(card); +} + +static void release_address_handler(struct client *client, + struct client_resource *resource) +{ + struct address_handler_resource *r = + container_of(resource, struct address_handler_resource, resource); + + fw_core_remove_address_handler(&r->handler); + kfree(r); +} + +static int ioctl_allocate(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_allocate *a = &arg->allocate; + struct address_handler_resource *r; + struct fw_address_region region; + int ret; + + r = kmalloc(sizeof(*r), GFP_KERNEL); + if (r == NULL) + return -ENOMEM; + + region.start = a->offset; + if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END) + region.end = a->offset + a->length; + else + region.end = a->region_end; + + r->handler.length = a->length; + r->handler.address_callback = handle_request; + r->handler.callback_data = r; + r->closure = a->closure; + r->client = client; + + ret = fw_core_add_address_handler(&r->handler, ®ion); + if (ret < 0) { + kfree(r); + return ret; + } + a->offset = r->handler.offset; + + r->resource.release = release_address_handler; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) { + release_address_handler(client, &r->resource); + return ret; + } + a->handle = r->resource.handle; + + return 0; +} + +static int ioctl_deallocate(struct client *client, union ioctl_arg *arg) +{ + return release_client_resource(client, arg->deallocate.handle, + release_address_handler, NULL); +} + +static int ioctl_send_response(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_send_response *a = &arg->send_response; + struct client_resource *resource; + struct inbound_transaction_resource *r; + int ret = 0; + + if (release_client_resource(client, a->handle, + release_request, &resource) < 0) + return -EINVAL; + + r = container_of(resource, struct inbound_transaction_resource, + resource); + if (is_fcp_request(r->request)) + goto out; + + if (a->length != fw_get_response_length(r->request)) { + ret = -EINVAL; + kfree(r->request); + goto out; + } + if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) { + ret = -EFAULT; + kfree(r->request); + goto out; + } + fw_send_response(r->card, r->request, a->rcode); + out: + 
fw_card_put(r->card); + kfree(r); + + return ret; +} + +static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) +{ + fw_schedule_bus_reset(client->device->card, true, + arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); + return 0; +} + +static void release_descriptor(struct client *client, + struct client_resource *resource) +{ + struct descriptor_resource *r = + container_of(resource, struct descriptor_resource, resource); + + fw_core_remove_descriptor(&r->descriptor); + kfree(r); +} + +static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_add_descriptor *a = &arg->add_descriptor; + struct descriptor_resource *r; + int ret; + + /* Access policy: Allow this ioctl only on local nodes' device files. */ + if (!client->device->is_local) + return -ENOSYS; + + if (a->length > 256) + return -EINVAL; + + r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL); + if (r == NULL) + return -ENOMEM; + + if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) { + ret = -EFAULT; + goto failed; + } + + r->descriptor.length = a->length; + r->descriptor.immediate = a->immediate; + r->descriptor.key = a->key; + r->descriptor.data = r->data; + + ret = fw_core_add_descriptor(&r->descriptor); + if (ret < 0) + goto failed; + + r->resource.release = release_descriptor; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) { + fw_core_remove_descriptor(&r->descriptor); + goto failed; + } + a->handle = r->resource.handle; + + return 0; + failed: + kfree(r); + + return ret; +} + +static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg) +{ + return release_client_resource(client, arg->remove_descriptor.handle, + release_descriptor, NULL); +} + +static void iso_callback(struct fw_iso_context *context, u32 cycle, + size_t header_length, void *header, void *data) +{ + struct client *client = data; + struct iso_interrupt_event *e; + + e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); + if (e == NULL) + return; + + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; + e->interrupt.closure = client->iso_closure; + e->interrupt.cycle = cycle; + e->interrupt.header_length = header_length; + memcpy(e->interrupt.header, header, header_length); + queue_event(client, &e->event, &e->interrupt, + sizeof(e->interrupt) + header_length, NULL, 0); +} + +static void iso_mc_callback(struct fw_iso_context *context, + dma_addr_t completed, void *data) +{ + struct client *client = data; + struct iso_interrupt_mc_event *e; + + e = kmalloc(sizeof(*e), GFP_ATOMIC); + if (e == NULL) + return; + + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; + e->interrupt.closure = client->iso_closure; + e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, + completed); + queue_event(client, &e->event, &e->interrupt, + sizeof(e->interrupt), NULL, 0); +} + +static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context) +{ + if (context->type == FW_ISO_CONTEXT_TRANSMIT) + return DMA_TO_DEVICE; + else + return DMA_FROM_DEVICE; +} + +static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_create_iso_context *a = &arg->create_iso_context; + struct fw_iso_context *context; + fw_iso_callback_t cb; + int ret; + + BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || + FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE || + FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL != + FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL); + + switch (a->type) { + case 
FW_ISO_CONTEXT_TRANSMIT: + if (a->speed > SCODE_3200 || a->channel > 63) + return -EINVAL; + + cb = iso_callback; + break; + + case FW_ISO_CONTEXT_RECEIVE: + if (a->header_size < 4 || (a->header_size & 3) || + a->channel > 63) + return -EINVAL; + + cb = iso_callback; + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + cb = (fw_iso_callback_t)iso_mc_callback; + break; + + default: + return -EINVAL; + } + + context = fw_iso_context_create(client->device->card, a->type, + a->channel, a->speed, a->header_size, cb, client); + if (IS_ERR(context)) + return PTR_ERR(context); + if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) + context->drop_overflow_headers = true; + + /* We only support one context at this time. */ + spin_lock_irq(&client->lock); + if (client->iso_context != NULL) { + spin_unlock_irq(&client->lock); + fw_iso_context_destroy(context); + + return -EBUSY; + } + if (!client->buffer_is_mapped) { + ret = fw_iso_buffer_map_dma(&client->buffer, + client->device->card, + iso_dma_direction(context)); + if (ret < 0) { + spin_unlock_irq(&client->lock); + fw_iso_context_destroy(context); + + return ret; + } + client->buffer_is_mapped = true; + } + client->iso_closure = a->closure; + client->iso_context = context; + spin_unlock_irq(&client->lock); + + a->handle = 0; + + return 0; +} + +static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels; + struct fw_iso_context *ctx = client->iso_context; + + if (ctx == NULL || a->handle != 0) + return -EINVAL; + + return fw_iso_context_set_channels(ctx, &a->channels); +} + +/* Macros for decoding the iso packet control header. */ +#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) +#define GET_INTERRUPT(v) (((v) >> 16) & 0x01) +#define GET_SKIP(v) (((v) >> 17) & 0x01) +#define GET_TAG(v) (((v) >> 18) & 0x03) +#define GET_SY(v) (((v) >> 20) & 0x0f) +#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff) + +static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_queue_iso *a = &arg->queue_iso; + struct fw_cdev_iso_packet __user *p, *end, *next; + struct fw_iso_context *ctx = client->iso_context; + unsigned long payload, buffer_end, transmit_header_bytes = 0; + u32 control; + int count; + struct { + struct fw_iso_packet packet; + u8 header[256]; + } u; + + if (ctx == NULL || a->handle != 0) + return -EINVAL; + + /* + * If the user passes a non-NULL data pointer, has mmap()'ed + * the iso buffer, and the pointer points inside the buffer, + * we setup the payload pointers accordingly. Otherwise we + * set them both to 0, which will still let packets with + * payload_length == 0 through. In other words, if no packets + * use the indirect payload, the iso buffer need not be mapped + * and the a->data pointer is ignored. 
+ */ + payload = (unsigned long)a->data - client->vm_start; + buffer_end = client->buffer.page_count << PAGE_SHIFT; + if (a->data == 0 || client->buffer.pages == NULL || + payload >= buffer_end) { + payload = 0; + buffer_end = 0; + } + + if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3) + return -EINVAL; + + p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); + if (!access_ok(VERIFY_READ, p, a->size)) + return -EFAULT; + + end = (void __user *)p + a->size; + count = 0; + while (p < end) { + if (get_user(control, &p->control)) + return -EFAULT; + u.packet.payload_length = GET_PAYLOAD_LENGTH(control); + u.packet.interrupt = GET_INTERRUPT(control); + u.packet.skip = GET_SKIP(control); + u.packet.tag = GET_TAG(control); + u.packet.sy = GET_SY(control); + u.packet.header_length = GET_HEADER_LENGTH(control); + + switch (ctx->type) { + case FW_ISO_CONTEXT_TRANSMIT: + if (u.packet.header_length & 3) + return -EINVAL; + transmit_header_bytes = u.packet.header_length; + break; + + case FW_ISO_CONTEXT_RECEIVE: + if (u.packet.header_length == 0 || + u.packet.header_length % ctx->header_size != 0) + return -EINVAL; + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + if (u.packet.payload_length == 0 || + u.packet.payload_length & 3) + return -EINVAL; + break; + } + + next = (struct fw_cdev_iso_packet __user *) + &p->header[transmit_header_bytes / 4]; + if (next > end) + return -EINVAL; + if (__copy_from_user + (u.packet.header, p->header, transmit_header_bytes)) + return -EFAULT; + if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && + u.packet.header_length + u.packet.payload_length > 0) + return -EINVAL; + if (payload + u.packet.payload_length > buffer_end) + return -EINVAL; + + if (fw_iso_context_queue(ctx, &u.packet, + &client->buffer, payload)) + break; + + p = next; + payload += u.packet.payload_length; + count++; + } + fw_iso_context_queue_flush(ctx); + + a->size -= uptr_to_u64(p) - a->packets; + a->packets = uptr_to_u64(p); + a->data = client->vm_start + payload; + + return count; +} + +static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_start_iso *a = &arg->start_iso; + + BUILD_BUG_ON( + FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 || + FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 || + FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 || + FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 || + FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS); + + if (client->iso_context == NULL || a->handle != 0) + return -EINVAL; + + if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE && + (a->tags == 0 || a->tags > 15 || a->sync > 15)) + return -EINVAL; + + return fw_iso_context_start(client->iso_context, + a->cycle, a->sync, a->tags); +} + +static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_stop_iso *a = &arg->stop_iso; + + if (client->iso_context == NULL || a->handle != 0) + return -EINVAL; + + return fw_iso_context_stop(client->iso_context); +} + +static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_flush_iso *a = &arg->flush_iso; + + if (client->iso_context == NULL || a->handle != 0) + return -EINVAL; + + return fw_iso_context_flush_completions(client->iso_context); +} + +static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; + struct fw_card *card = client->device->card; + struct 
timespec ts = {0, 0}; + u32 cycle_time; + int ret = 0; + + local_irq_disable(); + + cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); + + switch (a->clk_id) { + case CLOCK_REALTIME: getnstimeofday(&ts); break; + case CLOCK_MONOTONIC: ktime_get_ts(&ts); break; + case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break; + default: + ret = -EINVAL; + } + + local_irq_enable(); + + a->tv_sec = ts.tv_sec; + a->tv_nsec = ts.tv_nsec; + a->cycle_timer = cycle_time; + + return ret; +} + +static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer; + struct fw_cdev_get_cycle_timer2 ct2; + + ct2.clk_id = CLOCK_REALTIME; + ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2); + + a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC; + a->cycle_timer = ct2.cycle_timer; + + return 0; +} + +static void iso_resource_work(struct work_struct *work) +{ + struct iso_resource_event *e; + struct iso_resource *r = + container_of(work, struct iso_resource, work.work); + struct client *client = r->client; + int generation, channel, bandwidth, todo; + bool skip, free, success; + + spin_lock_irq(&client->lock); + generation = client->device->generation; + todo = r->todo; + /* Allow 1000ms grace period for other reallocations. */ + if (todo == ISO_RES_ALLOC && + time_before64(get_jiffies_64(), + client->device->card->reset_jiffies + HZ)) { + schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); + skip = true; + } else { + /* We could be called twice within the same generation. */ + skip = todo == ISO_RES_REALLOC && + r->generation == generation; + } + free = todo == ISO_RES_DEALLOC || + todo == ISO_RES_ALLOC_ONCE || + todo == ISO_RES_DEALLOC_ONCE; + r->generation = generation; + spin_unlock_irq(&client->lock); + + if (skip) + goto out; + + bandwidth = r->bandwidth; + + fw_iso_resource_manage(client->device->card, generation, + r->channels, &channel, &bandwidth, + todo == ISO_RES_ALLOC || + todo == ISO_RES_REALLOC || + todo == ISO_RES_ALLOC_ONCE); + /* + * Is this generation outdated already? As long as this resource sticks + * in the idr, it will be scheduled again for a newer generation or at + * shutdown. + */ + if (channel == -EAGAIN && + (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) + goto out; + + success = channel >= 0 || bandwidth > 0; + + spin_lock_irq(&client->lock); + /* + * Transit from allocation to reallocation, except if the client + * requested deallocation in the meantime. + */ + if (r->todo == ISO_RES_ALLOC) + r->todo = ISO_RES_REALLOC; + /* + * Allocation or reallocation failure? Pull this resource out of the + * idr and prepare for deletion, unless the client is shutting down. 
+ */ + if (r->todo == ISO_RES_REALLOC && !success && + !client->in_shutdown && + idr_find(&client->resource_idr, r->resource.handle)) { + idr_remove(&client->resource_idr, r->resource.handle); + client_put(client); + free = true; + } + spin_unlock_irq(&client->lock); + + if (todo == ISO_RES_ALLOC && channel >= 0) + r->channels = 1ULL << channel; + + if (todo == ISO_RES_REALLOC && success) + goto out; + + if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { + e = r->e_alloc; + r->e_alloc = NULL; + } else { + e = r->e_dealloc; + r->e_dealloc = NULL; + } + e->iso_resource.handle = r->resource.handle; + e->iso_resource.channel = channel; + e->iso_resource.bandwidth = bandwidth; + + queue_event(client, &e->event, + &e->iso_resource, sizeof(e->iso_resource), NULL, 0); + + if (free) { + cancel_delayed_work(&r->work); + kfree(r->e_alloc); + kfree(r->e_dealloc); + kfree(r); + } + out: + client_put(client); +} + +static void release_iso_resource(struct client *client, + struct client_resource *resource) +{ + struct iso_resource *r = + container_of(resource, struct iso_resource, resource); + + spin_lock_irq(&client->lock); + r->todo = ISO_RES_DEALLOC; + schedule_iso_resource(r, 0); + spin_unlock_irq(&client->lock); +} + +static int init_iso_resource(struct client *client, + struct fw_cdev_allocate_iso_resource *request, int todo) +{ + struct iso_resource_event *e1, *e2; + struct iso_resource *r; + int ret; + + if ((request->channels == 0 && request->bandwidth == 0) || + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) + return -EINVAL; + + r = kmalloc(sizeof(*r), GFP_KERNEL); + e1 = kmalloc(sizeof(*e1), GFP_KERNEL); + e2 = kmalloc(sizeof(*e2), GFP_KERNEL); + if (r == NULL || e1 == NULL || e2 == NULL) { + ret = -ENOMEM; + goto fail; + } + + INIT_DELAYED_WORK(&r->work, iso_resource_work); + r->client = client; + r->todo = todo; + r->generation = -1; + r->channels = request->channels; + r->bandwidth = request->bandwidth; + r->e_alloc = e1; + r->e_dealloc = e2; + + e1->iso_resource.closure = request->closure; + e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; + e2->iso_resource.closure = request->closure; + e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; + + if (todo == ISO_RES_ALLOC) { + r->resource.release = release_iso_resource; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) + goto fail; + } else { + r->resource.release = NULL; + r->resource.handle = -1; + schedule_iso_resource(r, 0); + } + request->handle = r->resource.handle; + + return 0; + fail: + kfree(r); + kfree(e1); + kfree(e2); + + return ret; +} + +static int ioctl_allocate_iso_resource(struct client *client, + union ioctl_arg *arg) +{ + return init_iso_resource(client, + &arg->allocate_iso_resource, ISO_RES_ALLOC); +} + +static int ioctl_deallocate_iso_resource(struct client *client, + union ioctl_arg *arg) +{ + return release_client_resource(client, + arg->deallocate.handle, release_iso_resource, NULL); +} + +static int ioctl_allocate_iso_resource_once(struct client *client, + union ioctl_arg *arg) +{ + return init_iso_resource(client, + &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE); +} + +static int ioctl_deallocate_iso_resource_once(struct client *client, + union ioctl_arg *arg) +{ + return init_iso_resource(client, + &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE); +} + +/* + * Returns a speed code: Maximum speed to or from this device, + * limited by the device's link speed, the local node's link speed, + * and all PHY port speeds between the two links. 
+ */ +static int ioctl_get_speed(struct client *client, union ioctl_arg *arg) +{ + return client->device->max_speed; +} + +static int ioctl_send_broadcast_request(struct client *client, + union ioctl_arg *arg) +{ + struct fw_cdev_send_request *a = &arg->send_request; + + switch (a->tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + break; + default: + return -EINVAL; + } + + /* Security policy: Only allow accesses to Units Space. */ + if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) + return -EACCES; + + return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100); +} + +static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet; + struct fw_cdev_send_request request; + int dest; + + if (a->speed > client->device->card->link_speed || + a->length > 1024 << a->speed) + return -EIO; + + if (a->tag > 3 || a->channel > 63 || a->sy > 15) + return -EINVAL; + + dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy); + request.tcode = TCODE_STREAM_DATA; + request.length = a->length; + request.closure = a->closure; + request.data = a->data; + request.generation = a->generation; + + return init_request(client, &request, dest, a->speed); +} + +static void outbound_phy_packet_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + struct outbound_phy_packet_event *e = + container_of(packet, struct outbound_phy_packet_event, p); + + switch (status) { + /* expected: */ + case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; + /* should never happen with PHY packets: */ + case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; + case ACK_BUSY_X: + case ACK_BUSY_A: + case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; + case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; + case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; + /* stale generation; cancelled; on certain controllers: no ack */ + default: e->phy_packet.rcode = status; break; + } + e->phy_packet.data[0] = packet->timestamp; + + queue_event(e->client, &e->event, &e->phy_packet, + sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); + client_put(e->client); +} + +static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet; + struct fw_card *card = client->device->card; + struct outbound_phy_packet_event *e; + + /* Access policy: Allow this ioctl only on local nodes' device files. */ + if (!client->device->is_local) + return -ENOSYS; + + e = kzalloc(sizeof(*e) + 4, GFP_KERNEL); + if (e == NULL) + return -ENOMEM; + + client_get(client); + e->client = client; + e->p.speed = SCODE_100; + e->p.generation = a->generation; + e->p.header[0] = TCODE_LINK_INTERNAL << 4; + e->p.header[1] = a->data[0]; + e->p.header[2] = a->data[1]; + e->p.header_length = 12; + e->p.callback = outbound_phy_packet_callback; + e->phy_packet.closure = a->closure; + e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT; + if (is_ping_packet(a->data)) + e->phy_packet.length = 4; + + card->driver->send_request(card, &e->p); + + return 0; +} + +static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; + struct fw_card *card = client->device->card; + + /* Access policy: Allow this ioctl only on local nodes' device files. 
*/ + if (!client->device->is_local) + return -ENOSYS; + + spin_lock_irq(&card->lock); + + list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); + client->phy_receiver_closure = a->closure; + + spin_unlock_irq(&card->lock); + + return 0; +} + +void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) +{ + struct client *client; + struct inbound_phy_packet_event *e; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + + list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { + e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); + if (e == NULL) + break; + + e->phy_packet.closure = client->phy_receiver_closure; + e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; + e->phy_packet.rcode = RCODE_COMPLETE; + e->phy_packet.length = 8; + e->phy_packet.data[0] = p->header[1]; + e->phy_packet.data[1] = p->header[2]; + queue_event(client, &e->event, + &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0); + } + + spin_unlock_irqrestore(&card->lock, flags); +} + +static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { + [0x00] = ioctl_get_info, + [0x01] = ioctl_send_request, + [0x02] = ioctl_allocate, + [0x03] = ioctl_deallocate, + [0x04] = ioctl_send_response, + [0x05] = ioctl_initiate_bus_reset, + [0x06] = ioctl_add_descriptor, + [0x07] = ioctl_remove_descriptor, + [0x08] = ioctl_create_iso_context, + [0x09] = ioctl_queue_iso, + [0x0a] = ioctl_start_iso, + [0x0b] = ioctl_stop_iso, + [0x0c] = ioctl_get_cycle_timer, + [0x0d] = ioctl_allocate_iso_resource, + [0x0e] = ioctl_deallocate_iso_resource, + [0x0f] = ioctl_allocate_iso_resource_once, + [0x10] = ioctl_deallocate_iso_resource_once, + [0x11] = ioctl_get_speed, + [0x12] = ioctl_send_broadcast_request, + [0x13] = ioctl_send_stream_packet, + [0x14] = ioctl_get_cycle_timer2, + [0x15] = ioctl_send_phy_packet, + [0x16] = ioctl_receive_phy_packets, + [0x17] = ioctl_set_iso_channels, + [0x18] = ioctl_flush_iso, +}; + +static int dispatch_ioctl(struct client *client, + unsigned int cmd, void __user *arg) +{ + union ioctl_arg buffer; + int ret; + + if (fw_device_is_shutdown(client->device)) + return -ENODEV; + + if (_IOC_TYPE(cmd) != '#' || + _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) || + _IOC_SIZE(cmd) > sizeof(buffer)) + return -ENOTTY; + + memset(&buffer, 0, sizeof(buffer)); + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) + return -EFAULT; + + ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); + if (ret < 0) + return ret; + + if (_IOC_DIR(cmd) & _IOC_READ) + if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) + return -EFAULT; + + return ret; +} + +static long fw_device_op_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return dispatch_ioctl(file->private_data, cmd, (void __user *)arg); +} + +#ifdef CONFIG_COMPAT +static long fw_device_op_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg)); +} +#endif + +static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct client *client = file->private_data; + unsigned long size; + int page_count, ret; + + if (fw_device_is_shutdown(client->device)) + return -ENODEV; + + /* FIXME: We could support multiple buffers, but we don't. 
*/ + if (client->buffer.pages != NULL) + return -EBUSY; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + client->vm_start = vma->vm_start; + size = vma->vm_end - vma->vm_start; + page_count = size >> PAGE_SHIFT; + if (size & ~PAGE_MASK) + return -EINVAL; + + ret = fw_iso_buffer_alloc(&client->buffer, page_count); + if (ret < 0) + return ret; + + spin_lock_irq(&client->lock); + if (client->iso_context) { + ret = fw_iso_buffer_map_dma(&client->buffer, + client->device->card, + iso_dma_direction(client->iso_context)); + client->buffer_is_mapped = (ret == 0); + } + spin_unlock_irq(&client->lock); + if (ret < 0) + goto fail; + + ret = fw_iso_buffer_map_vma(&client->buffer, vma); + if (ret < 0) + goto fail; + + return 0; + fail: + fw_iso_buffer_destroy(&client->buffer, client->device->card); + return ret; +} + +static int is_outbound_transaction_resource(int id, void *p, void *data) +{ + struct client_resource *resource = p; + + return resource->release == release_transaction; +} + +static int has_outbound_transactions(struct client *client) +{ + int ret; + + spin_lock_irq(&client->lock); + ret = idr_for_each(&client->resource_idr, + is_outbound_transaction_resource, NULL); + spin_unlock_irq(&client->lock); + + return ret; +} + +static int shutdown_resource(int id, void *p, void *data) +{ + struct client_resource *resource = p; + struct client *client = data; + + resource->release(client, resource); + client_put(client); + + return 0; +} + +static int fw_device_op_release(struct inode *inode, struct file *file) +{ + struct client *client = file->private_data; + struct event *event, *next_event; + + spin_lock_irq(&client->device->card->lock); + list_del(&client->phy_receiver_link); + spin_unlock_irq(&client->device->card->lock); + + mutex_lock(&client->device->client_list_mutex); + list_del(&client->link); + mutex_unlock(&client->device->client_list_mutex); + + if (client->iso_context) + fw_iso_context_destroy(client->iso_context); + + if (client->buffer.pages) + fw_iso_buffer_destroy(&client->buffer, client->device->card); + + /* Freeze client->resource_idr and client->event_list */ + spin_lock_irq(&client->lock); + client->in_shutdown = true; + spin_unlock_irq(&client->lock); + + wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); + + idr_for_each(&client->resource_idr, shutdown_resource, client); + idr_destroy(&client->resource_idr); + + list_for_each_entry_safe(event, next_event, &client->event_list, link) + kfree(event); + + client_put(client); + + return 0; +} + +static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) +{ + struct client *client = file->private_data; + unsigned int mask = 0; + + poll_wait(file, &client->wait, pt); + + if (fw_device_is_shutdown(client->device)) + mask |= POLLHUP | POLLERR; + if (!list_empty(&client->event_list)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +const struct file_operations fw_device_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = fw_device_op_open, + .read = fw_device_op_read, + .unlocked_ioctl = fw_device_op_ioctl, + .mmap = fw_device_op_mmap, + .release = fw_device_op_release, + .poll = fw_device_op_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl = fw_device_op_compat_ioctl, +#endif +}; diff --git a/kernel/drivers/firewire/core-device.c b/kernel/drivers/firewire/core-device.c new file mode 100644 index 000000000..f9e3aee6a --- /dev/null +++ b/kernel/drivers/firewire/core-device.c @@ -0,0 +1,1328 @@ +/* + * Device probing 
and sysfs code. + * + * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/bug.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/idr.h> +#include <linux/jiffies.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/random.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/workqueue.h> + +#include <linux/atomic.h> +#include <asm/byteorder.h> + +#include "core.h" + +void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p) +{ + ci->p = p + 1; + ci->end = ci->p + (p[0] >> 16); +} +EXPORT_SYMBOL(fw_csr_iterator_init); + +int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value) +{ + *key = *ci->p >> 24; + *value = *ci->p & 0xffffff; + + return ci->p++ < ci->end; +} +EXPORT_SYMBOL(fw_csr_iterator_next); + +static const u32 *search_leaf(const u32 *directory, int search_key) +{ + struct fw_csr_iterator ci; + int last_key = 0, key, value; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (last_key == search_key && + key == (CSR_DESCRIPTOR | CSR_LEAF)) + return ci.p - 1 + value; + + last_key = key; + } + + return NULL; +} + +static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) +{ + unsigned int quadlets, i; + char c; + + if (!size || !buf) + return -EINVAL; + + quadlets = min(block[0] >> 16, 256U); + if (quadlets < 2) + return -ENODATA; + + if (block[1] != 0 || block[2] != 0) + /* unknown language/character set */ + return -ENODATA; + + block += 3; + quadlets -= 2; + for (i = 0; i < quadlets * 4 && i < size - 1; i++) { + c = block[i / 4] >> (24 - 8 * (i % 4)); + if (c == '\0') + break; + buf[i] = c; + } + buf[i] = '\0'; + + return i; +} + +/** + * fw_csr_string() - reads a string from the configuration ROM + * @directory: e.g. root directory or unit directory + * @key: the key of the preceding directory entry + * @buf: where to put the string + * @size: size of @buf, in bytes + * + * The string is taken from a minimal ASCII text descriptor leaf after + * the immediate entry with @key. The string is zero-terminated. + * An overlong string is silently truncated such that it and the + * zero byte fit into @size. + * + * Returns strlen(buf) or a negative error code. 
+ */ +int fw_csr_string(const u32 *directory, int key, char *buf, size_t size) +{ + const u32 *leaf = search_leaf(directory, key); + if (!leaf) + return -ENOENT; + + return textual_leaf_to_string(leaf, buf, size); +} +EXPORT_SYMBOL(fw_csr_string); + +static void get_ids(const u32 *directory, int *id) +{ + struct fw_csr_iterator ci; + int key, value; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + switch (key) { + case CSR_VENDOR: id[0] = value; break; + case CSR_MODEL: id[1] = value; break; + case CSR_SPECIFIER_ID: id[2] = value; break; + case CSR_VERSION: id[3] = value; break; + } + } +} + +static void get_modalias_ids(struct fw_unit *unit, int *id) +{ + get_ids(&fw_parent_device(unit)->config_rom[5], id); + get_ids(unit->directory, id); +} + +static bool match_ids(const struct ieee1394_device_id *id_table, int *id) +{ + int match = 0; + + if (id[0] == id_table->vendor_id) + match |= IEEE1394_MATCH_VENDOR_ID; + if (id[1] == id_table->model_id) + match |= IEEE1394_MATCH_MODEL_ID; + if (id[2] == id_table->specifier_id) + match |= IEEE1394_MATCH_SPECIFIER_ID; + if (id[3] == id_table->version) + match |= IEEE1394_MATCH_VERSION; + + return (match & id_table->match_flags) == id_table->match_flags; +} + +static const struct ieee1394_device_id *unit_match(struct device *dev, + struct device_driver *drv) +{ + const struct ieee1394_device_id *id_table = + container_of(drv, struct fw_driver, driver)->id_table; + int id[] = {0, 0, 0, 0}; + + get_modalias_ids(fw_unit(dev), id); + + for (; id_table->match_flags != 0; id_table++) + if (match_ids(id_table, id)) + return id_table; + + return NULL; +} + +static bool is_fw_unit(struct device *dev); + +static int fw_unit_match(struct device *dev, struct device_driver *drv) +{ + /* We only allow binding to fw_units. 
*/ + return is_fw_unit(dev) && unit_match(dev, drv) != NULL; +} + +static int fw_unit_probe(struct device *dev) +{ + struct fw_driver *driver = + container_of(dev->driver, struct fw_driver, driver); + + return driver->probe(fw_unit(dev), unit_match(dev, dev->driver)); +} + +static int fw_unit_remove(struct device *dev) +{ + struct fw_driver *driver = + container_of(dev->driver, struct fw_driver, driver); + + return driver->remove(fw_unit(dev)), 0; +} + +static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) +{ + int id[] = {0, 0, 0, 0}; + + get_modalias_ids(unit, id); + + return snprintf(buffer, buffer_size, + "ieee1394:ven%08Xmo%08Xsp%08Xver%08X", + id[0], id[1], id[2], id[3]); +} + +static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct fw_unit *unit = fw_unit(dev); + char modalias[64]; + + get_modalias(unit, modalias, sizeof(modalias)); + + if (add_uevent_var(env, "MODALIAS=%s", modalias)) + return -ENOMEM; + + return 0; +} + +struct bus_type fw_bus_type = { + .name = "firewire", + .match = fw_unit_match, + .probe = fw_unit_probe, + .remove = fw_unit_remove, +}; +EXPORT_SYMBOL(fw_bus_type); + +int fw_device_enable_phys_dma(struct fw_device *device) +{ + int generation = device->generation; + + /* device->node_id, accessed below, must not be older than generation */ + smp_rmb(); + + return device->card->driver->enable_phys_dma(device->card, + device->node_id, + generation); +} +EXPORT_SYMBOL(fw_device_enable_phys_dma); + +struct config_rom_attribute { + struct device_attribute attr; + u32 key; +}; + +static ssize_t show_immediate(struct device *dev, + struct device_attribute *dattr, char *buf) +{ + struct config_rom_attribute *attr = + container_of(dattr, struct config_rom_attribute, attr); + struct fw_csr_iterator ci; + const u32 *dir; + int key, value, ret = -ENOENT; + + down_read(&fw_device_rwsem); + + if (is_fw_unit(dev)) + dir = fw_unit(dev)->directory; + else + dir = fw_device(dev)->config_rom + 5; + + fw_csr_iterator_init(&ci, dir); + while (fw_csr_iterator_next(&ci, &key, &value)) + if (attr->key == key) { + ret = snprintf(buf, buf ? PAGE_SIZE : 0, + "0x%06x\n", value); + break; + } + + up_read(&fw_device_rwsem); + + return ret; +} + +#define IMMEDIATE_ATTR(name, key) \ + { __ATTR(name, S_IRUGO, show_immediate, NULL), key } + +static ssize_t show_text_leaf(struct device *dev, + struct device_attribute *dattr, char *buf) +{ + struct config_rom_attribute *attr = + container_of(dattr, struct config_rom_attribute, attr); + const u32 *dir; + size_t bufsize; + char dummy_buf[2]; + int ret; + + down_read(&fw_device_rwsem); + + if (is_fw_unit(dev)) + dir = fw_unit(dev)->directory; + else + dir = fw_device(dev)->config_rom + 5; + + if (buf) { + bufsize = PAGE_SIZE - 1; + } else { + buf = dummy_buf; + bufsize = 1; + } + + ret = fw_csr_string(dir, attr->key, buf, bufsize); + + if (ret >= 0) { + /* Strip trailing whitespace and add newline. 
*/ + while (ret > 0 && isspace(buf[ret - 1])) + ret--; + strcpy(buf + ret, "\n"); + ret++; + } + + up_read(&fw_device_rwsem); + + return ret; +} + +#define TEXT_LEAF_ATTR(name, key) \ + { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key } + +static struct config_rom_attribute config_rom_attributes[] = { + IMMEDIATE_ATTR(vendor, CSR_VENDOR), + IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION), + IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID), + IMMEDIATE_ATTR(version, CSR_VERSION), + IMMEDIATE_ATTR(model, CSR_MODEL), + TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR), + TEXT_LEAF_ATTR(model_name, CSR_MODEL), + TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), +}; + +static void init_fw_attribute_group(struct device *dev, + struct device_attribute *attrs, + struct fw_attribute_group *group) +{ + struct device_attribute *attr; + int i, j; + + for (j = 0; attrs[j].attr.name != NULL; j++) + group->attrs[j] = &attrs[j].attr; + + for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) { + attr = &config_rom_attributes[i].attr; + if (attr->show(dev, attr, NULL) < 0) + continue; + group->attrs[j++] = &attr->attr; + } + + group->attrs[j] = NULL; + group->groups[0] = &group->group; + group->groups[1] = NULL; + group->group.attrs = group->attrs; + dev->groups = (const struct attribute_group **) group->groups; +} + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_unit *unit = fw_unit(dev); + int length; + + length = get_modalias(unit, buf, PAGE_SIZE); + strcpy(buf + length, "\n"); + + return length + 1; +} + +static ssize_t rom_index_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev->parent); + struct fw_unit *unit = fw_unit(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", + (int)(unit->directory - device->config_rom)); +} + +static struct device_attribute fw_unit_attributes[] = { + __ATTR_RO(modalias), + __ATTR_RO(rom_index), + __ATTR_NULL, +}; + +static ssize_t config_rom_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev); + size_t length; + + down_read(&fw_device_rwsem); + length = device->config_rom_length * 4; + memcpy(buf, device->config_rom, length); + up_read(&fw_device_rwsem); + + return length; +} + +static ssize_t guid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev); + int ret; + + down_read(&fw_device_rwsem); + ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", + device->config_rom[3], device->config_rom[4]); + up_read(&fw_device_rwsem); + + return ret; +} + +static ssize_t is_local_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev); + + return sprintf(buf, "%u\n", device->is_local); +} + +static int units_sprintf(char *buf, const u32 *directory) +{ + struct fw_csr_iterator ci; + int key, value; + int specifier_id = 0; + int version = 0; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + switch (key) { + case CSR_SPECIFIER_ID: + specifier_id = value; + break; + case CSR_VERSION: + version = value; + break; + } + } + + return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version); +} + +static ssize_t units_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev); + struct fw_csr_iterator ci; + int key, value, i = 0; + + down_read(&fw_device_rwsem); + 
fw_csr_iterator_init(&ci, &device->config_rom[5]); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (key != (CSR_UNIT | CSR_DIRECTORY)) + continue; + i += units_sprintf(&buf[i], ci.p + value - 1); + if (i >= PAGE_SIZE - (8 + 1 + 8 + 1)) + break; + } + up_read(&fw_device_rwsem); + + if (i) + buf[i - 1] = '\n'; + + return i; +} + +static struct device_attribute fw_device_attributes[] = { + __ATTR_RO(config_rom), + __ATTR_RO(guid), + __ATTR_RO(is_local), + __ATTR_RO(units), + __ATTR_NULL, +}; + +static int read_rom(struct fw_device *device, + int generation, int index, u32 *data) +{ + u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4; + int i, rcode; + + /* device->node_id, accessed below, must not be older than generation */ + smp_rmb(); + + for (i = 10; i < 100; i += 10) { + rcode = fw_run_transaction(device->card, + TCODE_READ_QUADLET_REQUEST, device->node_id, + generation, device->max_speed, offset, data, 4); + if (rcode != RCODE_BUSY) + break; + msleep(i); + } + be32_to_cpus(data); + + return rcode; +} + +#define MAX_CONFIG_ROM_SIZE 256 + +/* + * Read the bus info block, perform a speed probe, and read all of the rest of + * the config ROM. We do all this with a cached bus generation. If the bus + * generation changes under us, read_config_rom will fail and get retried. + * It's better to start all over in this case because the node from which we + * are reading the ROM may have changed the ROM during the reset. + * Returns either a result code or a negative error code. + */ +static int read_config_rom(struct fw_device *device, int generation) +{ + struct fw_card *card = device->card; + const u32 *old_rom, *new_rom; + u32 *rom, *stack; + u32 sp, key; + int i, end, length, ret; + + rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE + + sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL); + if (rom == NULL) + return -ENOMEM; + + stack = &rom[MAX_CONFIG_ROM_SIZE]; + memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE); + + device->max_speed = SCODE_100; + + /* First read the bus info block. */ + for (i = 0; i < 5; i++) { + ret = read_rom(device, generation, i, &rom[i]); + if (ret != RCODE_COMPLETE) + goto out; + /* + * As per IEEE1212 7.2, during initialization, devices can + * reply with a 0 for the first quadlet of the config + * rom to indicate that they are booting (for example, + * if the firmware is on the disk of an external + * harddisk). In that case we just fail, and the + * retry mechanism will try again later. + */ + if (i == 0 && rom[i] == 0) { + ret = RCODE_BUSY; + goto out; + } + } + + device->max_speed = device->node->max_speed; + + /* + * Determine the speed of + * - devices with link speed less than PHY speed, + * - devices with 1394b PHY (unless only connected to 1394a PHYs), + * - all devices if there are 1394b repeaters. + * Note, we cannot use the bus info block's link_spd as starting point + * because some buggy firmwares set it lower than necessary and because + * 1394-1995 nodes do not have the field. + */ + if ((rom[2] & 0x7) < device->max_speed || + device->max_speed == SCODE_BETA || + card->beta_repeaters_present) { + u32 dummy; + + /* for S1600 and S3200 */ + if (device->max_speed == SCODE_BETA) + device->max_speed = card->link_speed; + + while (device->max_speed > SCODE_100) { + if (read_rom(device, generation, 0, &dummy) == + RCODE_COMPLETE) + break; + device->max_speed--; + } + } + + /* + * Now parse the config rom.
The config rom is a recursive + * directory structure so we parse it using a stack of + * references to the blocks that make up the structure. We + * push a reference to the root directory on the stack to + * start things off. + */ + length = i; + sp = 0; + stack[sp++] = 0xc0000005; + while (sp > 0) { + /* + * Pop the next block reference of the stack. The + * lower 24 bits is the offset into the config rom, + * the upper 8 bits are the type of the reference the + * block. + */ + key = stack[--sp]; + i = key & 0xffffff; + if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) { + ret = -ENXIO; + goto out; + } + + /* Read header quadlet for the block to get the length. */ + ret = read_rom(device, generation, i, &rom[i]); + if (ret != RCODE_COMPLETE) + goto out; + end = i + (rom[i] >> 16) + 1; + if (end > MAX_CONFIG_ROM_SIZE) { + /* + * This block extends outside the config ROM which is + * a firmware bug. Ignore this whole block, i.e. + * simply set a fake block length of 0. + */ + fw_err(card, "skipped invalid ROM block %x at %llx\n", + rom[i], + i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); + rom[i] = 0; + end = i; + } + i++; + + /* + * Now read in the block. If this is a directory + * block, check the entries as we read them to see if + * it references another block, and push it in that case. + */ + for (; i < end; i++) { + ret = read_rom(device, generation, i, &rom[i]); + if (ret != RCODE_COMPLETE) + goto out; + + if ((key >> 30) != 3 || (rom[i] >> 30) < 2) + continue; + /* + * Offset points outside the ROM. May be a firmware + * bug or an Extended ROM entry (IEEE 1212-2001 clause + * 7.7.18). Simply overwrite this pointer here by a + * fake immediate entry so that later iterators over + * the ROM don't have to check offsets all the time. + */ + if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) { + fw_err(card, + "skipped unsupported ROM entry %x at %llx\n", + rom[i], + i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); + rom[i] = 0; + continue; + } + stack[sp++] = i + rom[i]; + } + if (length < i) + length = i; + } + + old_rom = device->config_rom; + new_rom = kmemdup(rom, length * 4, GFP_KERNEL); + if (new_rom == NULL) { + ret = -ENOMEM; + goto out; + } + + down_write(&fw_device_rwsem); + device->config_rom = new_rom; + device->config_rom_length = length; + up_write(&fw_device_rwsem); + + kfree(old_rom); + ret = RCODE_COMPLETE; + device->max_rec = rom[2] >> 12 & 0xf; + device->cmc = rom[2] >> 30 & 1; + device->irmc = rom[2] >> 31 & 1; + out: + kfree(rom); + + return ret; +} + +static void fw_unit_release(struct device *dev) +{ + struct fw_unit *unit = fw_unit(dev); + + fw_device_put(fw_parent_device(unit)); + kfree(unit); +} + +static struct device_type fw_unit_type = { + .uevent = fw_unit_uevent, + .release = fw_unit_release, +}; + +static bool is_fw_unit(struct device *dev) +{ + return dev->type == &fw_unit_type; +} + +static void create_units(struct fw_device *device) +{ + struct fw_csr_iterator ci; + struct fw_unit *unit; + int key, value, i; + + i = 0; + fw_csr_iterator_init(&ci, &device->config_rom[5]); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (key != (CSR_UNIT | CSR_DIRECTORY)) + continue; + + /* + * Get the address of the unit directory and try to + * match the drivers id_tables against it. 
+ */ + unit = kzalloc(sizeof(*unit), GFP_KERNEL); + if (unit == NULL) + continue; + + unit->directory = ci.p + value - 1; + unit->device.bus = &fw_bus_type; + unit->device.type = &fw_unit_type; + unit->device.parent = &device->device; + dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++); + + BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) < + ARRAY_SIZE(fw_unit_attributes) + + ARRAY_SIZE(config_rom_attributes)); + init_fw_attribute_group(&unit->device, + fw_unit_attributes, + &unit->attribute_group); + + if (device_register(&unit->device) < 0) + goto skip_unit; + + fw_device_get(device); + continue; + + skip_unit: + kfree(unit); + } +} + +static int shutdown_unit(struct device *device, void *data) +{ + device_unregister(device); + + return 0; +} + +/* + * fw_device_rwsem acts as dual purpose mutex: + * - serializes accesses to fw_device_idr, + * - serializes accesses to fw_device.config_rom/.config_rom_length and + * fw_unit.directory, unless those accesses happen at safe occasions + */ +DECLARE_RWSEM(fw_device_rwsem); + +DEFINE_IDR(fw_device_idr); +int fw_cdev_major; + +struct fw_device *fw_device_get_by_devt(dev_t devt) +{ + struct fw_device *device; + + down_read(&fw_device_rwsem); + device = idr_find(&fw_device_idr, MINOR(devt)); + if (device) + fw_device_get(device); + up_read(&fw_device_rwsem); + + return device; +} + +struct workqueue_struct *fw_workqueue; +EXPORT_SYMBOL(fw_workqueue); + +static void fw_schedule_device_work(struct fw_device *device, + unsigned long delay) +{ + queue_delayed_work(fw_workqueue, &device->work, delay); +} + +/* + * These defines control the retry behavior for reading the config + * rom. It shouldn't be necessary to tweak these; if the device + * doesn't respond to a config rom read within 10 seconds, it's not + * going to respond at all. As for the initial delay, a lot of + * devices will be able to respond within half a second after bus + * reset. On the other hand, it's not really worth being more + * aggressive than that, since it scales pretty well; if 10 devices + * are plugged in, they're all getting read within one second. + */ + +#define MAX_RETRIES 10 +#define RETRY_DELAY (3 * HZ) +#define INITIAL_DELAY (HZ / 2) +#define SHUTDOWN_DELAY (2 * HZ) + +static void fw_device_shutdown(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + int minor = MINOR(device->device.devt); + + if (time_before64(get_jiffies_64(), + device->card->reset_jiffies + SHUTDOWN_DELAY) + && !list_empty(&device->card->link)) { + fw_schedule_device_work(device, SHUTDOWN_DELAY); + return; + } + + if (atomic_cmpxchg(&device->state, + FW_DEVICE_GONE, + FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE) + return; + + fw_device_cdev_remove(device); + device_for_each_child(&device->device, NULL, shutdown_unit); + device_unregister(&device->device); + + down_write(&fw_device_rwsem); + idr_remove(&fw_device_idr, minor); + up_write(&fw_device_rwsem); + + fw_device_put(device); +} + +static void fw_device_release(struct device *dev) +{ + struct fw_device *device = fw_device(dev); + struct fw_card *card = device->card; + unsigned long flags; + + /* + * Take the card lock so we don't set this to NULL while a + * FW_NODE_UPDATED callback is being handled or while the + * bus manager work looks at this node. 
+ */ + spin_lock_irqsave(&card->lock, flags); + device->node->data = NULL; + spin_unlock_irqrestore(&card->lock, flags); + + fw_node_put(device->node); + kfree(device->config_rom); + kfree(device); + fw_card_put(card); +} + +static struct device_type fw_device_type = { + .release = fw_device_release, +}; + +static bool is_fw_device(struct device *dev) +{ + return dev->type == &fw_device_type; +} + +static int update_unit(struct device *dev, void *data) +{ + struct fw_unit *unit = fw_unit(dev); + struct fw_driver *driver = (struct fw_driver *)dev->driver; + + if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { + device_lock(dev); + driver->update(unit); + device_unlock(dev); + } + + return 0; +} + +static void fw_device_update(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + + fw_device_cdev_update(device); + device_for_each_child(&device->device, NULL, update_unit); +} + +/* + * If a device was pending for deletion because its node went away but its + * bus info block and root directory header matches that of a newly discovered + * device, revive the existing fw_device. + * The newly allocated fw_device becomes obsolete instead. + */ +static int lookup_existing_device(struct device *dev, void *data) +{ + struct fw_device *old = fw_device(dev); + struct fw_device *new = data; + struct fw_card *card = new->card; + int match = 0; + + if (!is_fw_device(dev)) + return 0; + + down_read(&fw_device_rwsem); /* serialize config_rom access */ + spin_lock_irq(&card->lock); /* serialize node access */ + + if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 && + atomic_cmpxchg(&old->state, + FW_DEVICE_GONE, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { + struct fw_node *current_node = new->node; + struct fw_node *obsolete_node = old->node; + + new->node = obsolete_node; + new->node->data = new; + old->node = current_node; + old->node->data = old; + + old->max_speed = new->max_speed; + old->node_id = current_node->node_id; + smp_wmb(); /* update node_id before generation */ + old->generation = card->generation; + old->config_rom_retries = 0; + fw_notice(card, "rediscovered device %s\n", dev_name(dev)); + + old->workfn = fw_device_update; + fw_schedule_device_work(old, 0); + + if (current_node == card->root_node) + fw_schedule_bm_work(card, 0); + + match = 1; + } + + spin_unlock_irq(&card->lock); + up_read(&fw_device_rwsem); + + return match; +} + +enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; + +static void set_broadcast_channel(struct fw_device *device, int generation) +{ + struct fw_card *card = device->card; + __be32 data; + int rcode; + + if (!card->broadcast_channel_allocated) + return; + + /* + * The Broadcast_Channel Valid bit is required by nodes which want to + * transmit on this channel. Such transmissions are practically + * exclusive to IP over 1394 (RFC 2734). IP capable nodes are required + * to be IRM capable and have a max_rec of 8 or more. We use this fact + * to narrow down to which nodes we send Broadcast_Channel updates. + */ + if (!device->irmc || device->max_rec < 8) + return; + + /* + * Some 1394-1995 nodes crash if this 1394a-2000 register is written. + * Perform a read test first. 
*/ + if (device->bc_implemented == BC_UNKNOWN) { + rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, + device->node_id, generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + &data, 4); + switch (rcode) { + case RCODE_COMPLETE: + if (data & cpu_to_be32(1 << 31)) { + device->bc_implemented = BC_IMPLEMENTED; + break; + } + /* else fall through to case address error */ + case RCODE_ADDRESS_ERROR: + device->bc_implemented = BC_UNIMPLEMENTED; + } + } + + if (device->bc_implemented == BC_IMPLEMENTED) { + data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | + BROADCAST_CHANNEL_VALID); + fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, + device->node_id, generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + &data, 4); + } +} + +int fw_device_set_broadcast_channel(struct device *dev, void *gen) +{ + if (is_fw_device(dev)) + set_broadcast_channel(fw_device(dev), (long)gen); + + return 0; +} + +static void fw_device_init(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + struct fw_card *card = device->card; + struct device *revived_dev; + int minor, ret; + + /* + * All failure paths here set node->data to NULL, so that we + * don't try to do device_for_each_child() on a kfree()'d + * device. + */ + + ret = read_config_rom(device, device->generation); + if (ret != RCODE_COMPLETE) { + if (device->config_rom_retries < MAX_RETRIES && + atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { + device->config_rom_retries++; + fw_schedule_device_work(device, RETRY_DELAY); + } else { + if (device->node->link_on) + fw_notice(card, "giving up on node %x: reading config rom failed: %s\n", + device->node_id, + fw_rcode_string(ret)); + if (device->node == card->root_node) + fw_schedule_bm_work(card, 0); + fw_device_release(&device->device); + } + return; + } + + revived_dev = device_find_child(card->device, + device, lookup_existing_device); + if (revived_dev) { + put_device(revived_dev); + fw_device_release(&device->device); + + return; + } + + device_initialize(&device->device); + + fw_device_get(device); + down_write(&fw_device_rwsem); + minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS, + GFP_KERNEL); + up_write(&fw_device_rwsem); + + if (minor < 0) + goto error; + + device->device.bus = &fw_bus_type; + device->device.type = &fw_device_type; + device->device.parent = card->device; + device->device.devt = MKDEV(fw_cdev_major, minor); + dev_set_name(&device->device, "fw%d", minor); + + BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) < + ARRAY_SIZE(fw_device_attributes) + + ARRAY_SIZE(config_rom_attributes)); + init_fw_attribute_group(&device->device, + fw_device_attributes, + &device->attribute_group); + + if (device_add(&device->device)) { + fw_err(card, "failed to add device\n"); + goto error_with_cdev; + } + + create_units(device); + + /* + * Transition the device to running state. If it got pulled + * out from under us while we did the initialization work, we + * have to shut down the device again here. Normally, though, + * fw_node_event will be responsible for shutting it down when + * necessary. We have to use the atomic cmpxchg here to avoid + * racing with the FW_NODE_DESTROYED case in + * fw_node_event().
+ */ + if (atomic_cmpxchg(&device->state, + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { + device->workfn = fw_device_shutdown; + fw_schedule_device_work(device, SHUTDOWN_DELAY); + } else { + fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", + dev_name(&device->device), + device->config_rom[3], device->config_rom[4], + 1 << device->max_speed); + device->config_rom_retries = 0; + + set_broadcast_channel(device, device->generation); + + add_device_randomness(&device->config_rom[3], 8); + } + + /* + * Reschedule the IRM work if we just finished reading the + * root node config rom. If this races with a bus reset we + * just end up running the IRM work a couple of extra times - + * pretty harmless. + */ + if (device->node == card->root_node) + fw_schedule_bm_work(card, 0); + + return; + + error_with_cdev: + down_write(&fw_device_rwsem); + idr_remove(&fw_device_idr, minor); + up_write(&fw_device_rwsem); + error: + fw_device_put(device); /* fw_device_idr's reference */ + + put_device(&device->device); /* our reference */ +} + +/* Reread and compare bus info block and header of root directory */ +static int reread_config_rom(struct fw_device *device, int generation, + bool *changed) +{ + u32 q; + int i, rcode; + + for (i = 0; i < 6; i++) { + rcode = read_rom(device, generation, i, &q); + if (rcode != RCODE_COMPLETE) + return rcode; + + if (i == 0 && q == 0) + /* inaccessible (see read_config_rom); retry later */ + return RCODE_BUSY; + + if (q != device->config_rom[i]) { + *changed = true; + return RCODE_COMPLETE; + } + } + + *changed = false; + return RCODE_COMPLETE; +} + +static void fw_device_refresh(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + struct fw_card *card = device->card; + int ret, node_id = device->node_id; + bool changed; + + ret = reread_config_rom(device, device->generation, &changed); + if (ret != RCODE_COMPLETE) + goto failed_config_rom; + + if (!changed) { + if (atomic_cmpxchg(&device->state, + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) + goto gone; + + fw_device_update(work); + device->config_rom_retries = 0; + goto out; + } + + /* + * Something changed. We keep things simple and don't investigate + * further. We just destroy all previous units and create new ones. + */ + device_for_each_child(&device->device, NULL, shutdown_unit); + + ret = read_config_rom(device, device->generation); + if (ret != RCODE_COMPLETE) + goto failed_config_rom; + + fw_device_cdev_update(device); + create_units(device); + + /* Userspace may want to re-read attributes. 
*/ + kobject_uevent(&device->device.kobj, KOBJ_CHANGE); + + if (atomic_cmpxchg(&device->state, + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) + goto gone; + + fw_notice(card, "refreshed device %s\n", dev_name(&device->device)); + device->config_rom_retries = 0; + goto out; + + failed_config_rom: + if (device->config_rom_retries < MAX_RETRIES && + atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { + device->config_rom_retries++; + fw_schedule_device_work(device, RETRY_DELAY); + return; + } + + fw_notice(card, "giving up on refresh of device %s: %s\n", + dev_name(&device->device), fw_rcode_string(ret)); + gone: + atomic_set(&device->state, FW_DEVICE_GONE); + device->workfn = fw_device_shutdown; + fw_schedule_device_work(device, SHUTDOWN_DELAY); + out: + if (node_id == card->root_node->node_id) + fw_schedule_bm_work(card, 0); +} + +static void fw_device_workfn(struct work_struct *work) +{ + struct fw_device *device = container_of(to_delayed_work(work), + struct fw_device, work); + device->workfn(work); +} + +void fw_node_event(struct fw_card *card, struct fw_node *node, int event) +{ + struct fw_device *device; + + switch (event) { + case FW_NODE_CREATED: + /* + * Attempt to scan the node, regardless of whether its self ID has + * the L (link active) flag set or not. Some broken devices + * send L=0 but have an up-and-running link; others send L=1 + * without actually having a link. + */ + create: + device = kzalloc(sizeof(*device), GFP_ATOMIC); + if (device == NULL) + break; + + /* + * Do minimal initialization of the device here, the + * rest will happen in fw_device_init(). + * + * Attention: A lot of things, even fw_device_get(), + * cannot be done before fw_device_init() finished! + * You can basically just check device->state and + * schedule work until then, but only while holding + * card->lock. + */ + atomic_set(&device->state, FW_DEVICE_INITIALIZING); + device->card = fw_card_get(card); + device->node = fw_node_get(node); + device->node_id = node->node_id; + device->generation = card->generation; + device->is_local = node == card->local_node; + mutex_init(&device->client_list_mutex); + INIT_LIST_HEAD(&device->client_list); + + /* + * Set the node data to point back to this device so + * FW_NODE_UPDATED callbacks can update the node_id + * and generation for the device. + */ + node->data = device; + + /* + * Many devices are slow to respond after bus resets, + * especially if they are bus powered and go through + * power-up after getting plugged in. We schedule the + * first config rom scan half a second after bus reset. + */ + device->workfn = fw_device_init; + INIT_DELAYED_WORK(&device->work, fw_device_workfn); + fw_schedule_device_work(device, INITIAL_DELAY); + break; + + case FW_NODE_INITIATED_RESET: + case FW_NODE_LINK_ON: + device = node->data; + if (device == NULL) + goto create; + + device->node_id = node->node_id; + smp_wmb(); /* update node_id before generation */ + device->generation = card->generation; + if (atomic_cmpxchg(&device->state, + FW_DEVICE_RUNNING, + FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { + device->workfn = fw_device_refresh; + fw_schedule_device_work(device, + device->is_local ?
0 : INITIAL_DELAY); + } + break; + + case FW_NODE_UPDATED: + device = node->data; + if (device == NULL) + break; + + device->node_id = node->node_id; + smp_wmb(); /* update node_id before generation */ + device->generation = card->generation; + if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { + device->workfn = fw_device_update; + fw_schedule_device_work(device, 0); + } + break; + + case FW_NODE_DESTROYED: + case FW_NODE_LINK_OFF: + if (!node->data) + break; + + /* + * Destroy the device associated with the node. There + * are two cases here: either the device is fully + * initialized (FW_DEVICE_RUNNING) or we're in the + * process of reading its config rom + * (FW_DEVICE_INITIALIZING). If it is fully + * initialized we can reuse device->work to schedule a + * full fw_device_shutdown(). If not, there's work + * scheduled to read its config rom, and we just put + * the device in shutdown state to have that code fail + * to create the device. + */ + device = node->data; + if (atomic_xchg(&device->state, + FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { + device->workfn = fw_device_shutdown; + fw_schedule_device_work(device, + list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); + } + break; + } +} diff --git a/kernel/drivers/firewire/core-iso.c b/kernel/drivers/firewire/core-iso.c new file mode 100644 index 000000000..38c0aa60b --- /dev/null +++ b/kernel/drivers/firewire/core-iso.c @@ -0,0 +1,403 @@ +/* + * Isochronous I/O functionality: + * - Isochronous DMA context management + * - Isochronous bus resource management (channels, bandwidth), client side + * + * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ + +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/vmalloc.h> +#include <linux/export.h> + +#include <asm/byteorder.h> + +#include "core.h" + +/* + * Isochronous DMA context management + */ + +int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) +{ + int i; + + buffer->page_count = 0; + buffer->page_count_mapped = 0; + buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), + GFP_KERNEL); + if (buffer->pages == NULL) + return -ENOMEM; + + for (i = 0; i < page_count; i++) { + buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + if (buffer->pages[i] == NULL) + break; + } + buffer->page_count = i; + if (i < page_count) { + fw_iso_buffer_destroy(buffer, NULL); + return -ENOMEM; + } + + return 0; +} + +int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card, + enum dma_data_direction direction) +{ + dma_addr_t address; + int i; + + buffer->direction = direction; + + for (i = 0; i < buffer->page_count; i++) { + address = dma_map_page(card->device, buffer->pages[i], + 0, PAGE_SIZE, direction); + if (dma_mapping_error(card->device, address)) + break; + + set_page_private(buffer->pages[i], address); + } + buffer->page_count_mapped = i; + if (i < buffer->page_count) + return -ENOMEM; + + return 0; +} + +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction) +{ + int ret; + + ret = fw_iso_buffer_alloc(buffer, page_count); + if (ret < 0) + return ret; + + ret = fw_iso_buffer_map_dma(buffer, card, direction); + if (ret < 0) + fw_iso_buffer_destroy(buffer, card); + + return ret; +} +EXPORT_SYMBOL(fw_iso_buffer_init); + +int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer, + struct vm_area_struct *vma) +{ + unsigned long uaddr; + int i, err; + + uaddr = vma->vm_start; + for (i = 0; i < buffer->page_count; i++) { + err = vm_insert_page(vma, uaddr, buffer->pages[i]); + if (err) + return err; + + uaddr += PAGE_SIZE; + } + + return 0; +} + +void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, + struct fw_card *card) +{ + int i; + dma_addr_t address; + + for (i = 0; i < buffer->page_count_mapped; i++) { + address = page_private(buffer->pages[i]); + dma_unmap_page(card->device, address, + PAGE_SIZE, buffer->direction); + } + for (i = 0; i < buffer->page_count; i++) + __free_page(buffer->pages[i]); + + kfree(buffer->pages); + buffer->pages = NULL; + buffer->page_count = 0; + buffer->page_count_mapped = 0; +} +EXPORT_SYMBOL(fw_iso_buffer_destroy); + +/* Convert DMA address to offset into virtually contiguous buffer. 
*/ +size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) +{ + size_t i; + dma_addr_t address; + ssize_t offset; + + for (i = 0; i < buffer->page_count; i++) { + address = page_private(buffer->pages[i]); + offset = (ssize_t)completed - (ssize_t)address; + if (offset > 0 && offset <= PAGE_SIZE) + return (i << PAGE_SHIFT) + offset; + } + + return 0; +} + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data) +{ + struct fw_iso_context *ctx; + + ctx = card->driver->allocate_iso_context(card, + type, channel, header_size); + if (IS_ERR(ctx)) + return ctx; + + ctx->card = card; + ctx->type = type; + ctx->channel = channel; + ctx->speed = speed; + ctx->header_size = header_size; + ctx->callback.sc = callback; + ctx->callback_data = callback_data; + + return ctx; +} +EXPORT_SYMBOL(fw_iso_context_create); + +void fw_iso_context_destroy(struct fw_iso_context *ctx) +{ + ctx->card->driver->free_iso_context(ctx); +} +EXPORT_SYMBOL(fw_iso_context_destroy); + +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags) +{ + return ctx->card->driver->start_iso(ctx, cycle, sync, tags); +} +EXPORT_SYMBOL(fw_iso_context_start); + +int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) +{ + return ctx->card->driver->set_iso_channels(ctx, channels); +} + +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) +{ + return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); +} +EXPORT_SYMBOL(fw_iso_context_queue); + +void fw_iso_context_queue_flush(struct fw_iso_context *ctx) +{ + ctx->card->driver->flush_queue_iso(ctx); +} +EXPORT_SYMBOL(fw_iso_context_queue_flush); + +int fw_iso_context_flush_completions(struct fw_iso_context *ctx) +{ + return ctx->card->driver->flush_iso_completions(ctx); +} +EXPORT_SYMBOL(fw_iso_context_flush_completions); + +int fw_iso_context_stop(struct fw_iso_context *ctx) +{ + return ctx->card->driver->stop_iso(ctx); +} +EXPORT_SYMBOL(fw_iso_context_stop); + +/* + * Isochronous bus resource management (channels, bandwidth), client side + */ + +static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, + int bandwidth, bool allocate) +{ + int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; + __be32 data[2]; + + /* + * On a 1394a IRM with low contention, try < 1 is enough. + * On a 1394-1995 IRM, we need at least try < 2. + * Let's just do try < 5. + */ + for (try = 0; try < 5; try++) { + new = allocate ? old - bandwidth : old + bandwidth; + if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) + return -EBUSY; + + data[0] = cpu_to_be32(old); + data[1] = cpu_to_be32(new); + switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, + data, 8)) { + case RCODE_GENERATION: + /* A generation change frees all bandwidth. */ + return allocate ? -EAGAIN : bandwidth; + + case RCODE_COMPLETE: + if (be32_to_cpup(data) == old) + return bandwidth; + + old = be32_to_cpup(data); + /* Fall through. */ + } + } + + return -EIO; +} + +static int manage_channel(struct fw_card *card, int irm_id, int generation, + u32 channels_mask, u64 offset, bool allocate) +{ + __be32 bit, all, old; + __be32 data[2]; + int channel, ret = -EIO, retry = 5; + + old = all = allocate ? 
cpu_to_be32(~0) : 0; + + for (channel = 0; channel < 32; channel++) { + if (!(channels_mask & 1 << channel)) + continue; + + ret = -EBUSY; + + bit = cpu_to_be32(1 << (31 - channel)); + if ((old & bit) != (all & bit)) + continue; + + data[0] = old; + data[1] = old ^ bit; + switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + offset, data, 8)) { + case RCODE_GENERATION: + /* A generation change frees all channels. */ + return allocate ? -EAGAIN : channel; + + case RCODE_COMPLETE: + if (data[0] == old) + return channel; + + old = data[0]; + + /* Is the IRM 1394a-2000 compliant? */ + if ((data[0] & bit) == (data[1] & bit)) + continue; + + /* 1394-1995 IRM, fall through to retry. */ + default: + if (retry) { + retry--; + channel--; + } else { + ret = -EIO; + } + } + } + + return ret; +} + +static void deallocate_channel(struct fw_card *card, int irm_id, + int generation, int channel) +{ + u32 mask; + u64 offset; + + mask = channel < 32 ? 1 << channel : 1 << (channel - 32); + offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; + + manage_channel(card, irm_id, generation, mask, offset, false); +} + +/** + * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth + * + * In parameters: card, generation, channels_mask, bandwidth, allocate + * Out parameters: channel, bandwidth + * This function blocks (sleeps) during communication with the IRM. + * + * Allocates or deallocates at most one channel out of channels_mask. + * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. + * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for + * channel 0 and LSB for channel 63.) + * Allocates or deallocates as many bandwidth allocation units as specified. + * + * Returns channel < 0 if no channel was allocated or deallocated. + * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. + * + * If generation is stale, deallocations succeed but allocations fail with + * channel = -EAGAIN. + * + * If channel allocation fails, no bandwidth will be allocated either. + * If bandwidth allocation fails, no channel will be allocated either. + * But deallocations of channel and bandwidth are tried independently + * of each other's success. 
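+ *
+ * Illustrative usage sketch (hypothetical caller, not part of this file):
+ * request any one channel out of 0..63 plus some bandwidth units, then
+ * check both out parameters:
+ *
+ *	int channel = -1, bandwidth = 2400;
+ *
+ *	fw_iso_resource_manage(card, generation, ~0ULL,
+ *			       &channel, &bandwidth, true);
+ *	if (channel < 0 || bandwidth == 0)
+ *		... failed; retry with a fresh generation if channel == -EAGAIN ...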
+ */ +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, + bool allocate) +{ + u32 channels_hi = channels_mask; /* channels 31...0 */ + u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ + int irm_id, ret, c = -EINVAL; + + spin_lock_irq(&card->lock); + irm_id = card->irm_node->node_id; + spin_unlock_irq(&card->lock); + + if (channels_hi) + c = manage_channel(card, irm_id, generation, channels_hi, + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, + allocate); + if (channels_lo && c < 0) { + c = manage_channel(card, irm_id, generation, channels_lo, + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, + allocate); + if (c >= 0) + c += 32; + } + *channel = c; + + if (allocate && channels_mask != 0 && c < 0) + *bandwidth = 0; + + if (*bandwidth == 0) + return; + + ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); + if (ret < 0) + *bandwidth = 0; + + if (allocate && ret < 0) { + if (c >= 0) + deallocate_channel(card, irm_id, generation, c); + *channel = ret; + } +} +EXPORT_SYMBOL(fw_iso_resource_manage); diff --git a/kernel/drivers/firewire/core-topology.c b/kernel/drivers/firewire/core-topology.c new file mode 100644 index 000000000..0de83508f --- /dev/null +++ b/kernel/drivers/firewire/core-topology.c @@ -0,0 +1,569 @@ +/* + * Incremental bus scan, based on bus topology + * + * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/bug.h> +#include <linux/errno.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <linux/atomic.h> +#include <asm/byteorder.h> + +#include "core.h" + +#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) +#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) +#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01) +#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f) +#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03) +#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01) +#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01) +#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01) + +#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07) + +#define SELFID_PORT_CHILD 0x3 +#define SELFID_PORT_PARENT 0x2 +#define SELFID_PORT_NCONN 0x1 +#define SELFID_PORT_NONE 0x0 + +static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) +{ + u32 q; + int port_type, shift, seq; + + *total_port_count = 0; + *child_port_count = 0; + + shift = 6; + q = *sid; + seq = 0; + + while (1) { + port_type = (q >> shift) & 0x03; + switch (port_type) { + case SELFID_PORT_CHILD: + (*child_port_count)++; + case SELFID_PORT_PARENT: + case SELFID_PORT_NCONN: + (*total_port_count)++; + case SELFID_PORT_NONE: + break; + } + + shift -= 2; + if (shift == 0) { + if (!SELF_ID_MORE_PACKETS(q)) + return sid + 1; + + shift = 16; + sid++; + q = *sid; + + /* + * Check that the extra packets actually are + * extended self ID packets and that the + * sequence numbers in the extended self ID + * packets increase as expected. + */ + + if (!SELF_ID_EXTENDED(q) || + seq != SELF_ID_EXT_SEQUENCE(q)) + return NULL; + + seq++; + } + } +} + +static int get_port_type(u32 *sid, int port_index) +{ + int index, shift; + + index = (port_index + 5) / 8; + shift = 16 - ((port_index + 5) & 7) * 2; + return (sid[index] >> shift) & 0x03; +} + +static struct fw_node *fw_node_create(u32 sid, int port_count, int color) +{ + struct fw_node *node; + + node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]), + GFP_ATOMIC); + if (node == NULL) + return NULL; + + node->color = color; + node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid); + node->link_on = SELF_ID_LINK_ON(sid); + node->phy_speed = SELF_ID_PHY_SPEED(sid); + node->initiated_reset = SELF_ID_PHY_INITIATOR(sid); + node->port_count = port_count; + + atomic_set(&node->ref_count, 1); + INIT_LIST_HEAD(&node->link); + + return node; +} + +/* + * Compute the maximum hop count for this node and it's children. The + * maximum hop count is the maximum number of connections between any + * two nodes in the subtree rooted at this node. We need this for + * setting the gap count. As we build the tree bottom up in + * build_tree() below, this is fairly easy to do: for each node we + * maintain the max hop count and the max depth, ie the number of hops + * to the furthest leaf. Computing the max hop count breaks down into + * two cases: either the path goes through this node, in which case + * the hop count is the sum of the two biggest child depths plus 2. + * Or it could be the case that the max hop path is entirely + * containted in a child tree, in which case the max hop count is just + * the max hop count of this child. 
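+ *
+ * A small worked example (illustrative): for a node whose two deepest
+ * child subtrees have max_depth 2 and 0, max_depth becomes 3 and
+ * max_hops becomes the larger of the biggest child max_hops and
+ * 2 + 0 + 2 = 4.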
+ */ +static void update_hop_count(struct fw_node *node) +{ + int depths[2] = { -1, -1 }; + int max_child_hops = 0; + int i; + + for (i = 0; i < node->port_count; i++) { + if (node->ports[i] == NULL) + continue; + + if (node->ports[i]->max_hops > max_child_hops) + max_child_hops = node->ports[i]->max_hops; + + if (node->ports[i]->max_depth > depths[0]) { + depths[1] = depths[0]; + depths[0] = node->ports[i]->max_depth; + } else if (node->ports[i]->max_depth > depths[1]) + depths[1] = node->ports[i]->max_depth; + } + + node->max_depth = depths[0] + 1; + node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2); +} + +static inline struct fw_node *fw_node(struct list_head *l) +{ + return list_entry(l, struct fw_node, link); +} + +/* + * This function builds the tree representation of the topology given + * by the self IDs from the latest bus reset. During the construction + * of the tree, the function checks that the self IDs are valid and + * internally consistent. On success this function returns the + * fw_node corresponding to the local card otherwise NULL. + */ +static struct fw_node *build_tree(struct fw_card *card, + u32 *sid, int self_id_count) +{ + struct fw_node *node, *child, *local_node, *irm_node; + struct list_head stack, *h; + u32 *next_sid, *end, q; + int i, port_count, child_port_count, phy_id, parent_count, stack_depth; + int gap_count; + bool beta_repeaters_present; + + local_node = NULL; + node = NULL; + INIT_LIST_HEAD(&stack); + stack_depth = 0; + end = sid + self_id_count; + phy_id = 0; + irm_node = NULL; + gap_count = SELF_ID_GAP_COUNT(*sid); + beta_repeaters_present = false; + + while (sid < end) { + next_sid = count_ports(sid, &port_count, &child_port_count); + + if (next_sid == NULL) { + fw_err(card, "inconsistent extended self IDs\n"); + return NULL; + } + + q = *sid; + if (phy_id != SELF_ID_PHY_ID(q)) { + fw_err(card, "PHY ID mismatch in self ID: %d != %d\n", + phy_id, SELF_ID_PHY_ID(q)); + return NULL; + } + + if (child_port_count > stack_depth) { + fw_err(card, "topology stack underflow\n"); + return NULL; + } + + /* + * Seek back from the top of our stack to find the + * start of the child nodes for this node. + */ + for (i = 0, h = &stack; i < child_port_count; i++) + h = h->prev; + /* + * When the stack is empty, this yields an invalid value, + * but that pointer will never be dereferenced. + */ + child = fw_node(h); + + node = fw_node_create(q, port_count, card->color); + if (node == NULL) { + fw_err(card, "out of memory while building topology\n"); + return NULL; + } + + if (phy_id == (card->node_id & 0x3f)) + local_node = node; + + if (SELF_ID_CONTENDER(q)) + irm_node = node; + + parent_count = 0; + + for (i = 0; i < port_count; i++) { + switch (get_port_type(sid, i)) { + case SELFID_PORT_PARENT: + /* + * Who's your daddy? We dont know the + * parent node at this time, so we + * temporarily abuse node->color for + * remembering the entry in the + * node->ports array where the parent + * node should be. Later, when we + * handle the parent node, we fix up + * the reference. + */ + parent_count++; + node->color = i; + break; + + case SELFID_PORT_CHILD: + node->ports[i] = child; + /* + * Fix up parent reference for this + * child node. + */ + child->ports[child->color] = node; + child->color = card->color; + child = fw_node(child->link.next); + break; + } + } + + /* + * Check that the node reports exactly one parent + * port, except for the root, which of course should + * have no parents. 
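+	 * (The root is simply the node described by the last self-ID
+	 * packet, i.e. the one for which next_sid == end below, since the
+	 * root sends its self-ID last and thus has the highest PHY ID.)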
+ */ + if ((next_sid == end && parent_count != 0) || + (next_sid < end && parent_count != 1)) { + fw_err(card, "parent port inconsistency for node %d: " + "parent_count=%d\n", phy_id, parent_count); + return NULL; + } + + /* Pop the child nodes off the stack and push the new node. */ + __list_del(h->prev, &stack); + list_add_tail(&node->link, &stack); + stack_depth += 1 - child_port_count; + + if (node->phy_speed == SCODE_BETA && + parent_count + child_port_count > 1) + beta_repeaters_present = true; + + /* + * If PHYs report different gap counts, set an invalid count + * which will force a gap count reconfiguration and a reset. + */ + if (SELF_ID_GAP_COUNT(q) != gap_count) + gap_count = 0; + + update_hop_count(node); + + sid = next_sid; + phy_id++; + } + + card->root_node = node; + card->irm_node = irm_node; + card->gap_count = gap_count; + card->beta_repeaters_present = beta_repeaters_present; + + return local_node; +} + +typedef void (*fw_node_callback_t)(struct fw_card * card, + struct fw_node * node, + struct fw_node * parent); + +static void for_each_fw_node(struct fw_card *card, struct fw_node *root, + fw_node_callback_t callback) +{ + struct list_head list; + struct fw_node *node, *next, *child, *parent; + int i; + + INIT_LIST_HEAD(&list); + + fw_node_get(root); + list_add_tail(&root->link, &list); + parent = NULL; + list_for_each_entry(node, &list, link) { + node->color = card->color; + + for (i = 0; i < node->port_count; i++) { + child = node->ports[i]; + if (!child) + continue; + if (child->color == card->color) + parent = child; + else { + fw_node_get(child); + list_add_tail(&child->link, &list); + } + } + + callback(card, node, parent); + } + + list_for_each_entry_safe(node, next, &list, link) + fw_node_put(node); +} + +static void report_lost_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) +{ + fw_node_event(card, node, FW_NODE_DESTROYED); + fw_node_put(node); + + /* Topology has changed - reset bus manager retry counter */ + card->bm_retries = 0; +} + +static void report_found_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) +{ + int b_path = (node->phy_speed == SCODE_BETA); + + if (parent != NULL) { + /* min() macro doesn't work here with gcc 3.4 */ + node->max_speed = parent->max_speed < node->phy_speed ? + parent->max_speed : node->phy_speed; + node->b_path = parent->b_path && b_path; + } else { + node->max_speed = node->phy_speed; + node->b_path = b_path; + } + + fw_node_event(card, node, FW_NODE_CREATED); + + /* Topology has changed - reset bus manager retry counter */ + card->bm_retries = 0; +} + +void fw_destroy_nodes(struct fw_card *card) +{ + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + card->color++; + if (card->local_node != NULL) + for_each_fw_node(card, card->local_node, report_lost_node); + card->local_node = NULL; + spin_unlock_irqrestore(&card->lock, flags); +} + +static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) +{ + struct fw_node *tree; + int i; + + tree = node1->ports[port]; + node0->ports[port] = tree; + for (i = 0; i < tree->port_count; i++) { + if (tree->ports[i] == node1) { + tree->ports[i] = node0; + break; + } + } +} + +/* + * Compare the old topology tree for card with the new one specified by root. + * Queue the nodes and mark them as either found, lost or updated. + * Update the nodes in the card topology tree as we go. 
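+ *
+ * The walk proceeds over the old tree (list0, rooted at card->local_node)
+ * and the new tree (list1, rooted at root) in lockstep, breadth first, so
+ * node0 and node1 below always refer to the same topological position.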
+ */ +static void update_tree(struct fw_card *card, struct fw_node *root) +{ + struct list_head list0, list1; + struct fw_node *node0, *node1, *next1; + int i, event; + + INIT_LIST_HEAD(&list0); + list_add_tail(&card->local_node->link, &list0); + INIT_LIST_HEAD(&list1); + list_add_tail(&root->link, &list1); + + node0 = fw_node(list0.next); + node1 = fw_node(list1.next); + + while (&node0->link != &list0) { + WARN_ON(node0->port_count != node1->port_count); + + if (node0->link_on && !node1->link_on) + event = FW_NODE_LINK_OFF; + else if (!node0->link_on && node1->link_on) + event = FW_NODE_LINK_ON; + else if (node1->initiated_reset && node1->link_on) + event = FW_NODE_INITIATED_RESET; + else + event = FW_NODE_UPDATED; + + node0->node_id = node1->node_id; + node0->color = card->color; + node0->link_on = node1->link_on; + node0->initiated_reset = node1->initiated_reset; + node0->max_hops = node1->max_hops; + node1->color = card->color; + fw_node_event(card, node0, event); + + if (card->root_node == node1) + card->root_node = node0; + if (card->irm_node == node1) + card->irm_node = node0; + + for (i = 0; i < node0->port_count; i++) { + if (node0->ports[i] && node1->ports[i]) { + /* + * This port didn't change, queue the + * connected node for further + * investigation. + */ + if (node0->ports[i]->color == card->color) + continue; + list_add_tail(&node0->ports[i]->link, &list0); + list_add_tail(&node1->ports[i]->link, &list1); + } else if (node0->ports[i]) { + /* + * The nodes connected here were + * unplugged; unref the lost nodes and + * queue FW_NODE_LOST callbacks for + * them. + */ + + for_each_fw_node(card, node0->ports[i], + report_lost_node); + node0->ports[i] = NULL; + } else if (node1->ports[i]) { + /* + * One or more node were connected to + * this port. Move the new nodes into + * the tree and queue FW_NODE_CREATED + * callbacks for them. + */ + move_tree(node0, node1, i); + for_each_fw_node(card, node0->ports[i], + report_found_node); + } + } + + node0 = fw_node(node0->link.next); + next1 = fw_node(node1->link.next); + fw_node_put(node1); + node1 = next1; + } +} + +static void update_topology_map(struct fw_card *card, + u32 *self_ids, int self_id_count) +{ + int node_count = (card->root_node->node_id & 0x3f) + 1; + __be32 *map = card->topology_map; + + *map++ = cpu_to_be32((self_id_count + 2) << 16); + *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1); + *map++ = cpu_to_be32((node_count << 16) | self_id_count); + + while (self_id_count--) + *map++ = cpu_to_be32p(self_ids++); + + fw_compute_block_crc(card->topology_map); +} + +void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, + int self_id_count, u32 *self_ids, bool bm_abdicate) +{ + struct fw_node *local_node; + unsigned long flags; + + /* + * If the selfID buffer is not the immediate successor of the + * previously processed one, we cannot reliably compare the + * old and new topologies. + */ + if (!is_next_generation(generation, card->generation) && + card->local_node != NULL) { + fw_destroy_nodes(card); + card->bm_retries = 0; + } + + spin_lock_irqsave(&card->lock, flags); + + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; + card->node_id = node_id; + /* + * Update node_id before generation to prevent anybody from using + * a stale node_id together with a current generation. 
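+	 * (The intended reader-side pairing is: load generation first,
+	 * issue smp_rmb(), then load node_id.)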
+ */ + smp_wmb(); + card->generation = generation; + card->reset_jiffies = get_jiffies_64(); + card->bm_node_id = 0xffff; + card->bm_abdicate = bm_abdicate; + fw_schedule_bm_work(card, 0); + + local_node = build_tree(card, self_ids, self_id_count); + + update_topology_map(card, self_ids, self_id_count); + + card->color++; + + if (local_node == NULL) { + fw_err(card, "topology build failed\n"); + /* FIXME: We need to issue a bus reset in this case. */ + } else if (card->local_node == NULL) { + card->local_node = local_node; + for_each_fw_node(card, local_node, report_found_node); + } else { + update_tree(card, local_node); + } + + spin_unlock_irqrestore(&card->lock, flags); +} +EXPORT_SYMBOL(fw_core_handle_bus_reset); diff --git a/kernel/drivers/firewire/core-transaction.c b/kernel/drivers/firewire/core-transaction.c new file mode 100644 index 000000000..d6a09b9cd --- /dev/null +++ b/kernel/drivers/firewire/core-transaction.c @@ -0,0 +1,1300 @@ +/* + * Core IEEE1394 transaction logic + * + * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/bug.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/idr.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/rculist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <asm/byteorder.h> + +#include "core.h" + +#define HEADER_PRI(pri) ((pri) << 0) +#define HEADER_TCODE(tcode) ((tcode) << 4) +#define HEADER_RETRY(retry) ((retry) << 8) +#define HEADER_TLABEL(tlabel) ((tlabel) << 10) +#define HEADER_DESTINATION(destination) ((destination) << 16) +#define HEADER_SOURCE(source) ((source) << 16) +#define HEADER_RCODE(rcode) ((rcode) << 12) +#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) +#define HEADER_DATA_LENGTH(length) ((length) << 16) +#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) + +#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) +#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) +#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) +#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) +#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) +#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) +#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) +#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) + +#define HEADER_DESTINATION_IS_BROADCAST(q) \ + (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f)) + +#define PHY_PACKET_CONFIG 0x0 +#define PHY_PACKET_LINK_ON 0x1 +#define PHY_PACKET_SELF_ID 0x2 + +#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) +#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) +#define PHY_IDENTIFIER(id) ((id) << 30) + +/* returns 0 if the split timeout handler is already running */ +static int try_cancel_split_timeout(struct fw_transaction *t) +{ + if (t->is_split_transaction) + return del_timer(&t->split_timeout_timer); + else + return 1; +} + +static int close_transaction(struct fw_transaction *transaction, + struct fw_card *card, int rcode) +{ + struct fw_transaction *t; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(t, &card->transaction_list, link) { + if (t == transaction) { + if (!try_cancel_split_timeout(t)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } + list_del_init(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + + if (&t->link != &card->transaction_list) { + t->callback(card, rcode, NULL, 0, t->callback_data); + return 0; + } + + timed_out: + return -ENOENT; +} + +/* + * Only valid for transactions that are potentially pending (ie have + * been sent). + */ +int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction) +{ + /* + * Cancel the packet transmission if it's still queued. That + * will call the packet transmission callback which cancels + * the transaction. + */ + + if (card->driver->cancel_packet(card, &transaction->packet) == 0) + return 0; + + /* + * If the request packet has already been sent, we need to see + * if the transaction is still pending and remove it in that case. 
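+	 * If it is still pending, it is closed with RCODE_CANCELLED and 0
+	 * is returned; if it already completed, -ENOENT is returned.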
+ */ + + return close_transaction(transaction, card, RCODE_CANCELLED); +} +EXPORT_SYMBOL(fw_cancel_transaction); + +static void split_transaction_timeout_callback(unsigned long data) +{ + struct fw_transaction *t = (struct fw_transaction *)data; + struct fw_card *card = t->card; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + if (list_empty(&t->link)) { + spin_unlock_irqrestore(&card->lock, flags); + return; + } + list_del(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); + spin_unlock_irqrestore(&card->lock, flags); + + t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); +} + +static void start_split_transaction_timeout(struct fw_transaction *t, + struct fw_card *card) +{ + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + + if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) { + spin_unlock_irqrestore(&card->lock, flags); + return; + } + + t->is_split_transaction = true; + mod_timer(&t->split_timeout_timer, + jiffies + card->split_timeout_jiffies); + + spin_unlock_irqrestore(&card->lock, flags); +} + +static void transmit_complete_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + struct fw_transaction *t = + container_of(packet, struct fw_transaction, packet); + + switch (status) { + case ACK_COMPLETE: + close_transaction(t, card, RCODE_COMPLETE); + break; + case ACK_PENDING: + start_split_transaction_timeout(t, card); + break; + case ACK_BUSY_X: + case ACK_BUSY_A: + case ACK_BUSY_B: + close_transaction(t, card, RCODE_BUSY); + break; + case ACK_DATA_ERROR: + close_transaction(t, card, RCODE_DATA_ERROR); + break; + case ACK_TYPE_ERROR: + close_transaction(t, card, RCODE_TYPE_ERROR); + break; + default: + /* + * In this case the ack is really a juju specific + * rcode, so just forward that to the callback. 
+ */ + close_transaction(t, card, status); + break; + } +} + +static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, + int destination_id, int source_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length) +{ + int ext_tcode; + + if (tcode == TCODE_STREAM_DATA) { + packet->header[0] = + HEADER_DATA_LENGTH(length) | + destination_id | + HEADER_TCODE(TCODE_STREAM_DATA); + packet->header_length = 4; + packet->payload = payload; + packet->payload_length = length; + + goto common; + } + + if (tcode > 0x10) { + ext_tcode = tcode & ~0x10; + tcode = TCODE_LOCK_REQUEST; + } else + ext_tcode = 0; + + packet->header[0] = + HEADER_RETRY(RETRY_X) | + HEADER_TLABEL(tlabel) | + HEADER_TCODE(tcode) | + HEADER_DESTINATION(destination_id); + packet->header[1] = + HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); + packet->header[2] = + offset; + + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + packet->header[3] = *(u32 *)payload; + packet->header_length = 16; + packet->payload_length = 0; + break; + + case TCODE_LOCK_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + packet->header[3] = + HEADER_DATA_LENGTH(length) | + HEADER_EXTENDED_TCODE(ext_tcode); + packet->header_length = 16; + packet->payload = payload; + packet->payload_length = length; + break; + + case TCODE_READ_QUADLET_REQUEST: + packet->header_length = 12; + packet->payload_length = 0; + break; + + case TCODE_READ_BLOCK_REQUEST: + packet->header[3] = + HEADER_DATA_LENGTH(length) | + HEADER_EXTENDED_TCODE(ext_tcode); + packet->header_length = 16; + packet->payload_length = 0; + break; + + default: + WARN(1, "wrong tcode %d\n", tcode); + } + common: + packet->speed = speed; + packet->generation = generation; + packet->ack = 0; + packet->payload_mapped = false; +} + +static int allocate_tlabel(struct fw_card *card) +{ + int tlabel; + + tlabel = card->current_tlabel; + while (card->tlabel_mask & (1ULL << tlabel)) { + tlabel = (tlabel + 1) & 0x3f; + if (tlabel == card->current_tlabel) + return -EBUSY; + } + + card->current_tlabel = (tlabel + 1) & 0x3f; + card->tlabel_mask |= 1ULL << tlabel; + + return tlabel; +} + +/** + * fw_send_request() - submit a request packet for transmission + * @card: interface to send the request at + * @t: transaction instance to which the request belongs + * @tcode: transaction code + * @destination_id: destination node ID, consisting of bus_ID and phy_ID + * @generation: bus generation in which request and response are valid + * @speed: transmission speed + * @offset: 48bit wide offset into destination's address space + * @payload: data payload for the request subaction + * @length: length of the payload, in bytes + * @callback: function to be called when the transaction is completed + * @callback_data: data to be passed to the transaction completion callback + * + * Submit a request packet into the asynchronous request transmission queue. + * Can be called from atomic context. If you prefer a blocking API, use + * fw_run_transaction() in a context that can sleep. + * + * In case of lock requests, specify one of the firewire-core specific %TCODE_ + * constants instead of %TCODE_LOCK_REQUEST in @tcode. + * + * Make sure that the value in @destination_id is not older than the one in + * @generation. Otherwise the request is in danger to be sent to a wrong node. + * + * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller + * needs to synthesize @destination_id with fw_stream_packet_destination_id(). 
+ * It will contain tag, channel, and sy data instead of a node ID then. + * + * The payload buffer at @data is going to be DMA-mapped except in case of + * @length <= 8 or of local (loopback) requests. Hence make sure that the + * buffer complies with the restrictions of the streaming DMA mapping API. + * @payload must not be freed before the @callback is called. + * + * In case of request types without payload, @data is NULL and @length is 0. + * + * After the transaction is completed successfully or unsuccessfully, the + * @callback will be called. Among its parameters is the response code which + * is either one of the rcodes per IEEE 1394 or, in case of internal errors, + * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core + * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION, + * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request + * generation, or missing ACK respectively. + * + * Note some timing corner cases: fw_send_request() may complete much earlier + * than when the request packet actually hits the wire. On the other hand, + * transaction completion and hence execution of @callback may happen even + * before fw_send_request() returns. + */ +void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, + int destination_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data) +{ + unsigned long flags; + int tlabel; + + /* + * Allocate tlabel from the bitmap and put the transaction on + * the list while holding the card spinlock. + */ + + spin_lock_irqsave(&card->lock, flags); + + tlabel = allocate_tlabel(card); + if (tlabel < 0) { + spin_unlock_irqrestore(&card->lock, flags); + callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); + return; + } + + t->node_id = destination_id; + t->tlabel = tlabel; + t->card = card; + t->is_split_transaction = false; + setup_timer(&t->split_timeout_timer, + split_transaction_timeout_callback, (unsigned long)t); + t->callback = callback; + t->callback_data = callback_data; + + fw_fill_request(&t->packet, tcode, t->tlabel, + destination_id, card->node_id, generation, + speed, offset, payload, length); + t->packet.callback = transmit_complete_callback; + + list_add_tail(&t->link, &card->transaction_list); + + spin_unlock_irqrestore(&card->lock, flags); + + card->driver->send_request(card, &t->packet); +} +EXPORT_SYMBOL(fw_send_request); + +struct transaction_callback_data { + struct completion done; + void *payload; + int rcode; +}; + +static void transaction_callback(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct transaction_callback_data *d = data; + + if (rcode == RCODE_COMPLETE) + memcpy(d->payload, payload, length); + d->rcode = rcode; + complete(&d->done); +} + +/** + * fw_run_transaction() - send request and sleep until transaction is completed + * + * Returns the RCODE. See fw_send_request() for parameter documentation. + * Unlike fw_send_request(), @data points to the payload of the request or/and + * to the payload of the response. DMA mapping restrictions apply to outbound + * request payloads of >= 8 bytes but not to inbound response payloads. 
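+ *
+ * Illustrative sketch (hypothetical caller, not part of this file):
+ * reading one quadlet of a remote node's configuration ROM:
+ *
+ *	__be32 quadlet;
+ *	int rcode;
+ *
+ *	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
+ *			node_id, generation, SCODE_100,
+ *			CSR_REGISTER_BASE + CSR_CONFIG_ROM, &quadlet, 4);
+ *	if (rcode != RCODE_COMPLETE)
+ *		... see fw_rcode_string(rcode) ...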
+ */ +int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *payload, size_t length) +{ + struct transaction_callback_data d; + struct fw_transaction t; + + init_timer_on_stack(&t.split_timeout_timer); + init_completion(&d.done); + d.payload = payload; + fw_send_request(card, &t, tcode, destination_id, generation, speed, + offset, payload, length, transaction_callback, &d); + wait_for_completion(&d.done); + destroy_timer_on_stack(&t.split_timeout_timer); + + return d.rcode; +} +EXPORT_SYMBOL(fw_run_transaction); + +static DEFINE_MUTEX(phy_config_mutex); +static DECLARE_COMPLETION(phy_config_done); + +static void transmit_phy_packet_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + complete(&phy_config_done); +} + +static struct fw_packet phy_config_packet = { + .header_length = 12, + .header[0] = TCODE_LINK_INTERNAL << 4, + .payload_length = 0, + .speed = SCODE_100, + .callback = transmit_phy_packet_callback, +}; + +void fw_send_phy_config(struct fw_card *card, + int node_id, int generation, int gap_count) +{ + long timeout = DIV_ROUND_UP(HZ, 10); + u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); + + if (node_id != FW_PHY_CONFIG_NO_NODE_ID) + data |= PHY_CONFIG_ROOT_ID(node_id); + + if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { + gap_count = card->driver->read_phy_reg(card, 1); + if (gap_count < 0) + return; + + gap_count &= 63; + if (gap_count == 63) + return; + } + data |= PHY_CONFIG_GAP_COUNT(gap_count); + + mutex_lock(&phy_config_mutex); + + phy_config_packet.header[1] = data; + phy_config_packet.header[2] = ~data; + phy_config_packet.generation = generation; + reinit_completion(&phy_config_done); + + card->driver->send_request(card, &phy_config_packet); + wait_for_completion_timeout(&phy_config_done, timeout); + + mutex_unlock(&phy_config_mutex); +} + +static struct fw_address_handler *lookup_overlapping_address_handler( + struct list_head *list, unsigned long long offset, size_t length) +{ + struct fw_address_handler *handler; + + list_for_each_entry_rcu(handler, list, link) { + if (handler->offset < offset + length && + offset < handler->offset + handler->length) + return handler; + } + + return NULL; +} + +static bool is_enclosing_handler(struct fw_address_handler *handler, + unsigned long long offset, size_t length) +{ + return handler->offset <= offset && + offset + length <= handler->offset + handler->length; +} + +static struct fw_address_handler *lookup_enclosing_address_handler( + struct list_head *list, unsigned long long offset, size_t length) +{ + struct fw_address_handler *handler; + + list_for_each_entry_rcu(handler, list, link) { + if (is_enclosing_handler(handler, offset, length)) + return handler; + } + + return NULL; +} + +static DEFINE_SPINLOCK(address_handler_list_lock); +static LIST_HEAD(address_handler_list); + +const struct fw_address_region fw_high_memory_region = + { .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, }; +EXPORT_SYMBOL(fw_high_memory_region); + +static const struct fw_address_region low_memory_region = + { .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, }; + +#if 0 +const struct fw_address_region fw_private_region = + { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; +const struct fw_address_region fw_csr_region = + { .start = CSR_REGISTER_BASE, + .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, }; +const struct fw_address_region fw_unit_space_region = + { .start = 0xfffff0000900ULL, .end = 
0x1000000000000ULL, }; +#endif /* 0 */ + +static bool is_in_fcp_region(u64 offset, size_t length) +{ + return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && + offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END); +} + +/** + * fw_core_add_address_handler() - register for incoming requests + * @handler: callback + * @region: region in the IEEE 1212 node space address range + * + * region->start, ->end, and handler->length have to be quadlet-aligned. + * + * When a request is received that falls within the specified address range, + * the specified callback is invoked. The parameters passed to the callback + * give the details of the particular request. + * + * To be called in process context. + * Return value: 0 on success, non-zero otherwise. + * + * The start offset of the handler's address region is determined by + * fw_core_add_address_handler() and is returned in handler->offset. + * + * Address allocations are exclusive, except for the FCP registers. + */ +int fw_core_add_address_handler(struct fw_address_handler *handler, + const struct fw_address_region *region) +{ + struct fw_address_handler *other; + int ret = -EBUSY; + + if (region->start & 0xffff000000000003ULL || + region->start >= region->end || + region->end > 0x0001000000000000ULL || + handler->length & 3 || + handler->length == 0) + return -EINVAL; + + spin_lock(&address_handler_list_lock); + + handler->offset = region->start; + while (handler->offset + handler->length <= region->end) { + if (is_in_fcp_region(handler->offset, handler->length)) + other = NULL; + else + other = lookup_overlapping_address_handler + (&address_handler_list, + handler->offset, handler->length); + if (other != NULL) { + handler->offset += other->length; + } else { + list_add_tail_rcu(&handler->link, &address_handler_list); + ret = 0; + break; + } + } + + spin_unlock(&address_handler_list_lock); + + return ret; +} +EXPORT_SYMBOL(fw_core_add_address_handler); + +/** + * fw_core_remove_address_handler() - unregister an address handler + * + * To be called in process context. + * + * When fw_core_remove_address_handler() returns, @handler->callback() is + * guaranteed to not run on any CPU anymore. 
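+ *
+ * Illustrative sketch of the add/remove pairing (hypothetical driver code,
+ * not part of this file):
+ *
+ *	static struct fw_address_handler my_handler = {
+ *		.length		  = 0x100,
+ *		.address_callback = my_callback,
+ *	};
+ *
+ *	err = fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
+ *	... on success, my_handler.offset holds the allocated start address ...
+ *	fw_core_remove_address_handler(&my_handler);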
+ */ +void fw_core_remove_address_handler(struct fw_address_handler *handler) +{ + spin_lock(&address_handler_list_lock); + list_del_rcu(&handler->link); + spin_unlock(&address_handler_list_lock); + synchronize_rcu(); +} +EXPORT_SYMBOL(fw_core_remove_address_handler); + +struct fw_request { + struct fw_packet response; + u32 request_header[4]; + int ack; + u32 length; + u32 data[0]; +}; + +static void free_response_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + struct fw_request *request; + + request = container_of(packet, struct fw_request, response); + kfree(request); +} + +int fw_get_response_length(struct fw_request *r) +{ + int tcode, ext_tcode, data_length; + + tcode = HEADER_GET_TCODE(r->request_header[0]); + + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + return 0; + + case TCODE_READ_QUADLET_REQUEST: + return 4; + + case TCODE_READ_BLOCK_REQUEST: + data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); + return data_length; + + case TCODE_LOCK_REQUEST: + ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); + data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); + switch (ext_tcode) { + case EXTCODE_FETCH_ADD: + case EXTCODE_LITTLE_ADD: + return data_length; + default: + return data_length / 2; + } + + default: + WARN(1, "wrong tcode %d\n", tcode); + return 0; + } +} + +void fw_fill_response(struct fw_packet *response, u32 *request_header, + int rcode, void *payload, size_t length) +{ + int tcode, tlabel, extended_tcode, source, destination; + + tcode = HEADER_GET_TCODE(request_header[0]); + tlabel = HEADER_GET_TLABEL(request_header[0]); + source = HEADER_GET_DESTINATION(request_header[0]); + destination = HEADER_GET_SOURCE(request_header[1]); + extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); + + response->header[0] = + HEADER_RETRY(RETRY_1) | + HEADER_TLABEL(tlabel) | + HEADER_DESTINATION(destination); + response->header[1] = + HEADER_SOURCE(source) | + HEADER_RCODE(rcode); + response->header[2] = 0; + + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); + response->header_length = 12; + response->payload_length = 0; + break; + + case TCODE_READ_QUADLET_REQUEST: + response->header[0] |= + HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); + if (payload != NULL) + response->header[3] = *(u32 *)payload; + else + response->header[3] = 0; + response->header_length = 16; + response->payload_length = 0; + break; + + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_REQUEST: + response->header[0] |= HEADER_TCODE(tcode + 2); + response->header[3] = + HEADER_DATA_LENGTH(length) | + HEADER_EXTENDED_TCODE(extended_tcode); + response->header_length = 16; + response->payload = payload; + response->payload_length = length; + break; + + default: + WARN(1, "wrong tcode %d\n", tcode); + } + + response->payload_mapped = false; +} +EXPORT_SYMBOL(fw_fill_response); + +static u32 compute_split_timeout_timestamp(struct fw_card *card, + u32 request_timestamp) +{ + unsigned int cycles; + u32 timestamp; + + cycles = card->split_timeout_cycles; + cycles += request_timestamp & 0x1fff; + + timestamp = request_timestamp & ~0x1fff; + timestamp += (cycles / 8000) << 13; + timestamp |= cycles % 8000; + + return timestamp; +} + +static struct fw_request *allocate_request(struct fw_card *card, + struct fw_packet *p) +{ + struct fw_request *request; + u32 *data, length; + int request_tcode; + + request_tcode = 
HEADER_GET_TCODE(p->header[0]); + switch (request_tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + data = &p->header[3]; + length = 4; + break; + + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_LOCK_REQUEST: + data = p->payload; + length = HEADER_GET_DATA_LENGTH(p->header[3]); + break; + + case TCODE_READ_QUADLET_REQUEST: + data = NULL; + length = 4; + break; + + case TCODE_READ_BLOCK_REQUEST: + data = NULL; + length = HEADER_GET_DATA_LENGTH(p->header[3]); + break; + + default: + fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n", + p->header[0], p->header[1], p->header[2]); + return NULL; + } + + request = kmalloc(sizeof(*request) + length, GFP_ATOMIC); + if (request == NULL) + return NULL; + + request->response.speed = p->speed; + request->response.timestamp = + compute_split_timeout_timestamp(card, p->timestamp); + request->response.generation = p->generation; + request->response.ack = 0; + request->response.callback = free_response_callback; + request->ack = p->ack; + request->length = length; + if (data) + memcpy(request->data, data, length); + + memcpy(request->request_header, p->header, sizeof(p->header)); + + return request; +} + +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode) +{ + if (WARN_ONCE(!request, "invalid for FCP address handlers")) + return; + + /* unified transaction or broadcast transaction: don't respond */ + if (request->ack != ACK_PENDING || + HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { + kfree(request); + return; + } + + if (rcode == RCODE_COMPLETE) + fw_fill_response(&request->response, request->request_header, + rcode, request->data, + fw_get_response_length(request)); + else + fw_fill_response(&request->response, request->request_header, + rcode, NULL, 0); + + card->driver->send_response(card, &request->response); +} +EXPORT_SYMBOL(fw_send_response); + +/** + * fw_get_request_speed() - returns speed at which the @request was received + */ +int fw_get_request_speed(struct fw_request *request) +{ + return request->response.speed; +} +EXPORT_SYMBOL(fw_get_request_speed); + +static void handle_exclusive_region_request(struct fw_card *card, + struct fw_packet *p, + struct fw_request *request, + unsigned long long offset) +{ + struct fw_address_handler *handler; + int tcode, destination, source; + + destination = HEADER_GET_DESTINATION(p->header[0]); + source = HEADER_GET_SOURCE(p->header[1]); + tcode = HEADER_GET_TCODE(p->header[0]); + if (tcode == TCODE_LOCK_REQUEST) + tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); + + rcu_read_lock(); + handler = lookup_enclosing_address_handler(&address_handler_list, + offset, request->length); + if (handler) + handler->address_callback(card, request, + tcode, destination, source, + p->generation, offset, + request->data, request->length, + handler->callback_data); + rcu_read_unlock(); + + if (!handler) + fw_send_response(card, request, RCODE_ADDRESS_ERROR); +} + +static void handle_fcp_region_request(struct fw_card *card, + struct fw_packet *p, + struct fw_request *request, + unsigned long long offset) +{ + struct fw_address_handler *handler; + int tcode, destination, source; + + if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && + offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || + request->length > 0x200) { + fw_send_response(card, request, RCODE_ADDRESS_ERROR); + + return; + } + + tcode = HEADER_GET_TCODE(p->header[0]); + destination = HEADER_GET_DESTINATION(p->header[0]); + source = HEADER_GET_SOURCE(p->header[1]); + + if (tcode != 
TCODE_WRITE_QUADLET_REQUEST && + tcode != TCODE_WRITE_BLOCK_REQUEST) { + fw_send_response(card, request, RCODE_TYPE_ERROR); + + return; + } + + rcu_read_lock(); + list_for_each_entry_rcu(handler, &address_handler_list, link) { + if (is_enclosing_handler(handler, offset, request->length)) + handler->address_callback(card, NULL, tcode, + destination, source, + p->generation, offset, + request->data, + request->length, + handler->callback_data); + } + rcu_read_unlock(); + + fw_send_response(card, request, RCODE_COMPLETE); +} + +void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) +{ + struct fw_request *request; + unsigned long long offset; + + if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) + return; + + if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { + fw_cdev_handle_phy_packet(card, p); + return; + } + + request = allocate_request(card, p); + if (request == NULL) { + /* FIXME: send statically allocated busy packet. */ + return; + } + + offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | + p->header[2]; + + if (!is_in_fcp_region(offset, request->length)) + handle_exclusive_region_request(card, p, request, offset); + else + handle_fcp_region_request(card, p, request, offset); + +} +EXPORT_SYMBOL(fw_core_handle_request); + +void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) +{ + struct fw_transaction *t; + unsigned long flags; + u32 *data; + size_t data_length; + int tcode, tlabel, source, rcode; + + tcode = HEADER_GET_TCODE(p->header[0]); + tlabel = HEADER_GET_TLABEL(p->header[0]); + source = HEADER_GET_SOURCE(p->header[1]); + rcode = HEADER_GET_RCODE(p->header[1]); + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(t, &card->transaction_list, link) { + if (t->node_id == source && t->tlabel == tlabel) { + if (!try_cancel_split_timeout(t)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } + list_del_init(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + + if (&t->link == &card->transaction_list) { + timed_out: + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", + source, tlabel); + return; + } + + /* + * FIXME: sanity check packet, is length correct, does tcodes + * and addresses match. + */ + + switch (tcode) { + case TCODE_READ_QUADLET_RESPONSE: + data = (u32 *) &p->header[3]; + data_length = 4; + break; + + case TCODE_WRITE_RESPONSE: + data = NULL; + data_length = 0; + break; + + case TCODE_READ_BLOCK_RESPONSE: + case TCODE_LOCK_RESPONSE: + data = p->payload; + data_length = HEADER_GET_DATA_LENGTH(p->header[3]); + break; + + default: + /* Should never happen, this is just to shut up gcc. */ + data = NULL; + data_length = 0; + break; + } + + /* + * The response handler may be executed while the request handler + * is still pending. Cancel the request handler. 
+ */ + card->driver->cancel_packet(card, &t->packet); + + t->callback(card, rcode, data, data_length, t->callback_data); +} +EXPORT_SYMBOL(fw_core_handle_response); + +/** + * fw_rcode_string - convert a firewire result code to an error description + * @rcode: the result code + */ +const char *fw_rcode_string(int rcode) +{ + static const char *const names[] = { + [RCODE_COMPLETE] = "no error", + [RCODE_CONFLICT_ERROR] = "conflict error", + [RCODE_DATA_ERROR] = "data error", + [RCODE_TYPE_ERROR] = "type error", + [RCODE_ADDRESS_ERROR] = "address error", + [RCODE_SEND_ERROR] = "send error", + [RCODE_CANCELLED] = "timeout", + [RCODE_BUSY] = "busy", + [RCODE_GENERATION] = "bus reset", + [RCODE_NO_ACK] = "no ack", + }; + + if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode]) + return names[rcode]; + else + return "unknown"; +} +EXPORT_SYMBOL(fw_rcode_string); + +static const struct fw_address_region topology_map_region = + { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, + .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; + +static void handle_topology_map(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *payload, size_t length, + void *callback_data) +{ + int start; + + if (!TCODE_IS_READ_REQUEST(tcode)) { + fw_send_response(card, request, RCODE_TYPE_ERROR); + return; + } + + if ((offset & 3) > 0 || (length & 3) > 0) { + fw_send_response(card, request, RCODE_ADDRESS_ERROR); + return; + } + + start = (offset - topology_map_region.start) / 4; + memcpy(payload, &card->topology_map[start], length); + + fw_send_response(card, request, RCODE_COMPLETE); +} + +static struct fw_address_handler topology_map = { + .length = 0x400, + .address_callback = handle_topology_map, +}; + +static const struct fw_address_region registers_region = + { .start = CSR_REGISTER_BASE, + .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; + +static void update_split_timeout(struct fw_card *card) +{ + unsigned int cycles; + + cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); + + /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */ + cycles = clamp(cycles, 800u, 3u * 8000u); + + card->split_timeout_cycles = cycles; + card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); +} + +static void handle_registers(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *payload, size_t length, + void *callback_data) +{ + int reg = offset & ~CSR_REGISTER_BASE; + __be32 *data = payload; + int rcode = RCODE_COMPLETE; + unsigned long flags; + + switch (reg) { + case CSR_PRIORITY_BUDGET: + if (!card->priority_budget_implemented) { + rcode = RCODE_ADDRESS_ERROR; + break; + } + /* else fall through */ + + case CSR_NODE_IDS: + /* + * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 + * and 9.6, but interoperable with IEEE 1394.1-2004 bridges + */ + /* fall through */ + + case CSR_STATE_CLEAR: + case CSR_STATE_SET: + case CSR_CYCLE_TIME: + case CSR_BUS_TIME: + case CSR_BUSY_TIMEOUT: + if (tcode == TCODE_READ_QUADLET_REQUEST) + *data = cpu_to_be32(card->driver->read_csr(card, reg)); + else if (tcode == TCODE_WRITE_QUADLET_REQUEST) + card->driver->write_csr(card, reg, be32_to_cpu(*data)); + else + rcode = RCODE_TYPE_ERROR; + break; + + case CSR_RESET_START: + if (tcode == TCODE_WRITE_QUADLET_REQUEST) + card->driver->write_csr(card, CSR_STATE_CLEAR, + CSR_STATE_BIT_ABDICATE); + else + rcode = RCODE_TYPE_ERROR; + 
break; + + case CSR_SPLIT_TIMEOUT_HI: + if (tcode == TCODE_READ_QUADLET_REQUEST) { + *data = cpu_to_be32(card->split_timeout_hi); + } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { + spin_lock_irqsave(&card->lock, flags); + card->split_timeout_hi = be32_to_cpu(*data) & 7; + update_split_timeout(card); + spin_unlock_irqrestore(&card->lock, flags); + } else { + rcode = RCODE_TYPE_ERROR; + } + break; + + case CSR_SPLIT_TIMEOUT_LO: + if (tcode == TCODE_READ_QUADLET_REQUEST) { + *data = cpu_to_be32(card->split_timeout_lo); + } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { + spin_lock_irqsave(&card->lock, flags); + card->split_timeout_lo = + be32_to_cpu(*data) & 0xfff80000; + update_split_timeout(card); + spin_unlock_irqrestore(&card->lock, flags); + } else { + rcode = RCODE_TYPE_ERROR; + } + break; + + case CSR_MAINT_UTILITY: + if (tcode == TCODE_READ_QUADLET_REQUEST) + *data = card->maint_utility_register; + else if (tcode == TCODE_WRITE_QUADLET_REQUEST) + card->maint_utility_register = *data; + else + rcode = RCODE_TYPE_ERROR; + break; + + case CSR_BROADCAST_CHANNEL: + if (tcode == TCODE_READ_QUADLET_REQUEST) + *data = cpu_to_be32(card->broadcast_channel); + else if (tcode == TCODE_WRITE_QUADLET_REQUEST) + card->broadcast_channel = + (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) | + BROADCAST_CHANNEL_INITIAL; + else + rcode = RCODE_TYPE_ERROR; + break; + + case CSR_BUS_MANAGER_ID: + case CSR_BANDWIDTH_AVAILABLE: + case CSR_CHANNELS_AVAILABLE_HI: + case CSR_CHANNELS_AVAILABLE_LO: + /* + * FIXME: these are handled by the OHCI hardware and + * the stack never sees these request. If we add + * support for a new type of controller that doesn't + * handle this in hardware we need to deal with these + * transactions. + */ + BUG(); + break; + + default: + rcode = RCODE_ADDRESS_ERROR; + break; + } + + fw_send_response(card, request, rcode); +} + +static struct fw_address_handler registers = { + .length = 0x400, + .address_callback = handle_registers, +}; + +static void handle_low_memory(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *payload, size_t length, + void *callback_data) +{ + /* + * This catches requests not handled by the physical DMA unit, + * i.e., wrong transaction types or unauthorized source nodes. 
+ */ + fw_send_response(card, request, RCODE_TYPE_ERROR); +} + +static struct fw_address_handler low_memory = { + .length = FW_MAX_PHYSICAL_RANGE, + .address_callback = handle_low_memory, +}; + +MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); +MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); +MODULE_LICENSE("GPL"); + +static const u32 vendor_textual_descriptor[] = { + /* textual descriptor leaf () */ + 0x00060000, + 0x00000000, + 0x00000000, + 0x4c696e75, /* L i n u */ + 0x78204669, /* x F i */ + 0x72657769, /* r e w i */ + 0x72650000, /* r e */ +}; + +static const u32 model_textual_descriptor[] = { + /* model descriptor leaf () */ + 0x00030000, + 0x00000000, + 0x00000000, + 0x4a756a75, /* J u j u */ +}; + +static struct fw_descriptor vendor_id_descriptor = { + .length = ARRAY_SIZE(vendor_textual_descriptor), + .immediate = 0x03001f11, + .key = 0x81000000, + .data = vendor_textual_descriptor, +}; + +static struct fw_descriptor model_id_descriptor = { + .length = ARRAY_SIZE(model_textual_descriptor), + .immediate = 0x17023901, + .key = 0x81000000, + .data = model_textual_descriptor, +}; + +static int __init fw_core_init(void) +{ + int ret; + + fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0); + if (!fw_workqueue) + return -ENOMEM; + + ret = bus_register(&fw_bus_type); + if (ret < 0) { + destroy_workqueue(fw_workqueue); + return ret; + } + + fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); + if (fw_cdev_major < 0) { + bus_unregister(&fw_bus_type); + destroy_workqueue(fw_workqueue); + return fw_cdev_major; + } + + fw_core_add_address_handler(&topology_map, &topology_map_region); + fw_core_add_address_handler(®isters, ®isters_region); + fw_core_add_address_handler(&low_memory, &low_memory_region); + fw_core_add_descriptor(&vendor_id_descriptor); + fw_core_add_descriptor(&model_id_descriptor); + + return 0; +} + +static void __exit fw_core_cleanup(void) +{ + unregister_chrdev(fw_cdev_major, "firewire"); + bus_unregister(&fw_bus_type); + destroy_workqueue(fw_workqueue); + idr_destroy(&fw_device_idr); +} + +module_init(fw_core_init); +module_exit(fw_core_cleanup); diff --git a/kernel/drivers/firewire/core.h b/kernel/drivers/firewire/core.h new file mode 100644 index 000000000..e1480ff68 --- /dev/null +++ b/kernel/drivers/firewire/core.h @@ -0,0 +1,258 @@ +#ifndef _FIREWIRE_CORE_H +#define _FIREWIRE_CORE_H + +#include <linux/compiler.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/idr.h> +#include <linux/mm_types.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include <linux/atomic.h> + +struct device; +struct fw_card; +struct fw_device; +struct fw_iso_buffer; +struct fw_iso_context; +struct fw_iso_packet; +struct fw_node; +struct fw_packet; + + +/* -card */ + +extern __printf(2, 3) +void fw_err(const struct fw_card *card, const char *fmt, ...); +extern __printf(2, 3) +void fw_notice(const struct fw_card *card, const char *fmt, ...); + +/* bitfields within the PHY registers */ +#define PHY_LINK_ACTIVE 0x80 +#define PHY_CONTENDER 0x40 +#define PHY_BUS_RESET 0x40 +#define PHY_EXTENDED_REGISTERS 0xe0 +#define PHY_BUS_SHORT_RESET 0x40 +#define PHY_INT_STATUS_BITS 0x3c +#define PHY_ENABLE_ACCEL 0x02 +#define PHY_ENABLE_MULTI 0x01 +#define PHY_PAGE_SELECT 0xe0 + +#define BANDWIDTH_AVAILABLE_INITIAL 4915 +#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) +#define BROADCAST_CHANNEL_VALID (1 << 30) + +#define CSR_STATE_BIT_CMSTR (1 << 8) +#define 
CSR_STATE_BIT_ABDICATE (1 << 10) + +struct fw_card_driver { + /* + * Enable the given card with the given initial config rom. + * This function is expected to activate the card, and either + * enable the PHY or set the link_on bit and initiate a bus + * reset. + */ + int (*enable)(struct fw_card *card, + const __be32 *config_rom, size_t length); + + int (*read_phy_reg)(struct fw_card *card, int address); + int (*update_phy_reg)(struct fw_card *card, int address, + int clear_bits, int set_bits); + + /* + * Update the config rom for an enabled card. This function + * should change the config rom that is presented on the bus + * and initiate a bus reset. + */ + int (*set_config_rom)(struct fw_card *card, + const __be32 *config_rom, size_t length); + + void (*send_request)(struct fw_card *card, struct fw_packet *packet); + void (*send_response)(struct fw_card *card, struct fw_packet *packet); + /* Calling cancel is valid once a packet has been submitted. */ + int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet); + + /* + * Allow the specified node ID to do direct DMA out and in of + * host memory. The card will disable this for all node when + * a bus reset happens, so driver need to reenable this after + * bus reset. Returns 0 on success, -ENODEV if the card + * doesn't support this, -ESTALE if the generation doesn't + * match. + */ + int (*enable_phys_dma)(struct fw_card *card, + int node_id, int generation); + + u32 (*read_csr)(struct fw_card *card, int csr_offset); + void (*write_csr)(struct fw_card *card, int csr_offset, u32 value); + + struct fw_iso_context * + (*allocate_iso_context)(struct fw_card *card, + int type, int channel, size_t header_size); + void (*free_iso_context)(struct fw_iso_context *ctx); + + int (*start_iso)(struct fw_iso_context *ctx, + s32 cycle, u32 sync, u32 tags); + + int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels); + + int (*queue_iso)(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload); + + void (*flush_queue_iso)(struct fw_iso_context *ctx); + + int (*flush_iso_completions)(struct fw_iso_context *ctx); + + int (*stop_iso)(struct fw_iso_context *ctx); +}; + +void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, struct device *device); +int fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid); +void fw_core_remove_card(struct fw_card *card); +int fw_compute_block_crc(__be32 *block); +void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); + +/* -cdev */ + +extern const struct file_operations fw_device_ops; + +void fw_device_cdev_update(struct fw_device *device); +void fw_device_cdev_remove(struct fw_device *device); +void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); + + +/* -device */ + +extern struct rw_semaphore fw_device_rwsem; +extern struct idr fw_device_idr; +extern int fw_cdev_major; + +static inline struct fw_device *fw_device_get(struct fw_device *device) +{ + get_device(&device->device); + + return device; +} + +static inline void fw_device_put(struct fw_device *device) +{ + put_device(&device->device); +} + +struct fw_device *fw_device_get_by_devt(dev_t devt); +int fw_device_set_broadcast_channel(struct device *dev, void *gen); +void fw_node_event(struct fw_card *card, struct fw_node *node, int event); + + +/* -iso */ + +int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count); +int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card 
*card, + enum dma_data_direction direction); +int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer, + struct vm_area_struct *vma); + + +/* -topology */ + +enum { + FW_NODE_CREATED, + FW_NODE_UPDATED, + FW_NODE_DESTROYED, + FW_NODE_LINK_ON, + FW_NODE_LINK_OFF, + FW_NODE_INITIATED_RESET, +}; + +struct fw_node { + u16 node_id; + u8 color; + u8 port_count; + u8 link_on:1; + u8 initiated_reset:1; + u8 b_path:1; + u8 phy_speed:2; /* As in the self ID packet. */ + u8 max_speed:2; /* Minimum of all phy-speeds on the path from the + * local node to this node. */ + u8 max_depth:4; /* Maximum depth to any leaf node */ + u8 max_hops:4; /* Max hops in this sub tree */ + atomic_t ref_count; + + /* For serializing node topology into a list. */ + struct list_head link; + + /* Upper layer specific data. */ + void *data; + + struct fw_node *ports[0]; +}; + +static inline struct fw_node *fw_node_get(struct fw_node *node) +{ + atomic_inc(&node->ref_count); + + return node; +} + +static inline void fw_node_put(struct fw_node *node) +{ + if (atomic_dec_and_test(&node->ref_count)) + kfree(node); +} + +void fw_core_handle_bus_reset(struct fw_card *card, int node_id, + int generation, int self_id_count, u32 *self_ids, bool bm_abdicate); +void fw_destroy_nodes(struct fw_card *card); + +/* + * Check whether new_generation is the immediate successor of old_generation. + * Take counter roll-over at 255 (as per OHCI) into account. + */ +static inline bool is_next_generation(int new_generation, int old_generation) +{ + return (new_generation & 0xff) == ((old_generation + 1) & 0xff); +} + + +/* -transaction */ + +#define TCODE_LINK_INTERNAL 0xe + +#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) +#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) +#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL) +#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) +#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) +#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) +#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) + +#define LOCAL_BUS 0xffc0 + +/* OHCI-1394's default upper bound for physical DMA: 4 GB */ +#define FW_MAX_PHYSICAL_RANGE (1ULL << 32) + +void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); +void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); +int fw_get_response_length(struct fw_request *request); +void fw_fill_response(struct fw_packet *response, u32 *request_header, + int rcode, void *payload, size_t length); + +#define FW_PHY_CONFIG_NO_NODE_ID -1 +#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 +void fw_send_phy_config(struct fw_card *card, + int node_id, int generation, int gap_count); + +static inline bool is_ping_packet(u32 *data) +{ + return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]; +} + +#endif /* _FIREWIRE_CORE_H */ diff --git a/kernel/drivers/firewire/init_ohci1394_dma.c b/kernel/drivers/firewire/init_ohci1394_dma.c new file mode 100644 index 000000000..2cc89ce74 --- /dev/null +++ b/kernel/drivers/firewire/init_ohci1394_dma.c @@ -0,0 +1,309 @@ +/* + * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers + * + * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de> + * + * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c + * this file has functions to: + * - scan the PCI very early on boot for all OHCI 1394-compliant controllers + * - reset and initialize them and make them join the IEEE1394 bus and + * - enable physical DMA on them to allow 
remote debugging + * + * All code and data is marked as __init and __initdata, respective as + * during boot, all OHCI1394 controllers may be claimed by the firewire + * stack and at this point, this code should not touch them anymore. + * + * To use physical DMA after the initialization of the firewire stack, + * be sure that the stack enables it and (re-)attach after the bus reset + * which may be caused by the firewire stack initialization. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/pci.h> /* for PCI defines */ +#include <linux/string.h> + +#include <asm/pci-direct.h> /* for direct PCI config space access */ +#include <asm/fixmap.h> + +#include <linux/init_ohci1394_dma.h> +#include "ohci.h" + +int __initdata init_ohci1394_dma_early; + +struct ohci { + void __iomem *registers; +}; + +static inline void reg_write(const struct ohci *ohci, int offset, u32 data) +{ + writel(data, ohci->registers + offset); +} + +static inline u32 reg_read(const struct ohci *ohci, int offset) +{ + return readl(ohci->registers + offset); +} + +#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */ + +/* Reads a PHY register of an OHCI-1394 controller */ +static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr) +{ + int i; + u32 r; + + reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000) + break; + mdelay(1); + } + r = reg_read(ohci, OHCI1394_PhyControl); + + return (r & 0x00ff0000) >> 16; +} + +/* Writes to a PHY register of an OHCI-1394 controller */ +static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data) +{ + int i; + + reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000)) + break; + mdelay(1); + } +} + +/* Resets an OHCI-1394 controller (for sane state before initialization) */ +static inline void __init init_ohci1394_soft_reset(struct ohci *ohci) +{ + int i; + + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + if (!(reg_read(ohci, OHCI1394_HCControlSet) + & OHCI1394_HCControl_softReset)) + break; + mdelay(1); + } +} + +#define OHCI1394_MAX_AT_REQ_RETRIES 0xf +#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 +#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 + +/* Basic OHCI-1394 register and port inititalization */ +static inline void __init init_ohci1394_initialize(struct ohci *ohci) +{ + u32 bus_options; + int num_ports, i; + + /* Put some defaults to these undefined bus options */ + bus_options = reg_read(ohci, OHCI1394_BusOptions); + bus_options |= 0x60000000; /* Enable CMC and ISC */ + bus_options &= 
~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */ + bus_options &= ~0x18000000; /* Disable PMC and BMC */ + reg_write(ohci, OHCI1394_BusOptions, bus_options); + + /* Set the bus number */ + reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0); + + /* Enable posted writes */ + reg_write(ohci, OHCI1394_HCControlSet, + OHCI1394_HCControl_postedWriteEnable); + + /* Clear link control register */ + reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff); + + /* enable phys */ + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_rcvPhyPkt); + + /* Don't accept phy packets into AR request context */ + reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400); + + /* Clear the Isochonouys interrupt masks */ + reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff); + reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff); + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff); + reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff); + + /* Accept asynchronous transfer requests from all nodes for now */ + reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); + + /* Specify asynchronous transfer retries */ + reg_write(ohci, OHCI1394_ATRetries, + OHCI1394_MAX_AT_REQ_RETRIES | + (OHCI1394_MAX_AT_RESP_RETRIES<<4) | + (OHCI1394_MAX_PHYS_RESP_RETRIES<<8)); + + /* We don't want hardware swapping */ + reg_write(ohci, OHCI1394_HCControlClear, + OHCI1394_HCControl_noByteSwapData); + + /* Enable link */ + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable); + + /* If anything is connected to a port, make sure it is enabled */ + num_ports = get_phy_reg(ohci, 2) & 0xf; + for (i = 0; i < num_ports; i++) { + unsigned int status; + + set_phy_reg(ohci, 7, i); + status = get_phy_reg(ohci, 8); + + if (status & 0x20) + set_phy_reg(ohci, 8, status & ~1); + } +} + +/** + * init_ohci1394_wait_for_busresets - wait until bus resets are completed + * + * OHCI1394 initialization itself and any device going on- or offline + * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec + * specifies that physical DMA is disabled on each bus reset and it + * has to be enabled after each bus reset when needed. We resort + * to polling here because on early boot, we have no interrupts. + */ +static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci) +{ + int i, events; + + for (i = 0; i < 9; i++) { + mdelay(200); + events = reg_read(ohci, OHCI1394_IntEventSet); + if (events & OHCI1394_busReset) + reg_write(ohci, OHCI1394_IntEventClear, + OHCI1394_busReset); + } +} + +/** + * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging + * This enables remote DMA access over IEEE1394 from every host for the low + * 4GB of address space. DMA accesses above 4GB are not available currently. + */ +static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci) +{ + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff); + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff); + reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); +} + +/** + * init_ohci1394_reset_and_init_dma - init controller and enable DMA + * This initializes the given controller and enables physical DMA engine in it. + */ +static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) +{ + /* Start off with a soft reset, clears everything to a sane state. 
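The rest of this function enables LPS (required before most register accesses), masks and clears all interrupts, runs init_ohci1394_initialize(), waits for the resulting bus resets and only then opens the physical request filters.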
*/ + init_ohci1394_soft_reset(ohci); + + /* Accessing some registers without LPS enabled may cause lock up */ + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS); + + /* Disable and clear interrupts */ + reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff); + reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff); + + mdelay(50); /* Wait 50msec to make sure we have full link enabled */ + + init_ohci1394_initialize(ohci); + /* + * The initialization causes at least one IEEE1394 bus reset. Enabling + * physical DMA only works *after* *all* bus resets have calmed down: + */ + init_ohci1394_wait_for_busresets(ohci); + + /* We had to wait and do this now if we want to debug early problems */ + init_ohci1394_enable_physical_dma(ohci); +} + +/** + * init_ohci1394_controller - Map the registers of the controller and init DMA + * This maps the registers of the specified controller and initializes it + */ +static inline void __init init_ohci1394_controller(int num, int slot, int func) +{ + unsigned long ohci_base; + struct ohci ohci; + + printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394" + " at %02x:%02x.%x\n", num, slot, func); + + ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2)) + & PCI_BASE_ADDRESS_MEM_MASK; + + set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base); + + ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE); + + init_ohci1394_reset_and_init_dma(&ohci); +} + +/** + * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them + * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them + */ +void __init init_ohci1394_dma_on_all_controllers(void) +{ + int num, slot, func; + u32 class; + + if (!early_pci_allowed()) + return; + + /* Poor man's PCI discovery, the only thing we can do at early boot */ + for (num = 0; num < 32; num++) { + for (slot = 0; slot < 32; slot++) { + for (func = 0; func < 8; func++) { + class = read_pci_config(num, slot, func, + PCI_CLASS_REVISION); + if (class == 0xffffffff) + continue; /* No device at this func */ + + if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI) + continue; /* Not an OHCI-1394 device */ + + init_ohci1394_controller(num, slot, func); + break; /* Assume one controller per device */ + } + } + } + printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n"); +} + +/** + * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization + */ +static int __init setup_ohci1394_dma(char *opt) +{ + if (!strcmp(opt, "early")) + init_ohci1394_dma_early = 1; + return 0; +} + +/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */ +early_param("ohci1394_dma", setup_ohci1394_dma); diff --git a/kernel/drivers/firewire/net.c b/kernel/drivers/firewire/net.c new file mode 100644 index 000000000..f4ea80d60 --- /dev/null +++ b/kernel/drivers/firewire/net.c @@ -0,0 +1,1707 @@ +/* + * IPv4 over IEEE 1394, per RFC 2734 + * IPv6 over IEEE 1394, per RFC 3146 + * + * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> + * + * based on eth1394 by Ben Collins et al + */ + +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/highmem.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/jiffies.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> 
+#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <asm/unaligned.h> +#include <net/arp.h> +#include <net/firewire.h> + +/* rx limits */ +#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ +#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) + +/* tx limits */ +#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ +#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ +#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */ + +#define IEEE1394_BROADCAST_CHANNEL 31 +#define IEEE1394_ALL_NODES (0xffc0 | 0x003f) +#define IEEE1394_MAX_PAYLOAD_S100 512 +#define FWNET_NO_FIFO_ADDR (~0ULL) + +#define IANA_SPECIFIER_ID 0x00005eU +#define RFC2734_SW_VERSION 0x000001U +#define RFC3146_SW_VERSION 0x000002U + +#define IEEE1394_GASP_HDR_SIZE 8 + +#define RFC2374_UNFRAG_HDR_SIZE 4 +#define RFC2374_FRAG_HDR_SIZE 8 +#define RFC2374_FRAG_OVERHEAD 4 + +#define RFC2374_HDR_UNFRAG 0 /* unfragmented */ +#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */ +#define RFC2374_HDR_LASTFRAG 2 /* last fragment */ +#define RFC2374_HDR_INTFRAG 3 /* interior fragment */ + +static bool fwnet_hwaddr_is_multicast(u8 *ha) +{ + return !!(*ha & 1); +} + +/* IPv4 and IPv6 encapsulation header */ +struct rfc2734_header { + u32 w0; + u32 w1; +}; + +#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) +#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) +#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) +#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) +#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) + +#define fwnet_set_hdr_lf(lf) ((lf) << 30) +#define fwnet_set_hdr_ether_type(et) (et) +#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) +#define fwnet_set_hdr_fg_off(fgo) (fgo) + +#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) + +static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr, + unsigned ether_type) +{ + hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG) + | fwnet_set_hdr_ether_type(ether_type); +} + +static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr, + unsigned ether_type, unsigned dg_size, unsigned dgl) +{ + hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG) + | fwnet_set_hdr_dg_size(dg_size) + | fwnet_set_hdr_ether_type(ether_type); + hdr->w1 = fwnet_set_hdr_dgl(dgl); +} + +static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr, + unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) +{ + hdr->w0 = fwnet_set_hdr_lf(lf) + | fwnet_set_hdr_dg_size(dg_size) + | fwnet_set_hdr_fg_off(fg_off); + hdr->w1 = fwnet_set_hdr_dgl(dgl); +} + +/* This list keeps track of what parts of the datagram have been filled in */ +struct fwnet_fragment_info { + struct list_head fi_link; + u16 offset; + u16 len; +}; + +struct fwnet_partial_datagram { + struct list_head pd_link; + struct list_head fi_list; + struct sk_buff *skb; + /* FIXME Why not use skb->data? 
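(pbuf points at the skb data area reserved with skb_put() in fwnet_pd_new(); incoming fragments are copied into it at their datagram offsets.)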
*/ + char *pbuf; + u16 datagram_label; + u16 ether_type; + u16 datagram_size; +}; + +static DEFINE_MUTEX(fwnet_device_mutex); +static LIST_HEAD(fwnet_device_list); + +struct fwnet_device { + struct list_head dev_link; + spinlock_t lock; + enum { + FWNET_BROADCAST_ERROR, + FWNET_BROADCAST_RUNNING, + FWNET_BROADCAST_STOPPED, + } broadcast_state; + struct fw_iso_context *broadcast_rcv_context; + struct fw_iso_buffer broadcast_rcv_buffer; + void **broadcast_rcv_buffer_ptrs; + unsigned broadcast_rcv_next_ptr; + unsigned num_broadcast_rcv_ptrs; + unsigned rcv_buffer_size; + /* + * This value is the maximum unfragmented datagram size that can be + * sent by the hardware. It already has the GASP overhead and the + * unfragmented datagram header overhead calculated into it. + */ + unsigned broadcast_xmt_max_payload; + u16 broadcast_xmt_datagramlabel; + + /* + * The CSR address that remote nodes must send datagrams to for us to + * receive them. + */ + struct fw_address_handler handler; + u64 local_fifo; + + /* Number of tx datagrams that have been queued but not yet acked */ + int queued_datagrams; + + int peer_count; + struct list_head peer_list; + struct fw_card *card; + struct net_device *netdev; +}; + +struct fwnet_peer { + struct list_head peer_link; + struct fwnet_device *dev; + u64 guid; + + /* guarded by dev->lock */ + struct list_head pd_list; /* received partial datagrams */ + unsigned pdg_size; /* pd_list size */ + + u16 datagram_label; /* outgoing datagram label */ + u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ + int node_id; + int generation; + unsigned speed; +}; + +/* This is our task struct. It's used for the packet complete callback. */ +struct fwnet_packet_task { + struct fw_transaction transaction; + struct rfc2734_header hdr; + struct sk_buff *skb; + struct fwnet_device *dev; + + int outstanding_pkts; + u64 fifo_addr; + u16 dest_node; + u16 max_payload; + u8 generation; + u8 speed; + u8 enqueued; +}; + +/* + * Get fifo address embedded in hwaddr + */ +static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha) +{ + return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32 + | get_unaligned_be32(&ha->uc.fifo_lo); +} + +/* + * saddr == NULL means use device source address. + * daddr == NULL means leave destination address (eg unresolved arp). + */ +static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) +{ + struct fwnet_header *h; + + h = (struct fwnet_header *)skb_push(skb, sizeof(*h)); + put_unaligned_be16(type, &h->h_proto); + + if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) { + memset(h->h_dest, 0, net->addr_len); + + return net->hard_header_len; + } + + if (daddr) { + memcpy(h->h_dest, daddr, net->addr_len); + + return net->hard_header_len; + } + + return -net->hard_header_len; +} + +static int fwnet_header_cache(const struct neighbour *neigh, + struct hh_cache *hh, __be16 type) +{ + struct net_device *net; + struct fwnet_header *h; + + if (type == cpu_to_be16(ETH_P_802_3)) + return -1; + net = neigh->dev; + h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h))); + h->h_proto = type; + memcpy(h->h_dest, neigh->ha, net->addr_len); + hh->hh_len = FWNET_HLEN; + + return 0; +} + +/* Called by Address Resolution module to notify changes in address. 
*/ +static void fwnet_header_cache_update(struct hh_cache *hh, + const struct net_device *net, const unsigned char *haddr) +{ + memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len); +} + +static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) +{ + memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); + + return FWNET_ALEN; +} + +static const struct header_ops fwnet_header_ops = { + .create = fwnet_header_create, + .cache = fwnet_header_cache, + .cache_update = fwnet_header_cache_update, + .parse = fwnet_header_parse, +}; + +/* FIXME: is this correct for all cases? */ +static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, + unsigned offset, unsigned len) +{ + struct fwnet_fragment_info *fi; + unsigned end = offset + len; + + list_for_each_entry(fi, &pd->fi_list, fi_link) + if (offset < fi->offset + fi->len && end > fi->offset) + return true; + + return false; +} + +/* Assumes that new fragment does not overlap any existing fragments */ +static struct fwnet_fragment_info *fwnet_frag_new( + struct fwnet_partial_datagram *pd, unsigned offset, unsigned len) +{ + struct fwnet_fragment_info *fi, *fi2, *new; + struct list_head *list; + + list = &pd->fi_list; + list_for_each_entry(fi, &pd->fi_list, fi_link) { + if (fi->offset + fi->len == offset) { + /* The new fragment can be tacked on to the end */ + /* Did the new fragment plug a hole? */ + fi2 = list_entry(fi->fi_link.next, + struct fwnet_fragment_info, fi_link); + if (fi->offset + fi->len == fi2->offset) { + /* glue fragments together */ + fi->len += len + fi2->len; + list_del(&fi2->fi_link); + kfree(fi2); + } else { + fi->len += len; + } + + return fi; + } + if (offset + len == fi->offset) { + /* The new fragment can be tacked on to the beginning */ + /* Did the new fragment plug a hole? 
*/ + fi2 = list_entry(fi->fi_link.prev, + struct fwnet_fragment_info, fi_link); + if (fi2->offset + fi2->len == fi->offset) { + /* glue fragments together */ + fi2->len += fi->len + len; + list_del(&fi->fi_link); + kfree(fi); + + return fi2; + } + fi->offset = offset; + fi->len += len; + + return fi; + } + if (offset > fi->offset + fi->len) { + list = &fi->fi_link; + break; + } + if (offset + len < fi->offset) { + list = fi->fi_link.prev; + break; + } + } + + new = kmalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + return NULL; + + new->offset = offset; + new->len = len; + list_add(&new->fi_link, list); + + return new; +} + +static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net, + struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size, + void *frag_buf, unsigned frag_off, unsigned frag_len) +{ + struct fwnet_partial_datagram *new; + struct fwnet_fragment_info *fi; + + new = kmalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + goto fail; + + INIT_LIST_HEAD(&new->fi_list); + fi = fwnet_frag_new(new, frag_off, frag_len); + if (fi == NULL) + goto fail_w_new; + + new->datagram_label = datagram_label; + new->datagram_size = dg_size; + new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net)); + if (new->skb == NULL) + goto fail_w_fi; + + skb_reserve(new->skb, LL_RESERVED_SPACE(net)); + new->pbuf = skb_put(new->skb, dg_size); + memcpy(new->pbuf + frag_off, frag_buf, frag_len); + list_add_tail(&new->pd_link, &peer->pd_list); + + return new; + +fail_w_fi: + kfree(fi); +fail_w_new: + kfree(new); +fail: + return NULL; +} + +static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer, + u16 datagram_label) +{ + struct fwnet_partial_datagram *pd; + + list_for_each_entry(pd, &peer->pd_list, pd_link) + if (pd->datagram_label == datagram_label) + return pd; + + return NULL; +} + + +static void fwnet_pd_delete(struct fwnet_partial_datagram *old) +{ + struct fwnet_fragment_info *fi, *n; + + list_for_each_entry_safe(fi, n, &old->fi_list, fi_link) + kfree(fi); + + list_del(&old->pd_link); + dev_kfree_skb_any(old->skb); + kfree(old); +} + +static bool fwnet_pd_update(struct fwnet_peer *peer, + struct fwnet_partial_datagram *pd, void *frag_buf, + unsigned frag_off, unsigned frag_len) +{ + if (fwnet_frag_new(pd, frag_off, frag_len) == NULL) + return false; + + memcpy(pd->pbuf + frag_off, frag_buf, frag_len); + + /* + * Move list entry to beginning of list so that oldest partial + * datagrams percolate to the end of the list + */ + list_move_tail(&pd->pd_link, &peer->pd_list); + + return true; +} + +static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) +{ + struct fwnet_fragment_info *fi; + + fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); + + return fi->len == pd->datagram_size; +} + +/* caller must hold dev->lock */ +static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, + u64 guid) +{ + struct fwnet_peer *peer; + + list_for_each_entry(peer, &dev->peer_list, peer_link) + if (peer->guid == guid) + return peer; + + return NULL; +} + +/* caller must hold dev->lock */ +static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, + int node_id, int generation) +{ + struct fwnet_peer *peer; + + list_for_each_entry(peer, &dev->peer_list, peer_link) + if (peer->node_id == node_id && + peer->generation == generation) + return peer; + + return NULL; +} + +/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. 
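A node's max_rec encodes its largest asynchronous payload as 2^(max_rec + 1) bytes; the speed cap (speed code + 8) keeps this at 512 bytes for S100 and doubles it per speed step, and the RFC 2374 fragment header is subtracted from the result.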
*/ +static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) +{ + max_rec = min(max_rec, speed + 8); + max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */ + + return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; +} + + +static int fwnet_finish_incoming_packet(struct net_device *net, + struct sk_buff *skb, u16 source_node_id, + bool is_broadcast, u16 ether_type) +{ + struct fwnet_device *dev; + int status; + __be64 guid; + + switch (ether_type) { + case ETH_P_ARP: + case ETH_P_IP: +#if IS_ENABLED(CONFIG_IPV6) + case ETH_P_IPV6: +#endif + break; + default: + goto err; + } + + dev = netdev_priv(net); + /* Write metadata, and then pass to the receive level */ + skb->dev = net; + skb->ip_summed = CHECKSUM_NONE; + + /* + * Parse the encapsulation header. This actually does the job of + * converting to an ethernet-like pseudo frame header. + */ + guid = cpu_to_be64(dev->card->guid); + if (dev_hard_header(skb, net, ether_type, + is_broadcast ? net->broadcast : net->dev_addr, + NULL, skb->len) >= 0) { + struct fwnet_header *eth; + u16 *rawp; + __be16 protocol; + + skb_reset_mac_header(skb); + skb_pull(skb, sizeof(*eth)); + eth = (struct fwnet_header *)skb_mac_header(skb); + if (fwnet_hwaddr_is_multicast(eth->h_dest)) { + if (memcmp(eth->h_dest, net->broadcast, + net->addr_len) == 0) + skb->pkt_type = PACKET_BROADCAST; +#if 0 + else + skb->pkt_type = PACKET_MULTICAST; +#endif + } else { + if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) + skb->pkt_type = PACKET_OTHERHOST; + } + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { + protocol = eth->h_proto; + } else { + rawp = (u16 *)skb->data; + if (*rawp == 0xffff) + protocol = htons(ETH_P_802_3); + else + protocol = htons(ETH_P_802_2); + } + skb->protocol = protocol; + } + status = netif_rx(skb); + if (status == NET_RX_DROP) { + net->stats.rx_errors++; + net->stats.rx_dropped++; + } else { + net->stats.rx_packets++; + net->stats.rx_bytes += skb->len; + } + + return 0; + + err: + net->stats.rx_errors++; + net->stats.rx_dropped++; + + dev_kfree_skb_any(skb); + + return -ENOENT; +} + +static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + int source_node_id, int generation, + bool is_broadcast) +{ + struct sk_buff *skb; + struct net_device *net = dev->netdev; + struct rfc2734_header hdr; + unsigned lf; + unsigned long flags; + struct fwnet_peer *peer; + struct fwnet_partial_datagram *pd; + int fg_off; + int dg_size; + u16 datagram_label; + int retval; + u16 ether_type; + + hdr.w0 = be32_to_cpu(buf[0]); + lf = fwnet_get_hdr_lf(&hdr); + if (lf == RFC2374_HDR_UNFRAG) { + /* + * An unfragmented datagram has been received by the ieee1394 + * bus. Build an skbuff around it so we can pass it to the + * high level network layer. + */ + ether_type = fwnet_get_hdr_ether_type(&hdr); + buf++; + len -= RFC2374_UNFRAG_HDR_SIZE; + + skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net)); + if (unlikely(!skb)) { + net->stats.rx_dropped++; + + return -ENOMEM; + } + skb_reserve(skb, LL_RESERVED_SPACE(net)); + memcpy(skb_put(skb, len), buf, len); + + return fwnet_finish_incoming_packet(net, skb, source_node_id, + is_broadcast, ether_type); + } + /* A datagram fragment has been received, now the fun begins. */ + hdr.w1 = ntohl(buf[1]); + buf += 2; + len -= RFC2374_FRAG_HDR_SIZE; + if (lf == RFC2374_HDR_FIRSTFRAG) { + ether_type = fwnet_get_hdr_ether_type(&hdr); + fg_off = 0; + } else { + ether_type = 0; + fg_off = fwnet_get_hdr_fg_off(&hdr); + } + datagram_label = fwnet_get_hdr_dgl(&hdr); + dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? 
+ 1 */ + + spin_lock_irqsave(&dev->lock, flags); + + peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); + if (!peer) { + retval = -ENOENT; + goto fail; + } + + pd = fwnet_pd_find(peer, datagram_label); + if (pd == NULL) { + while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) { + /* remove the oldest */ + fwnet_pd_delete(list_first_entry(&peer->pd_list, + struct fwnet_partial_datagram, pd_link)); + peer->pdg_size--; + } + pd = fwnet_pd_new(net, peer, datagram_label, + dg_size, buf, fg_off, len); + if (pd == NULL) { + retval = -ENOMEM; + goto fail; + } + peer->pdg_size++; + } else { + if (fwnet_frag_overlap(pd, fg_off, len) || + pd->datagram_size != dg_size) { + /* + * Differing datagram sizes or overlapping fragments, + * discard old datagram and start a new one. + */ + fwnet_pd_delete(pd); + pd = fwnet_pd_new(net, peer, datagram_label, + dg_size, buf, fg_off, len); + if (pd == NULL) { + peer->pdg_size--; + retval = -ENOMEM; + goto fail; + } + } else { + if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { + /* + * Couldn't save off fragment anyway + * so might as well obliterate the + * datagram now. + */ + fwnet_pd_delete(pd); + peer->pdg_size--; + retval = -ENOMEM; + goto fail; + } + } + } /* new datagram or add to existing one */ + + if (lf == RFC2374_HDR_FIRSTFRAG) + pd->ether_type = ether_type; + + if (fwnet_pd_is_complete(pd)) { + ether_type = pd->ether_type; + peer->pdg_size--; + skb = skb_get(pd->skb); + fwnet_pd_delete(pd); + + spin_unlock_irqrestore(&dev->lock, flags); + + return fwnet_finish_incoming_packet(net, skb, source_node_id, + false, ether_type); + } + /* + * Datagram is not complete, we're done for the + * moment. + */ + retval = 0; + fail: + spin_unlock_irqrestore(&dev->lock, flags); + + return retval; +} + +static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *payload, size_t length, + void *callback_data) +{ + struct fwnet_device *dev = callback_data; + int rcode; + + if (destination == IEEE1394_ALL_NODES) { + kfree(r); + + return; + } + + if (offset != dev->handler.offset) + rcode = RCODE_ADDRESS_ERROR; + else if (tcode != TCODE_WRITE_BLOCK_REQUEST) + rcode = RCODE_TYPE_ERROR; + else if (fwnet_incoming_packet(dev, payload, length, + source, generation, false) != 0) { + dev_err(&dev->netdev->dev, "incoming packet failure\n"); + rcode = RCODE_CONFLICT_ERROR; + } else + rcode = RCODE_COMPLETE; + + fw_send_response(card, r, rcode); +} + +static void fwnet_receive_broadcast(struct fw_iso_context *context, + u32 cycle, size_t header_length, void *header, void *data) +{ + struct fwnet_device *dev; + struct fw_iso_packet packet; + __be16 *hdr_ptr; + __be32 *buf_ptr; + int retval; + u32 length; + u16 source_node_id; + u32 specifier_id; + u32 ver; + unsigned long offset; + unsigned long flags; + + dev = data; + hdr_ptr = header; + length = be16_to_cpup(hdr_ptr); + + spin_lock_irqsave(&dev->lock, flags); + + offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; + buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; + if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) + dev->broadcast_rcv_next_ptr = 0; + + spin_unlock_irqrestore(&dev->lock, flags); + + specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 + | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; + ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; + source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; + + if (specifier_id == IANA_SPECIFIER_ID && + (ver == 
RFC2734_SW_VERSION +#if IS_ENABLED(CONFIG_IPV6) + || ver == RFC3146_SW_VERSION +#endif + )) { + buf_ptr += 2; + length -= IEEE1394_GASP_HDR_SIZE; + fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, + context->card->generation, true); + } + + packet.payload_length = dev->rcv_buffer_size; + packet.interrupt = 1; + packet.skip = 0; + packet.tag = 3; + packet.sy = 0; + packet.header_length = IEEE1394_GASP_HDR_SIZE; + + spin_lock_irqsave(&dev->lock, flags); + + retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, + &dev->broadcast_rcv_buffer, offset); + + spin_unlock_irqrestore(&dev->lock, flags); + + if (retval >= 0) + fw_iso_context_queue_flush(dev->broadcast_rcv_context); + else + dev_err(&dev->netdev->dev, "requeue failed\n"); +} + +static struct kmem_cache *fwnet_packet_task_cache; + +static void fwnet_free_ptask(struct fwnet_packet_task *ptask) +{ + dev_kfree_skb_any(ptask->skb); + kmem_cache_free(fwnet_packet_task_cache, ptask); +} + +/* Caller must hold dev->lock. */ +static void dec_queued_datagrams(struct fwnet_device *dev) +{ + if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) + netif_wake_queue(dev->netdev); +} + +static int fwnet_send_packet(struct fwnet_packet_task *ptask); + +static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) +{ + struct fwnet_device *dev = ptask->dev; + struct sk_buff *skb = ptask->skb; + unsigned long flags; + bool free; + + spin_lock_irqsave(&dev->lock, flags); + + ptask->outstanding_pkts--; + + /* Check whether we or the networking TX soft-IRQ is last user. */ + free = (ptask->outstanding_pkts == 0 && ptask->enqueued); + if (free) + dec_queued_datagrams(dev); + + if (ptask->outstanding_pkts == 0) { + dev->netdev->stats.tx_packets++; + dev->netdev->stats.tx_bytes += skb->len; + } + + spin_unlock_irqrestore(&dev->lock, flags); + + if (ptask->outstanding_pkts > 0) { + u16 dg_size; + u16 fg_off; + u16 datagram_label; + u16 lf; + + /* Update the ptask to point to the next fragment and send it */ + lf = fwnet_get_hdr_lf(&ptask->hdr); + switch (lf) { + case RFC2374_HDR_LASTFRAG: + case RFC2374_HDR_UNFRAG: + default: + dev_err(&dev->netdev->dev, + "outstanding packet %x lf %x, header %x,%x\n", + ptask->outstanding_pkts, lf, ptask->hdr.w0, + ptask->hdr.w1); + BUG(); + + case RFC2374_HDR_FIRSTFRAG: + /* Set frag type here for future interior fragments */ + dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); + fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; + datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); + break; + + case RFC2374_HDR_INTFRAG: + dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); + fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) + + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; + datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); + break; + } + + if (ptask->dest_node == IEEE1394_ALL_NODES) { + skb_pull(skb, + ptask->max_payload + IEEE1394_GASP_HDR_SIZE); + } else { + skb_pull(skb, ptask->max_payload); + } + if (ptask->outstanding_pkts > 1) { + fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, + dg_size, fg_off, datagram_label); + } else { + fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, + dg_size, fg_off, datagram_label); + ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; + } + fwnet_send_packet(ptask); + } + + if (free) + fwnet_free_ptask(ptask); +} + +static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) +{ + struct fwnet_device *dev = ptask->dev; + unsigned long flags; + bool free; + + spin_lock_irqsave(&dev->lock, flags); + + /* One fragment failed; don't try to send remaining 
fragments. */ + ptask->outstanding_pkts = 0; + + /* Check whether we or the networking TX soft-IRQ is last user. */ + free = ptask->enqueued; + if (free) + dec_queued_datagrams(dev); + + dev->netdev->stats.tx_dropped++; + dev->netdev->stats.tx_errors++; + + spin_unlock_irqrestore(&dev->lock, flags); + + if (free) + fwnet_free_ptask(ptask); +} + +static void fwnet_write_complete(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct fwnet_packet_task *ptask = data; + static unsigned long j; + static int last_rcode, errors_skipped; + + if (rcode == RCODE_COMPLETE) { + fwnet_transmit_packet_done(ptask); + } else { + if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { + dev_err(&ptask->dev->netdev->dev, + "fwnet_write_complete failed: %x (skipped %d)\n", + rcode, errors_skipped); + + errors_skipped = 0; + last_rcode = rcode; + } else { + errors_skipped++; + } + fwnet_transmit_packet_failed(ptask); + } +} + +static int fwnet_send_packet(struct fwnet_packet_task *ptask) +{ + struct fwnet_device *dev; + unsigned tx_len; + struct rfc2734_header *bufhdr; + unsigned long flags; + bool free; + + dev = ptask->dev; + tx_len = ptask->max_payload; + switch (fwnet_get_hdr_lf(&ptask->hdr)) { + case RFC2374_HDR_UNFRAG: + bufhdr = (struct rfc2734_header *) + skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); + put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); + break; + + case RFC2374_HDR_FIRSTFRAG: + case RFC2374_HDR_INTFRAG: + case RFC2374_HDR_LASTFRAG: + bufhdr = (struct rfc2734_header *) + skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); + put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); + put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); + break; + + default: + BUG(); + } + if (ptask->dest_node == IEEE1394_ALL_NODES) { + u8 *p; + int generation; + int node_id; + unsigned int sw_version; + + /* ptask->generation may not have been set yet */ + generation = dev->card->generation; + smp_rmb(); + node_id = dev->card->node_id; + + switch (ptask->skb->protocol) { + default: + sw_version = RFC2734_SW_VERSION; + break; +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + sw_version = RFC3146_SW_VERSION; +#endif + } + + p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); + put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); + put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 + | sw_version, &p[4]); + + /* We should not transmit if broadcast_channel.valid == 0. */ + fw_send_request(dev->card, &ptask->transaction, + TCODE_STREAM_DATA, + fw_stream_packet_destination_id(3, + IEEE1394_BROADCAST_CHANNEL, 0), + generation, SCODE_100, 0ULL, ptask->skb->data, + tx_len + 8, fwnet_write_complete, ptask); + + spin_lock_irqsave(&dev->lock, flags); + + /* If the AT tasklet already ran, we may be last user. */ + free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); + if (!free) + ptask->enqueued = true; + else + dec_queued_datagrams(dev); + + spin_unlock_irqrestore(&dev->lock, flags); + + goto out; + } + + fw_send_request(dev->card, &ptask->transaction, + TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, + ptask->generation, ptask->speed, ptask->fifo_addr, + ptask->skb->data, tx_len, fwnet_write_complete, ptask); + + spin_lock_irqsave(&dev->lock, flags); + + /* If the AT tasklet already ran, we may be last user. 
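fwnet_write_complete() may already have run from the AT completion path; enqueued and outstanding_pkts decide whether that path or this one frees the ptask.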
*/ + free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); + if (!free) + ptask->enqueued = true; + else + dec_queued_datagrams(dev); + + spin_unlock_irqrestore(&dev->lock, flags); + + dev->netdev->trans_start = jiffies; + out: + if (free) + fwnet_free_ptask(ptask); + + return 0; +} + +static void fwnet_fifo_stop(struct fwnet_device *dev) +{ + if (dev->local_fifo == FWNET_NO_FIFO_ADDR) + return; + + fw_core_remove_address_handler(&dev->handler); + dev->local_fifo = FWNET_NO_FIFO_ADDR; +} + +static int fwnet_fifo_start(struct fwnet_device *dev) +{ + int retval; + + if (dev->local_fifo != FWNET_NO_FIFO_ADDR) + return 0; + + dev->handler.length = 4096; + dev->handler.address_callback = fwnet_receive_packet; + dev->handler.callback_data = dev; + + retval = fw_core_add_address_handler(&dev->handler, + &fw_high_memory_region); + if (retval < 0) + return retval; + + dev->local_fifo = dev->handler.offset; + + return 0; +} + +static void __fwnet_broadcast_stop(struct fwnet_device *dev) +{ + unsigned u; + + if (dev->broadcast_state != FWNET_BROADCAST_ERROR) { + for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) + kunmap(dev->broadcast_rcv_buffer.pages[u]); + fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); + } + if (dev->broadcast_rcv_context) { + fw_iso_context_destroy(dev->broadcast_rcv_context); + dev->broadcast_rcv_context = NULL; + } + kfree(dev->broadcast_rcv_buffer_ptrs); + dev->broadcast_rcv_buffer_ptrs = NULL; + dev->broadcast_state = FWNET_BROADCAST_ERROR; +} + +static void fwnet_broadcast_stop(struct fwnet_device *dev) +{ + if (dev->broadcast_state == FWNET_BROADCAST_ERROR) + return; + fw_iso_context_stop(dev->broadcast_rcv_context); + __fwnet_broadcast_stop(dev); +} + +static int fwnet_broadcast_start(struct fwnet_device *dev) +{ + struct fw_iso_context *context; + int retval; + unsigned num_packets; + unsigned max_receive; + struct fw_iso_packet packet; + unsigned long offset; + void **ptrptr; + unsigned u; + + if (dev->broadcast_state != FWNET_BROADCAST_ERROR) + return 0; + + max_receive = 1U << (dev->card->max_receive + 1); + num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; + + ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); + if (!ptrptr) { + retval = -ENOMEM; + goto failed; + } + dev->broadcast_rcv_buffer_ptrs = ptrptr; + + context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE, + IEEE1394_BROADCAST_CHANNEL, + dev->card->link_speed, 8, + fwnet_receive_broadcast, dev); + if (IS_ERR(context)) { + retval = PTR_ERR(context); + goto failed; + } + + retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card, + FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); + if (retval < 0) + goto failed; + + dev->broadcast_state = FWNET_BROADCAST_STOPPED; + + for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { + void *ptr; + unsigned v; + + ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); + for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) + *ptrptr++ = (void *) ((char *)ptr + v * max_receive); + } + dev->broadcast_rcv_context = context; + + packet.payload_length = max_receive; + packet.interrupt = 1; + packet.skip = 0; + packet.tag = 3; + packet.sy = 0; + packet.header_length = IEEE1394_GASP_HDR_SIZE; + offset = 0; + + for (u = 0; u < num_packets; u++) { + retval = fw_iso_context_queue(context, &packet, + &dev->broadcast_rcv_buffer, offset); + if (retval < 0) + goto failed; + + offset += max_receive; + } + dev->num_broadcast_rcv_ptrs = num_packets; + dev->rcv_buffer_size = max_receive; + dev->broadcast_rcv_next_ptr = 0U; + retval = 
fw_iso_context_start(context, -1, 0, + FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ + if (retval < 0) + goto failed; + + /* FIXME: adjust it according to the min. speed of all known peers? */ + dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 + - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; + dev->broadcast_state = FWNET_BROADCAST_RUNNING; + + return 0; + + failed: + __fwnet_broadcast_stop(dev); + return retval; +} + +static void set_carrier_state(struct fwnet_device *dev) +{ + if (dev->peer_count > 1) + netif_carrier_on(dev->netdev); + else + netif_carrier_off(dev->netdev); +} + +/* ifup */ +static int fwnet_open(struct net_device *net) +{ + struct fwnet_device *dev = netdev_priv(net); + int ret; + + ret = fwnet_broadcast_start(dev); + if (ret) + return ret; + + netif_start_queue(net); + + spin_lock_irq(&dev->lock); + set_carrier_state(dev); + spin_unlock_irq(&dev->lock); + + return 0; +} + +/* ifdown */ +static int fwnet_stop(struct net_device *net) +{ + struct fwnet_device *dev = netdev_priv(net); + + netif_stop_queue(net); + fwnet_broadcast_stop(dev); + + return 0; +} + +static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) +{ + struct fwnet_header hdr_buf; + struct fwnet_device *dev = netdev_priv(net); + __be16 proto; + u16 dest_node; + unsigned max_payload; + u16 dg_size; + u16 *datagram_label_ptr; + struct fwnet_packet_task *ptask; + struct fwnet_peer *peer; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + + /* Can this happen? */ + if (netif_queue_stopped(dev->netdev)) { + spin_unlock_irqrestore(&dev->lock, flags); + + return NETDEV_TX_BUSY; + } + + ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); + if (ptask == NULL) + goto fail; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto fail; + + /* + * Make a copy of the driver-specific header. + * We might need to rebuild the header on tx failure. + */ + memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); + proto = hdr_buf.h_proto; + + switch (proto) { + case htons(ETH_P_ARP): + case htons(ETH_P_IP): +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): +#endif + break; + default: + goto fail; + } + + skb_pull(skb, sizeof(hdr_buf)); + dg_size = skb->len; + + /* + * Set the transmission type for the packet. ARP packets and IP + * broadcast packets are sent via GASP. + */ + if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) { + max_payload = dev->broadcast_xmt_max_payload; + datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; + + ptask->fifo_addr = FWNET_NO_FIFO_ADDR; + ptask->generation = 0; + ptask->dest_node = IEEE1394_ALL_NODES; + ptask->speed = SCODE_100; + } else { + union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest; + __be64 guid = get_unaligned(&ha->uc.uniq_id); + u8 generation; + + peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); + if (!peer) + goto fail; + + generation = peer->generation; + dest_node = peer->node_id; + max_payload = peer->max_payload; + datagram_label_ptr = &peer->datagram_label; + + ptask->fifo_addr = fwnet_hwaddr_fifo(ha); + ptask->generation = generation; + ptask->dest_node = dest_node; + ptask->speed = peer->speed; + } + + ptask->hdr.w0 = 0; + ptask->hdr.w1 = 0; + ptask->skb = skb; + ptask->dev = dev; + + /* Does it all fit in one packet? 
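If not, max_payload is reduced by RFC2374_FRAG_OVERHEAD, a fresh datagram label is consumed, and the payload is sent as DIV_ROUND_UP(dg_size, max_payload) fragments, each carrying an RFC 2374 fragment header.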
*/ + if (dg_size <= max_payload) { + fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); + ptask->outstanding_pkts = 1; + max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; + } else { + u16 datagram_label; + + max_payload -= RFC2374_FRAG_OVERHEAD; + datagram_label = (*datagram_label_ptr)++; + fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, + datagram_label); + ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); + max_payload += RFC2374_FRAG_HDR_SIZE; + } + + if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) + netif_stop_queue(dev->netdev); + + spin_unlock_irqrestore(&dev->lock, flags); + + ptask->max_payload = max_payload; + ptask->enqueued = 0; + + fwnet_send_packet(ptask); + + return NETDEV_TX_OK; + + fail: + spin_unlock_irqrestore(&dev->lock, flags); + + if (ptask) + kmem_cache_free(fwnet_packet_task_cache, ptask); + + if (skb != NULL) + dev_kfree_skb(skb); + + net->stats.tx_dropped++; + net->stats.tx_errors++; + + /* + * FIXME: According to a patch from 2003-02-26, "returning non-zero + * causes serious problems" here, allegedly. Before that patch, + * -ERRNO was returned which is not appropriate under Linux 2.6. + * Perhaps more needs to be done? Stop the queue in serious + * conditions and restart it elsewhere? + */ + return NETDEV_TX_OK; +} + +static int fwnet_change_mtu(struct net_device *net, int new_mtu) +{ + if (new_mtu < 68) + return -EINVAL; + + net->mtu = new_mtu; + return 0; +} + +static const struct ethtool_ops fwnet_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops fwnet_netdev_ops = { + .ndo_open = fwnet_open, + .ndo_stop = fwnet_stop, + .ndo_start_xmit = fwnet_tx, + .ndo_change_mtu = fwnet_change_mtu, +}; + +static void fwnet_init_dev(struct net_device *net) +{ + net->header_ops = &fwnet_header_ops; + net->netdev_ops = &fwnet_netdev_ops; + net->watchdog_timeo = 2 * HZ; + net->flags = IFF_BROADCAST | IFF_MULTICAST; + net->features = NETIF_F_HIGHDMA; + net->addr_len = FWNET_ALEN; + net->hard_header_len = FWNET_HLEN; + net->type = ARPHRD_IEEE1394; + net->tx_queue_len = FWNET_TX_QUEUE_LEN; + net->ethtool_ops = &fwnet_ethtool_ops; +} + +/* caller must hold fwnet_device_mutex */ +static struct fwnet_device *fwnet_dev_find(struct fw_card *card) +{ + struct fwnet_device *dev; + + list_for_each_entry(dev, &fwnet_device_list, dev_link) + if (dev->card == card) + return dev; + + return NULL; +} + +static int fwnet_add_peer(struct fwnet_device *dev, + struct fw_unit *unit, struct fw_device *device) +{ + struct fwnet_peer *peer; + + peer = kmalloc(sizeof(*peer), GFP_KERNEL); + if (!peer) + return -ENOMEM; + + dev_set_drvdata(&unit->device, peer); + + peer->dev = dev; + peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; + INIT_LIST_HEAD(&peer->pd_list); + peer->pdg_size = 0; + peer->datagram_label = 0; + peer->speed = device->max_speed; + peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); + + peer->generation = device->generation; + smp_rmb(); + peer->node_id = device->node_id; + + spin_lock_irq(&dev->lock); + list_add_tail(&peer->peer_link, &dev->peer_list); + dev->peer_count++; + set_carrier_state(dev); + spin_unlock_irq(&dev->lock); + + return 0; +} + +static int fwnet_probe(struct fw_unit *unit, + const struct ieee1394_device_id *id) +{ + struct fw_device *device = fw_parent_device(unit); + struct fw_card *card = device->card; + struct net_device *net; + bool allocated_netdev = false; + struct fwnet_device *dev; + unsigned max_mtu; + int ret; + union fwnet_hwaddr *ha; + + 
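/* All RFC 2734/3146 units on a card share a single fwnet_device and its net_device; each unit is merely added as another peer below. */ +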
mutex_lock(&fwnet_device_mutex); + + dev = fwnet_dev_find(card); + if (dev) { + net = dev->netdev; + goto have_dev; + } + + net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN, + fwnet_init_dev); + if (net == NULL) { + mutex_unlock(&fwnet_device_mutex); + return -ENOMEM; + } + + allocated_netdev = true; + SET_NETDEV_DEV(net, card->device); + dev = netdev_priv(net); + + spin_lock_init(&dev->lock); + dev->broadcast_state = FWNET_BROADCAST_ERROR; + dev->broadcast_rcv_context = NULL; + dev->broadcast_xmt_max_payload = 0; + dev->broadcast_xmt_datagramlabel = 0; + dev->local_fifo = FWNET_NO_FIFO_ADDR; + dev->queued_datagrams = 0; + INIT_LIST_HEAD(&dev->peer_list); + dev->card = card; + dev->netdev = net; + + ret = fwnet_fifo_start(dev); + if (ret < 0) + goto out; + dev->local_fifo = dev->handler.offset; + + /* + * Use the RFC 2734 default 1500 octets or the maximum payload + * as initial MTU + */ + max_mtu = (1 << (card->max_receive + 1)) + - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; + net->mtu = min(1500U, max_mtu); + + /* Set our hardware address while we're at it */ + ha = (union fwnet_hwaddr *)net->dev_addr; + put_unaligned_be64(card->guid, &ha->uc.uniq_id); + ha->uc.max_rec = dev->card->max_receive; + ha->uc.sspd = dev->card->link_speed; + put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); + put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); + + memset(net->broadcast, -1, net->addr_len); + + ret = register_netdev(net); + if (ret) + goto out; + + list_add_tail(&dev->dev_link, &fwnet_device_list); + dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n", + dev_name(card->device)); + have_dev: + ret = fwnet_add_peer(dev, unit, device); + if (ret && allocated_netdev) { + unregister_netdev(net); + list_del(&dev->dev_link); + out: + fwnet_fifo_stop(dev); + free_netdev(net); + } + + mutex_unlock(&fwnet_device_mutex); + + return ret; +} + +/* + * FIXME abort partially sent fragmented datagrams, + * discard partially received fragmented datagrams + */ +static void fwnet_update(struct fw_unit *unit) +{ + struct fw_device *device = fw_parent_device(unit); + struct fwnet_peer *peer = dev_get_drvdata(&unit->device); + int generation; + + generation = device->generation; + + spin_lock_irq(&peer->dev->lock); + peer->node_id = device->node_id; + peer->generation = generation; + spin_unlock_irq(&peer->dev->lock); +} + +static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev) +{ + struct fwnet_partial_datagram *pd, *pd_next; + + spin_lock_irq(&dev->lock); + list_del(&peer->peer_link); + dev->peer_count--; + set_carrier_state(dev); + spin_unlock_irq(&dev->lock); + + list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) + fwnet_pd_delete(pd); + + kfree(peer); +} + +static void fwnet_remove(struct fw_unit *unit) +{ + struct fwnet_peer *peer = dev_get_drvdata(&unit->device); + struct fwnet_device *dev = peer->dev; + struct net_device *net; + int i; + + mutex_lock(&fwnet_device_mutex); + + net = dev->netdev; + + fwnet_remove_peer(peer, dev); + + if (list_empty(&dev->peer_list)) { + unregister_netdev(net); + + fwnet_fifo_stop(dev); + + for (i = 0; dev->queued_datagrams && i < 5; i++) + ssleep(1); + WARN_ON(dev->queued_datagrams); + list_del(&dev->dev_link); + + free_netdev(net); + } + + mutex_unlock(&fwnet_device_mutex); +} + +static const struct ieee1394_device_id fwnet_id_table[] = { + { + .match_flags = IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .specifier_id = IANA_SPECIFIER_ID, + .version = RFC2734_SW_VERSION, + 
}, +#if IS_ENABLED(CONFIG_IPV6) + { + .match_flags = IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .specifier_id = IANA_SPECIFIER_ID, + .version = RFC3146_SW_VERSION, + }, +#endif + { } +}; + +static struct fw_driver fwnet_driver = { + .driver = { + .owner = THIS_MODULE, + .name = KBUILD_MODNAME, + .bus = &fw_bus_type, + }, + .probe = fwnet_probe, + .update = fwnet_update, + .remove = fwnet_remove, + .id_table = fwnet_id_table, +}; + +static const u32 rfc2374_unit_directory_data[] = { + 0x00040000, /* directory_length */ + 0x1200005e, /* unit_specifier_id: IANA */ + 0x81000003, /* textual descriptor offset */ + 0x13000001, /* unit_sw_version: RFC 2734 */ + 0x81000005, /* textual descriptor offset */ + 0x00030000, /* descriptor_length */ + 0x00000000, /* text */ + 0x00000000, /* minimal ASCII, en */ + 0x49414e41, /* I A N A */ + 0x00030000, /* descriptor_length */ + 0x00000000, /* text */ + 0x00000000, /* minimal ASCII, en */ + 0x49507634, /* I P v 4 */ +}; + +static struct fw_descriptor rfc2374_unit_directory = { + .length = ARRAY_SIZE(rfc2374_unit_directory_data), + .key = (CSR_DIRECTORY | CSR_UNIT) << 24, + .data = rfc2374_unit_directory_data +}; + +#if IS_ENABLED(CONFIG_IPV6) +static const u32 rfc3146_unit_directory_data[] = { + 0x00040000, /* directory_length */ + 0x1200005e, /* unit_specifier_id: IANA */ + 0x81000003, /* textual descriptor offset */ + 0x13000002, /* unit_sw_version: RFC 3146 */ + 0x81000005, /* textual descriptor offset */ + 0x00030000, /* descriptor_length */ + 0x00000000, /* text */ + 0x00000000, /* minimal ASCII, en */ + 0x49414e41, /* I A N A */ + 0x00030000, /* descriptor_length */ + 0x00000000, /* text */ + 0x00000000, /* minimal ASCII, en */ + 0x49507636, /* I P v 6 */ +}; + +static struct fw_descriptor rfc3146_unit_directory = { + .length = ARRAY_SIZE(rfc3146_unit_directory_data), + .key = (CSR_DIRECTORY | CSR_UNIT) << 24, + .data = rfc3146_unit_directory_data +}; +#endif + +static int __init fwnet_init(void) +{ + int err; + + err = fw_core_add_descriptor(&rfc2374_unit_directory); + if (err) + return err; + +#if IS_ENABLED(CONFIG_IPV6) + err = fw_core_add_descriptor(&rfc3146_unit_directory); + if (err) + goto out; +#endif + + fwnet_packet_task_cache = kmem_cache_create("packet_task", + sizeof(struct fwnet_packet_task), 0, 0, NULL); + if (!fwnet_packet_task_cache) { + err = -ENOMEM; + goto out2; + } + + err = driver_register(&fwnet_driver.driver); + if (!err) + return 0; + + kmem_cache_destroy(fwnet_packet_task_cache); +out2: +#if IS_ENABLED(CONFIG_IPV6) + fw_core_remove_descriptor(&rfc3146_unit_directory); +out: +#endif + fw_core_remove_descriptor(&rfc2374_unit_directory); + + return err; +} +module_init(fwnet_init); + +static void __exit fwnet_cleanup(void) +{ + driver_unregister(&fwnet_driver.driver); + kmem_cache_destroy(fwnet_packet_task_cache); +#if IS_ENABLED(CONFIG_IPV6) + fw_core_remove_descriptor(&rfc3146_unit_directory); +#endif + fw_core_remove_descriptor(&rfc2374_unit_directory); +} +module_exit(fwnet_cleanup); + +MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); +MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); diff --git a/kernel/drivers/firewire/nosy-user.h b/kernel/drivers/firewire/nosy-user.h new file mode 100644 index 000000000..e48aa6200 --- /dev/null +++ b/kernel/drivers/firewire/nosy-user.h @@ -0,0 +1,25 @@ +#ifndef __nosy_user_h +#define __nosy_user_h + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define NOSY_IOC_GET_STATS 
_IOR('&', 0, struct nosy_stats) +#define NOSY_IOC_START _IO('&', 1) +#define NOSY_IOC_STOP _IO('&', 2) +#define NOSY_IOC_FILTER _IOW('&', 2, __u32) + +struct nosy_stats { + __u32 total_packet_count; + __u32 lost_packet_count; +}; + +/* + * Format of packets returned from the kernel driver: + * + * quadlet with timestamp (microseconds, CPU endian) + * quadlet-padded packet data... (little endian) + * quadlet with ack (little endian) + */ + +#endif /* __nosy_user_h */ diff --git a/kernel/drivers/firewire/nosy.c b/kernel/drivers/firewire/nosy.c new file mode 100644 index 000000000..76b2d390f --- /dev/null +++ b/kernel/drivers/firewire/nosy.c @@ -0,0 +1,709 @@ +/* + * nosy - Snoop mode driver for TI PCILynx 1394 controllers + * Copyright (C) 2002-2007 Kristian Høgsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/sched.h> /* required for linux/wait.h */ +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/timex.h> +#include <linux/uaccess.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/atomic.h> +#include <asm/byteorder.h> + +#include "nosy.h" +#include "nosy-user.h" + +#define TCODE_PHY_PACKET 0x10 +#define PCI_DEVICE_ID_TI_PCILYNX 0x8000 + +static char driver_name[] = KBUILD_MODNAME; + +/* this is the physical layout of a PCL, its size is 128 bytes */ +struct pcl { + __le32 next; + __le32 async_error_next; + u32 user_data; + __le32 pcl_status; + __le32 remaining_transfer_count; + __le32 next_data_buffer; + struct { + __le32 control; + __le32 pointer; + } buffer[13]; +}; + +struct packet { + unsigned int length; + char data[0]; +}; + +struct packet_buffer { + char *data; + size_t capacity; + long total_packet_count, lost_packet_count; + atomic_t size; + struct packet *head, *tail; + wait_queue_head_t wait; +}; + +struct pcilynx { + struct pci_dev *pci_device; + __iomem char *registers; + + struct pcl *rcv_start_pcl, *rcv_pcl; + __le32 *rcv_buffer; + + dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus; + + spinlock_t client_list_lock; + struct list_head client_list; + + struct miscdevice misc; + struct list_head link; + struct kref kref; +}; + +static inline struct pcilynx * +lynx_get(struct pcilynx *lynx) +{ + kref_get(&lynx->kref); + + return lynx; +} + +static void +lynx_release(struct kref *kref) +{ + kfree(container_of(kref, struct pcilynx, kref)); +} + +static inline void +lynx_put(struct pcilynx *lynx) +{ + kref_put(&lynx->kref, lynx_release); +} + +struct client { + struct pcilynx *lynx; + u32 
tcode_mask; + struct packet_buffer buffer; + struct list_head link; +}; + +static DEFINE_MUTEX(card_mutex); +static LIST_HEAD(card_list); + +static int +packet_buffer_init(struct packet_buffer *buffer, size_t capacity) +{ + buffer->data = kmalloc(capacity, GFP_KERNEL); + if (buffer->data == NULL) + return -ENOMEM; + buffer->head = (struct packet *) buffer->data; + buffer->tail = (struct packet *) buffer->data; + buffer->capacity = capacity; + buffer->lost_packet_count = 0; + atomic_set(&buffer->size, 0); + init_waitqueue_head(&buffer->wait); + + return 0; +} + +static void +packet_buffer_destroy(struct packet_buffer *buffer) +{ + kfree(buffer->data); +} + +static int +packet_buffer_get(struct client *client, char __user *data, size_t user_length) +{ + struct packet_buffer *buffer = &client->buffer; + size_t length; + char *end; + + if (wait_event_interruptible(buffer->wait, + atomic_read(&buffer->size) > 0) || + list_empty(&client->lynx->link)) + return -ERESTARTSYS; + + if (atomic_read(&buffer->size) == 0) + return -ENODEV; + + /* FIXME: Check length <= user_length. */ + + end = buffer->data + buffer->capacity; + length = buffer->head->length; + + if (&buffer->head->data[length] < end) { + if (copy_to_user(data, buffer->head->data, length)) + return -EFAULT; + buffer->head = (struct packet *) &buffer->head->data[length]; + } else { + size_t split = end - buffer->head->data; + + if (copy_to_user(data, buffer->head->data, split)) + return -EFAULT; + if (copy_to_user(data + split, buffer->data, length - split)) + return -EFAULT; + buffer->head = (struct packet *) &buffer->data[length - split]; + } + + /* + * Decrease buffer->size as the last thing, since this is what + * keeps the interrupt from overwriting the packet we are + * retrieving from the buffer. + */ + atomic_sub(sizeof(struct packet) + length, &buffer->size); + + return length; +} + +static void +packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length) +{ + char *end; + + buffer->total_packet_count++; + + if (buffer->capacity < + atomic_read(&buffer->size) + sizeof(struct packet) + length) { + buffer->lost_packet_count++; + return; + } + + end = buffer->data + buffer->capacity; + buffer->tail->length = length; + + if (&buffer->tail->data[length] < end) { + memcpy(buffer->tail->data, data, length); + buffer->tail = (struct packet *) &buffer->tail->data[length]; + } else { + size_t split = end - buffer->tail->data; + + memcpy(buffer->tail->data, data, split); + memcpy(buffer->data, data + split, length - split); + buffer->tail = (struct packet *) &buffer->data[length - split]; + } + + /* Finally, adjust buffer size and wake up userspace reader. */ + + atomic_add(sizeof(struct packet) + length, &buffer->size); + wake_up_interruptible(&buffer->wait); +} + +static inline void +reg_write(struct pcilynx *lynx, int offset, u32 data) +{ + writel(data, lynx->registers + offset); +} + +static inline u32 +reg_read(struct pcilynx *lynx, int offset) +{ + return readl(lynx->registers + offset); +} + +static inline void +reg_set_bits(struct pcilynx *lynx, int offset, u32 mask) +{ + reg_write(lynx, offset, (reg_read(lynx, offset) | mask)); +} + +/* + * Maybe the pcl programs could be set up to just append data instead + * of using a whole packet. 
+ */ +static inline void +run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, + int dmachan) +{ + reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus); + reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20, + DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK); +} + +static int +set_phy_reg(struct pcilynx *lynx, int addr, int val) +{ + if (addr > 15) { + dev_err(&lynx->pci_device->dev, + "PHY register address %d out of range\n", addr); + return -1; + } + if (val > 0xff) { + dev_err(&lynx->pci_device->dev, + "PHY register value %d out of range\n", val); + return -1; + } + reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | + LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val)); + + return 0; +} + +static int +nosy_open(struct inode *inode, struct file *file) +{ + int minor = iminor(inode); + struct client *client; + struct pcilynx *tmp, *lynx = NULL; + + mutex_lock(&card_mutex); + list_for_each_entry(tmp, &card_list, link) + if (tmp->misc.minor == minor) { + lynx = lynx_get(tmp); + break; + } + mutex_unlock(&card_mutex); + if (lynx == NULL) + return -ENODEV; + + client = kmalloc(sizeof *client, GFP_KERNEL); + if (client == NULL) + goto fail; + + client->tcode_mask = ~0; + client->lynx = lynx; + INIT_LIST_HEAD(&client->link); + + if (packet_buffer_init(&client->buffer, 128 * 1024) < 0) + goto fail; + + file->private_data = client; + + return nonseekable_open(inode, file); +fail: + kfree(client); + lynx_put(lynx); + + return -ENOMEM; +} + +static int +nosy_release(struct inode *inode, struct file *file) +{ + struct client *client = file->private_data; + struct pcilynx *lynx = client->lynx; + + spin_lock_irq(&lynx->client_list_lock); + list_del_init(&client->link); + spin_unlock_irq(&lynx->client_list_lock); + + packet_buffer_destroy(&client->buffer); + kfree(client); + lynx_put(lynx); + + return 0; +} + +static unsigned int +nosy_poll(struct file *file, poll_table *pt) +{ + struct client *client = file->private_data; + unsigned int ret = 0; + + poll_wait(file, &client->buffer.wait, pt); + + if (atomic_read(&client->buffer.size) > 0) + ret = POLLIN | POLLRDNORM; + + if (list_empty(&client->lynx->link)) + ret |= POLLHUP; + + return ret; +} + +static ssize_t +nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) +{ + struct client *client = file->private_data; + + return packet_buffer_get(client, buffer, count); +} + +static long +nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct client *client = file->private_data; + spinlock_t *client_list_lock = &client->lynx->client_list_lock; + struct nosy_stats stats; + + switch (cmd) { + case NOSY_IOC_GET_STATS: + spin_lock_irq(client_list_lock); + stats.total_packet_count = client->buffer.total_packet_count; + stats.lost_packet_count = client->buffer.lost_packet_count; + spin_unlock_irq(client_list_lock); + + if (copy_to_user((void __user *) arg, &stats, sizeof stats)) + return -EFAULT; + else + return 0; + + case NOSY_IOC_START: + spin_lock_irq(client_list_lock); + list_add_tail(&client->link, &client->lynx->client_list); + spin_unlock_irq(client_list_lock); + + return 0; + + case NOSY_IOC_STOP: + spin_lock_irq(client_list_lock); + list_del_init(&client->link); + spin_unlock_irq(client_list_lock); + + return 0; + + case NOSY_IOC_FILTER: + spin_lock_irq(client_list_lock); + client->tcode_mask = arg; + spin_unlock_irq(client_list_lock); + + return 0; + + default: + return -EINVAL; + /* Flush buffer, configure filter. 
*/ + } +} + +static const struct file_operations nosy_ops = { + .owner = THIS_MODULE, + .read = nosy_read, + .unlocked_ioctl = nosy_ioctl, + .poll = nosy_poll, + .open = nosy_open, + .release = nosy_release, +}; + +#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ + +static void +packet_irq_handler(struct pcilynx *lynx) +{ + struct client *client; + u32 tcode_mask, tcode; + size_t length; + struct timeval tv; + + /* FIXME: Also report rcv_speed. */ + + length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff; + tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf; + + do_gettimeofday(&tv); + lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec; + + if (length == PHY_PACKET_SIZE) + tcode_mask = 1 << TCODE_PHY_PACKET; + else + tcode_mask = 1 << tcode; + + spin_lock(&lynx->client_list_lock); + + list_for_each_entry(client, &lynx->client_list, link) + if (client->tcode_mask & tcode_mask) + packet_buffer_put(&client->buffer, + lynx->rcv_buffer, length + 4); + + spin_unlock(&lynx->client_list_lock); +} + +static void +bus_reset_irq_handler(struct pcilynx *lynx) +{ + struct client *client; + struct timeval tv; + + do_gettimeofday(&tv); + + spin_lock(&lynx->client_list_lock); + + list_for_each_entry(client, &lynx->client_list, link) + packet_buffer_put(&client->buffer, &tv.tv_usec, 4); + + spin_unlock(&lynx->client_list_lock); +} + +static irqreturn_t +irq_handler(int irq, void *device) +{ + struct pcilynx *lynx = device; + u32 pci_int_status; + + pci_int_status = reg_read(lynx, PCI_INT_STATUS); + + if (pci_int_status == ~0) + /* Card was ejected. */ + return IRQ_NONE; + + if ((pci_int_status & PCI_INT_INT_PEND) == 0) + /* Not our interrupt, bail out quickly. */ + return IRQ_NONE; + + if ((pci_int_status & PCI_INT_P1394_INT) != 0) { + u32 link_int_status; + + link_int_status = reg_read(lynx, LINK_INT_STATUS); + reg_write(lynx, LINK_INT_STATUS, link_int_status); + + if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0) + bus_reset_irq_handler(lynx); + } + + /* Clear the PCI_INT_STATUS register only after clearing the + * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will + * be set again immediately. 
*/ + + reg_write(lynx, PCI_INT_STATUS, pci_int_status); + + if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) { + packet_irq_handler(lynx); + run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); + } + + return IRQ_HANDLED; +} + +static void +remove_card(struct pci_dev *dev) +{ + struct pcilynx *lynx = pci_get_drvdata(dev); + struct client *client; + + mutex_lock(&card_mutex); + list_del_init(&lynx->link); + misc_deregister(&lynx->misc); + mutex_unlock(&card_mutex); + + reg_write(lynx, PCI_INT_ENABLE, 0); + free_irq(lynx->pci_device->irq, lynx); + + spin_lock_irq(&lynx->client_list_lock); + list_for_each_entry(client, &lynx->client_list, link) + wake_up_interruptible(&client->buffer.wait); + spin_unlock_irq(&lynx->client_list_lock); + + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), + lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), + lynx->rcv_pcl, lynx->rcv_pcl_bus); + pci_free_consistent(lynx->pci_device, PAGE_SIZE, + lynx->rcv_buffer, lynx->rcv_buffer_bus); + + iounmap(lynx->registers); + pci_disable_device(dev); + lynx_put(lynx); +} + +#define RCV_BUFFER_SIZE (16 * 1024) + +static int +add_card(struct pci_dev *dev, const struct pci_device_id *unused) +{ + struct pcilynx *lynx; + u32 p, end; + int ret, i; + + if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { + dev_err(&dev->dev, + "DMA address limits not supported for PCILynx hardware\n"); + return -ENXIO; + } + if (pci_enable_device(dev)) { + dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); + return -ENXIO; + } + pci_set_master(dev); + + lynx = kzalloc(sizeof *lynx, GFP_KERNEL); + if (lynx == NULL) { + dev_err(&dev->dev, "Failed to allocate control structure\n"); + ret = -ENOMEM; + goto fail_disable; + } + lynx->pci_device = dev; + pci_set_drvdata(dev, lynx); + + spin_lock_init(&lynx->client_list_lock); + INIT_LIST_HEAD(&lynx->client_list); + kref_init(&lynx->kref); + + lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), + PCILYNX_MAX_REGISTER); + + lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, + sizeof(struct pcl), &lynx->rcv_start_pcl_bus); + lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device, + sizeof(struct pcl), &lynx->rcv_pcl_bus); + lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, + RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); + if (lynx->rcv_start_pcl == NULL || + lynx->rcv_pcl == NULL || + lynx->rcv_buffer == NULL) { + dev_err(&dev->dev, "Failed to allocate receive buffer\n"); + ret = -ENOMEM; + goto fail_deallocate; + } + lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); + lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); + lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); + + lynx->rcv_pcl->buffer[0].control = + cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); + lynx->rcv_pcl->buffer[0].pointer = + cpu_to_le32(lynx->rcv_buffer_bus + 4); + p = lynx->rcv_buffer_bus + 2048; + end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; + for (i = 1; p < end; i++, p += 2048) { + lynx->rcv_pcl->buffer[i].control = + cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); + lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p); + } + lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); + + reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); + /* Fix buggy cards with autoboot pin not tied low: */ + reg_write(lynx, DMA0_CHAN_CTRL, 0); + reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); + +#if 0 + /* now, looking for PHY register set */ + if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { + lynx->phyic.reg_1394a = 1; + 
PRINT(KERN_INFO, lynx->id, + "found 1394a conform PHY (using extended register set)"); + lynx->phyic.vendor = get_phy_vendorid(lynx); + lynx->phyic.product = get_phy_productid(lynx); + } else { + lynx->phyic.reg_1394a = 0; + PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); + } +#endif + + /* Setup the general receive FIFO max size. */ + reg_write(lynx, FIFO_SIZES, 255); + + reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); + + reg_write(lynx, LINK_INT_ENABLE, + LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | + LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | + LINK_INT_AT_STUCK | LINK_INT_SNTRJ | + LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | + LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); + + /* Disable the L flag in self ID packets. */ + set_phy_reg(lynx, 4, 0); + + /* Put this baby into snoop mode */ + reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); + + run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); + + if (request_irq(dev->irq, irq_handler, IRQF_SHARED, + driver_name, lynx)) { + dev_err(&dev->dev, + "Failed to allocate shared interrupt %d\n", dev->irq); + ret = -EIO; + goto fail_deallocate; + } + + lynx->misc.parent = &dev->dev; + lynx->misc.minor = MISC_DYNAMIC_MINOR; + lynx->misc.name = "nosy"; + lynx->misc.fops = &nosy_ops; + + mutex_lock(&card_mutex); + ret = misc_register(&lynx->misc); + if (ret) { + dev_err(&dev->dev, "Failed to register misc char device\n"); + mutex_unlock(&card_mutex); + goto fail_free_irq; + } + list_add_tail(&lynx->link, &card_list); + mutex_unlock(&card_mutex); + + dev_info(&dev->dev, + "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); + + return 0; + +fail_free_irq: + reg_write(lynx, PCI_INT_ENABLE, 0); + free_irq(lynx->pci_device->irq, lynx); + +fail_deallocate: + if (lynx->rcv_start_pcl) + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), + lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); + if (lynx->rcv_pcl) + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), + lynx->rcv_pcl, lynx->rcv_pcl_bus); + if (lynx->rcv_buffer) + pci_free_consistent(lynx->pci_device, PAGE_SIZE, + lynx->rcv_buffer, lynx->rcv_buffer_bus); + iounmap(lynx->registers); + kfree(lynx); + +fail_disable: + pci_disable_device(dev); + + return ret; +} + +static struct pci_device_id pci_table[] = { + { + .vendor = PCI_VENDOR_ID_TI, + .device = PCI_DEVICE_ID_TI_PCILYNX, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(pci, pci_table); + +static struct pci_driver lynx_pci_driver = { + .name = driver_name, + .id_table = pci_table, + .probe = add_card, + .remove = remove_card, +}; + +module_pci_driver(lynx_pci_driver); + +MODULE_AUTHOR("Kristian Hoegsberg"); +MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/firewire/nosy.h b/kernel/drivers/firewire/nosy.h new file mode 100644 index 000000000..078ff27f4 --- /dev/null +++ b/kernel/drivers/firewire/nosy.h @@ -0,0 +1,237 @@ +/* + * Chip register definitions for PCILynx chipset. Based on pcilynx.h + * from the Linux 1394 drivers, but modified a bit so the names here + * match the specification exactly (even though they have weird names, + * like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent + * reject" etc.) 
+ */ + +#define PCILYNX_MAX_REGISTER 0xfff +#define PCILYNX_MAX_MEMORY 0xffff + +#define PCI_LATENCY_CACHELINE 0x0c + +#define MISC_CONTROL 0x40 +#define MISC_CONTROL_SWRESET (1<<0) + +#define SERIAL_EEPROM_CONTROL 0x44 + +#define PCI_INT_STATUS 0x48 +#define PCI_INT_ENABLE 0x4c +/* status and enable have identical bit numbers */ +#define PCI_INT_INT_PEND (1<<31) +#define PCI_INT_FRC_INT (1<<30) +#define PCI_INT_SLV_ADR_PERR (1<<28) +#define PCI_INT_SLV_DAT_PERR (1<<27) +#define PCI_INT_MST_DAT_PERR (1<<26) +#define PCI_INT_MST_DEV_TO (1<<25) +#define PCI_INT_INT_SLV_TO (1<<23) +#define PCI_INT_AUX_TO (1<<18) +#define PCI_INT_AUX_INT (1<<17) +#define PCI_INT_P1394_INT (1<<16) +#define PCI_INT_DMA4_PCL (1<<9) +#define PCI_INT_DMA4_HLT (1<<8) +#define PCI_INT_DMA3_PCL (1<<7) +#define PCI_INT_DMA3_HLT (1<<6) +#define PCI_INT_DMA2_PCL (1<<5) +#define PCI_INT_DMA2_HLT (1<<4) +#define PCI_INT_DMA1_PCL (1<<3) +#define PCI_INT_DMA1_HLT (1<<2) +#define PCI_INT_DMA0_PCL (1<<1) +#define PCI_INT_DMA0_HLT (1<<0) +/* all DMA interrupts combined: */ +#define PCI_INT_DMA_ALL 0x3ff + +#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2)) +#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1)) + +#define LBUS_ADDR 0xb4 +#define LBUS_ADDR_SEL_RAM (0x0<<16) +#define LBUS_ADDR_SEL_ROM (0x1<<16) +#define LBUS_ADDR_SEL_AUX (0x2<<16) +#define LBUS_ADDR_SEL_ZV (0x3<<16) + +#define GPIO_CTRL_A 0xb8 +#define GPIO_CTRL_B 0xbc +#define GPIO_DATA_BASE 0xc0 + +#define DMA_BREG(base, chan) (base + chan * 0x20) +#define DMA_SREG(base, chan) (base + chan * 0x10) + +#define PCL_NEXT_INVALID (1<<0) + +/* transfer commands */ +#define PCL_CMD_RCV (0x1<<24) +#define PCL_CMD_RCV_AND_UPDATE (0xa<<24) +#define PCL_CMD_XMT (0x2<<24) +#define PCL_CMD_UNFXMT (0xc<<24) +#define PCL_CMD_PCI_TO_LBUS (0x8<<24) +#define PCL_CMD_LBUS_TO_PCI (0x9<<24) + +/* aux commands */ +#define PCL_CMD_NOP (0x0<<24) +#define PCL_CMD_LOAD (0x3<<24) +#define PCL_CMD_STOREQ (0x4<<24) +#define PCL_CMD_STORED (0xb<<24) +#define PCL_CMD_STORE0 (0x5<<24) +#define PCL_CMD_STORE1 (0x6<<24) +#define PCL_CMD_COMPARE (0xe<<24) +#define PCL_CMD_SWAP_COMPARE (0xf<<24) +#define PCL_CMD_ADD (0xd<<24) +#define PCL_CMD_BRANCH (0x7<<24) + +/* BRANCH condition codes */ +#define PCL_COND_DMARDY_SET (0x1<<20) +#define PCL_COND_DMARDY_CLEAR (0x2<<20) + +#define PCL_GEN_INTR (1<<19) +#define PCL_LAST_BUFF (1<<18) +#define PCL_LAST_CMD (PCL_LAST_BUFF) +#define PCL_WAITSTAT (1<<17) +#define PCL_BIGENDIAN (1<<16) +#define PCL_ISOMODE (1<<12) + +#define DMA0_PREV_PCL 0x100 +#define DMA1_PREV_PCL 0x120 +#define DMA2_PREV_PCL 0x140 +#define DMA3_PREV_PCL 0x160 +#define DMA4_PREV_PCL 0x180 +#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan)) + +#define DMA0_CURRENT_PCL 0x104 +#define DMA1_CURRENT_PCL 0x124 +#define DMA2_CURRENT_PCL 0x144 +#define DMA3_CURRENT_PCL 0x164 +#define DMA4_CURRENT_PCL 0x184 +#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan)) + +#define DMA0_CHAN_STAT 0x10c +#define DMA1_CHAN_STAT 0x12c +#define DMA2_CHAN_STAT 0x14c +#define DMA3_CHAN_STAT 0x16c +#define DMA4_CHAN_STAT 0x18c +#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan)) +/* CHAN_STATUS registers share bits */ +#define DMA_CHAN_STAT_SELFID (1<<31) +#define DMA_CHAN_STAT_ISOPKT (1<<30) +#define DMA_CHAN_STAT_PCIERR (1<<29) +#define DMA_CHAN_STAT_PKTERR (1<<28) +#define DMA_CHAN_STAT_PKTCMPL (1<<27) +#define DMA_CHAN_STAT_SPECIALACK (1<<14) + +#define DMA0_CHAN_CTRL 0x110 +#define DMA1_CHAN_CTRL 0x130 +#define DMA2_CHAN_CTRL 0x150 +#define DMA3_CHAN_CTRL 0x170 +#define 
DMA4_CHAN_CTRL 0x190 +#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan)) +/* CHAN_CTRL registers share bits */ +#define DMA_CHAN_CTRL_ENABLE (1<<31) +#define DMA_CHAN_CTRL_BUSY (1<<30) +#define DMA_CHAN_CTRL_LINK (1<<29) + +#define DMA0_READY 0x114 +#define DMA1_READY 0x134 +#define DMA2_READY 0x154 +#define DMA3_READY 0x174 +#define DMA4_READY 0x194 +#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan)) + +#define DMA_GLOBAL_REGISTER 0x908 + +#define FIFO_SIZES 0xa00 + +#define FIFO_CONTROL 0xa10 +#define FIFO_CONTROL_GRF_FLUSH (1<<4) +#define FIFO_CONTROL_ITF_FLUSH (1<<3) +#define FIFO_CONTROL_ATF_FLUSH (1<<2) + +#define FIFO_XMIT_THRESHOLD 0xa14 + +#define DMA0_WORD0_CMP_VALUE 0xb00 +#define DMA1_WORD0_CMP_VALUE 0xb10 +#define DMA2_WORD0_CMP_VALUE 0xb20 +#define DMA3_WORD0_CMP_VALUE 0xb30 +#define DMA4_WORD0_CMP_VALUE 0xb40 +#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan)) + +#define DMA0_WORD0_CMP_ENABLE 0xb04 +#define DMA1_WORD0_CMP_ENABLE 0xb14 +#define DMA2_WORD0_CMP_ENABLE 0xb24 +#define DMA3_WORD0_CMP_ENABLE 0xb34 +#define DMA4_WORD0_CMP_ENABLE 0xb44 +#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan)) + +#define DMA0_WORD1_CMP_VALUE 0xb08 +#define DMA1_WORD1_CMP_VALUE 0xb18 +#define DMA2_WORD1_CMP_VALUE 0xb28 +#define DMA3_WORD1_CMP_VALUE 0xb38 +#define DMA4_WORD1_CMP_VALUE 0xb48 +#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan)) + +#define DMA0_WORD1_CMP_ENABLE 0xb0c +#define DMA1_WORD1_CMP_ENABLE 0xb1c +#define DMA2_WORD1_CMP_ENABLE 0xb2c +#define DMA3_WORD1_CMP_ENABLE 0xb3c +#define DMA4_WORD1_CMP_ENABLE 0xb4c +#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan)) +/* word 1 compare enable flags */ +#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15) +#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14) +#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13) +#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12) +#define DMA_WORD1_CMP_MATCH_EXACT (1<<11) +#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10) +#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8) + +#define LINK_ID 0xf00 +#define LINK_ID_BUS(id) (id<<22) +#define LINK_ID_NODE(id) (id<<16) + +#define LINK_CONTROL 0xf04 +#define LINK_CONTROL_BUSY (1<<29) +#define LINK_CONTROL_TX_ISO_EN (1<<26) +#define LINK_CONTROL_RX_ISO_EN (1<<25) +#define LINK_CONTROL_TX_ASYNC_EN (1<<24) +#define LINK_CONTROL_RX_ASYNC_EN (1<<23) +#define LINK_CONTROL_RESET_TX (1<<21) +#define LINK_CONTROL_RESET_RX (1<<20) +#define LINK_CONTROL_CYCMASTER (1<<11) +#define LINK_CONTROL_CYCSOURCE (1<<10) +#define LINK_CONTROL_CYCTIMEREN (1<<9) +#define LINK_CONTROL_RCV_CMP_VALID (1<<7) +#define LINK_CONTROL_SNOOP_ENABLE (1<<6) + +#define CYCLE_TIMER 0xf08 + +#define LINK_PHY 0xf0c +#define LINK_PHY_READ (1<<31) +#define LINK_PHY_WRITE (1<<30) +#define LINK_PHY_ADDR(addr) (addr<<24) +#define LINK_PHY_WDATA(data) (data<<16) +#define LINK_PHY_RADDR(addr) (addr<<8) + +#define LINK_INT_STATUS 0xf14 +#define LINK_INT_ENABLE 0xf18 +/* status and enable have identical bit numbers */ +#define LINK_INT_LINK_INT (1<<31) +#define LINK_INT_PHY_TIME_OUT (1<<30) +#define LINK_INT_PHY_REG_RCVD (1<<29) +#define LINK_INT_PHY_BUSRESET (1<<28) +#define LINK_INT_TX_RDY (1<<26) +#define LINK_INT_RX_DATA_RDY (1<<25) +#define LINK_INT_IT_STUCK (1<<20) +#define LINK_INT_AT_STUCK (1<<19) +#define LINK_INT_SNTRJ (1<<17) +#define LINK_INT_HDR_ERR (1<<16) +#define LINK_INT_TC_ERR (1<<15) +#define LINK_INT_CYC_SEC (1<<11) +#define LINK_INT_CYC_STRT (1<<10) +#define LINK_INT_CYC_DONE (1<<9) +#define LINK_INT_CYC_PEND (1<<8) 
+#define LINK_INT_CYC_LOST (1<<7) +#define LINK_INT_CYC_ARB_FAILED (1<<6) +#define LINK_INT_GRF_OVER_FLOW (1<<5) +#define LINK_INT_ITF_UNDER_FLOW (1<<4) +#define LINK_INT_ATF_UNDER_FLOW (1<<3) +#define LINK_INT_IARB_FAILED (1<<0) diff --git a/kernel/drivers/firewire/ohci.c b/kernel/drivers/firewire/ohci.c new file mode 100644 index 000000000..f51d376d1 --- /dev/null +++ b/kernel/drivers/firewire/ohci.c @@ -0,0 +1,3892 @@ +/* + * Driver for OHCI 1394 controllers + * + * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/time.h> +#include <linux/vmalloc.h> +#include <linux/workqueue.h> + +#include <asm/byteorder.h> +#include <asm/page.h> + +#ifdef CONFIG_PPC_PMAC +#include <asm/pmac_feature.h> +#endif + +#include "core.h" +#include "ohci.h" + +#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args) +#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args) +#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args) + +#define DESCRIPTOR_OUTPUT_MORE 0 +#define DESCRIPTOR_OUTPUT_LAST (1 << 12) +#define DESCRIPTOR_INPUT_MORE (2 << 12) +#define DESCRIPTOR_INPUT_LAST (3 << 12) +#define DESCRIPTOR_STATUS (1 << 11) +#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) +#define DESCRIPTOR_PING (1 << 7) +#define DESCRIPTOR_YY (1 << 6) +#define DESCRIPTOR_NO_IRQ (0 << 4) +#define DESCRIPTOR_IRQ_ERROR (1 << 4) +#define DESCRIPTOR_IRQ_ALWAYS (3 << 4) +#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) +#define DESCRIPTOR_WAIT (3 << 0) + +#define DESCRIPTOR_CMD (0xf << 12) + +struct descriptor { + __le16 req_count; + __le16 control; + __le32 data_address; + __le32 branch_address; + __le16 res_count; + __le16 transfer_status; +} __attribute__((aligned(16))); + +#define CONTROL_SET(regs) (regs) +#define CONTROL_CLEAR(regs) ((regs) + 4) +#define COMMAND_PTR(regs) ((regs) + 12) +#define CONTEXT_MATCH(regs) ((regs) + 16) + +#define AR_BUFFER_SIZE (32*1024) +#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE) +/* we need at least two pages for proper list management */ +#define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? 
AR_BUFFERS_MIN : 2) + +#define MAX_ASYNC_PAYLOAD 4096 +#define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4) +#define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE) + +struct ar_context { + struct fw_ohci *ohci; + struct page *pages[AR_BUFFERS]; + void *buffer; + struct descriptor *descriptors; + dma_addr_t descriptors_bus; + void *pointer; + unsigned int last_buffer_index; + u32 regs; + struct tasklet_struct tasklet; +}; + +struct context; + +typedef int (*descriptor_callback_t)(struct context *ctx, + struct descriptor *d, + struct descriptor *last); + +/* + * A buffer that contains a block of DMA-able coherent memory used for + * storing a portion of a DMA descriptor program. + */ +struct descriptor_buffer { + struct list_head list; + dma_addr_t buffer_bus; + size_t buffer_size; + size_t used; + struct descriptor buffer[0]; +}; + +struct context { + struct fw_ohci *ohci; + u32 regs; + int total_allocation; + u32 current_bus; + bool running; + bool flushing; + + /* + * List of page-sized buffers for storing DMA descriptors. + * Head of list contains buffers in use and tail of list contains + * free buffers. + */ + struct list_head buffer_list; + + /* + * Pointer to a buffer inside buffer_list that contains the tail + * end of the current DMA program. + */ + struct descriptor_buffer *buffer_tail; + + /* + * The descriptor containing the branch address of the first + * descriptor that has not yet been filled by the device. + */ + struct descriptor *last; + + /* + * The last descriptor block in the DMA program. It contains the branch + * address that must be updated upon appending a new descriptor. + */ + struct descriptor *prev; + int prev_z; + + descriptor_callback_t callback; + + struct tasklet_struct tasklet; +}; + +#define IT_HEADER_SY(v) ((v) << 0) +#define IT_HEADER_TCODE(v) ((v) << 4) +#define IT_HEADER_CHANNEL(v) ((v) << 8) +#define IT_HEADER_TAG(v) ((v) << 14) +#define IT_HEADER_SPEED(v) ((v) << 16) +#define IT_HEADER_DATA_LENGTH(v) ((v) << 16) + +struct iso_context { + struct fw_iso_context base; + struct context context; + void *header; + size_t header_length; + unsigned long flushing_completions; + u32 mc_buffer_bus; + u16 mc_completed; + u16 last_timestamp; + u8 sync; + u8 tags; +}; + +#define CONFIG_ROM_SIZE 1024 + +struct fw_ohci { + struct fw_card card; + + __iomem char *registers; + int node_id; + int generation; + int request_generation; /* for timestamping incoming requests */ + unsigned quirks; + unsigned int pri_req_max; + u32 bus_time; + bool bus_time_running; + bool is_root; + bool csr_state_setclear_abdicate; + int n_ir; + int n_it; + /* + * Spinlock for accessing fw_ohci data. Never call out of + * this driver with this lock held. 
+ */ + spinlock_t lock; + + struct mutex phy_reg_mutex; + + void *misc_buffer; + dma_addr_t misc_buffer_bus; + + struct ar_context ar_request_ctx; + struct ar_context ar_response_ctx; + struct context at_request_ctx; + struct context at_response_ctx; + + u32 it_context_support; + u32 it_context_mask; /* unoccupied IT contexts */ + struct iso_context *it_context_list; + u64 ir_context_channels; /* unoccupied channels */ + u32 ir_context_support; + u32 ir_context_mask; /* unoccupied IR contexts */ + struct iso_context *ir_context_list; + u64 mc_channels; /* channels in use by the multichannel IR context */ + bool mc_allocated; + + __be32 *config_rom; + dma_addr_t config_rom_bus; + __be32 *next_config_rom; + dma_addr_t next_config_rom_bus; + __be32 next_header; + + __le32 *self_id; + dma_addr_t self_id_bus; + struct work_struct bus_reset_work; + + u32 self_id_buffer[512]; +}; + +static struct workqueue_struct *selfid_workqueue; + +static inline struct fw_ohci *fw_ohci(struct fw_card *card) +{ + return container_of(card, struct fw_ohci, card); +} + +#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 +#define IR_CONTEXT_BUFFER_FILL 0x80000000 +#define IR_CONTEXT_ISOCH_HEADER 0x40000000 +#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000 +#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000 +#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000 + +#define CONTEXT_RUN 0x8000 +#define CONTEXT_WAKE 0x1000 +#define CONTEXT_DEAD 0x0800 +#define CONTEXT_ACTIVE 0x0400 + +#define OHCI1394_MAX_AT_REQ_RETRIES 0xf +#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 +#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 + +#define OHCI1394_REGISTER_SIZE 0x800 +#define OHCI1394_PCI_HCI_Control 0x40 +#define SELF_ID_BUF_SIZE 0x800 +#define OHCI_TCODE_PHY_PACKET 0x0e +#define OHCI_VERSION_1_1 0x010010 + +static char ohci_driver_name[] = KBUILD_MODNAME; + +#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd +#define PCI_DEVICE_ID_AGERE_FW643 0x5901 +#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001 +#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 +#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 +#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020 +#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025 +#define PCI_DEVICE_ID_VIA_VT630X 0x3044 +#define PCI_REV_ID_VIA_VT6306 0x46 +#define PCI_DEVICE_ID_VIA_VT6315 0x3403 + +#define QUIRK_CYCLE_TIMER 0x1 +#define QUIRK_RESET_PACKET 0x2 +#define QUIRK_BE_HEADERS 0x4 +#define QUIRK_NO_1394A 0x8 +#define QUIRK_NO_MSI 0x10 +#define QUIRK_TI_SLLZ059 0x20 +#define QUIRK_IR_WAKE 0x40 + +/* In case of multiple matches in ohci_quirks[], only the first one is used. 
*/ +static const struct { + unsigned short vendor, device, revision, flags; +} ohci_quirks[] = { + {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER}, + + {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, + QUIRK_BE_HEADERS}, + + {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, + QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, + QUIRK_RESET_PACKET}, + + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, + QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER}, + + {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, + QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, + + {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID, + QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059}, + + {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID, + QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059}, + + {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_RESET_PACKET}, + + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306, + QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, + + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0, + QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID, + QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, +}; + +/* This overrides anything that was found in ohci_quirks[]. */ +static int param_quirks; +module_param_named(quirks, param_quirks, int, 0644); +MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" + ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) + ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) + ", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS) + ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) + ", disable MSI = " __stringify(QUIRK_NO_MSI) + ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) + ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) + ")"); + +#define OHCI_PARAM_DEBUG_AT_AR 1 +#define OHCI_PARAM_DEBUG_SELFIDS 2 +#define OHCI_PARAM_DEBUG_IRQS 4 +#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ + +static int param_debug; +module_param_named(debug, param_debug, int, 0644); +MODULE_PARM_DESC(debug, "Verbose logging (default = 0" + ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) + ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) + ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) + ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) + ", or a combination, or all = -1)"); + +static bool param_remote_dma; +module_param_named(remote_dma, param_remote_dma, bool, 0444); +MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)"); + +static void log_irqs(struct fw_ohci *ohci, u32 evt) +{ + if (likely(!(param_debug & + (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) + return; + + if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && + !(evt & OHCI1394_busReset)) + return; + + ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, + evt & OHCI1394_selfIDComplete ? " selfID" : "", + evt & OHCI1394_RQPkt ? " AR_req" : "", + evt & OHCI1394_RSPkt ? " AR_resp" : "", + evt & OHCI1394_reqTxComplete ? " AT_req" : "", + evt & OHCI1394_respTxComplete ? " AT_resp" : "", + evt & OHCI1394_isochRx ? " IR" : "", + evt & OHCI1394_isochTx ? " IT" : "", + evt & OHCI1394_postedWriteErr ? 
" postedWriteErr" : "", + evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", + evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", + evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", + evt & OHCI1394_regAccessFail ? " regAccessFail" : "", + evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "", + evt & OHCI1394_busReset ? " busReset" : "", + evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | + OHCI1394_RSPkt | OHCI1394_reqTxComplete | + OHCI1394_respTxComplete | OHCI1394_isochRx | + OHCI1394_isochTx | OHCI1394_postedWriteErr | + OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | + OHCI1394_cycleInconsistent | + OHCI1394_regAccessFail | OHCI1394_busReset) + ? " ?" : ""); +} + +static const char *speed[] = { + [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", +}; +static const char *power[] = { + [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", + [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", +}; +static const char port[] = { '.', '-', 'p', 'c', }; + +static char _p(u32 *s, int shift) +{ + return port[*s >> shift & 3]; +} + +static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) +{ + u32 *s; + + if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) + return; + + ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n", + self_id_count, generation, ohci->node_id); + + for (s = ohci->self_id_buffer; self_id_count--; ++s) + if ((*s & 1 << 23) == 0) + ohci_notice(ohci, + "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n", + *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), + speed[*s >> 14 & 3], *s >> 16 & 63, + power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", + *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); + else + ohci_notice(ohci, + "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", + *s, *s >> 24 & 63, + _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), + _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); +} + +static const char *evts[] = { + [0x00] = "evt_no_status", [0x01] = "-reserved-", + [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", + [0x04] = "evt_underrun", [0x05] = "evt_overrun", + [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", + [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", + [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", + [0x0c] = "-reserved-", [0x0d] = "-reserved-", + [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", + [0x10] = "-reserved-", [0x11] = "ack_complete", + [0x12] = "ack_pending ", [0x13] = "-reserved-", + [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", + [0x16] = "ack_busy_B", [0x17] = "-reserved-", + [0x18] = "-reserved-", [0x19] = "-reserved-", + [0x1a] = "-reserved-", [0x1b] = "ack_tardy", + [0x1c] = "-reserved-", [0x1d] = "ack_data_error", + [0x1e] = "ack_type_error", [0x1f] = "-reserved-", + [0x20] = "pending/cancelled", +}; +static const char *tcodes[] = { + [0x0] = "QW req", [0x1] = "BW req", + [0x2] = "W resp", [0x3] = "-reserved-", + [0x4] = "QR req", [0x5] = "BR req", + [0x6] = "QR resp", [0x7] = "BR resp", + [0x8] = "cycle start", [0x9] = "Lk req", + [0xa] = "async stream packet", [0xb] = "Lk resp", + [0xc] = "-reserved-", [0xd] = "-reserved-", + [0xe] = "link internal", [0xf] = "-reserved-", +}; + +static void log_ar_at_event(struct fw_ohci *ohci, + char dir, int speed, u32 *header, int evt) +{ + int tcode = header[0] >> 4 & 0xf; + char specific[12]; + + if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) + return; + + if (unlikely(evt >= ARRAY_SIZE(evts))) + evt = 0x1f; + + if (evt == OHCI1394_evt_bus_reset) { + ohci_notice(ohci, "A%c evt_bus_reset, generation 
%d\n", + dir, (header[2] >> 16) & 0xff); + return; + } + + switch (tcode) { + case 0x0: case 0x6: case 0x8: + snprintf(specific, sizeof(specific), " = %08x", + be32_to_cpu((__force __be32)header[3])); + break; + case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: + snprintf(specific, sizeof(specific), " %x,%x", + header[3] >> 16, header[3] & 0xffff); + break; + default: + specific[0] = '\0'; + } + + switch (tcode) { + case 0xa: + ohci_notice(ohci, "A%c %s, %s\n", + dir, evts[evt], tcodes[tcode]); + break; + case 0xe: + ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", + dir, evts[evt], header[1], header[2]); + break; + case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: + ohci_notice(ohci, + "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", + dir, speed, header[0] >> 10 & 0x3f, + header[1] >> 16, header[0] >> 16, evts[evt], + tcodes[tcode], header[1] & 0xffff, header[2], specific); + break; + default: + ohci_notice(ohci, + "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", + dir, speed, header[0] >> 10 & 0x3f, + header[1] >> 16, header[0] >> 16, evts[evt], + tcodes[tcode], specific); + } +} + +static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) +{ + writel(data, ohci->registers + offset); +} + +static inline u32 reg_read(const struct fw_ohci *ohci, int offset) +{ + return readl(ohci->registers + offset); +} + +static inline void flush_writes(const struct fw_ohci *ohci) +{ + /* Do a dummy read to flush writes. */ + reg_read(ohci, OHCI1394_Version); +} + +/* + * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and + * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex. + * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg() + * directly. Exceptions are intrinsically serialized contexts like pci_probe. + */ +static int read_phy_reg(struct fw_ohci *ohci, int addr) +{ + u32 val; + int i; + + reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); + for (i = 0; i < 3 + 100; i++) { + val = reg_read(ohci, OHCI1394_PhyControl); + if (!~val) + return -ENODEV; /* Card was ejected. */ + + if (val & OHCI1394_PhyControl_ReadDone) + return OHCI1394_PhyControl_ReadData(val); + + /* + * Try a few times without waiting. Sleeping is necessary + * only when the link/PHY interface is busy. + */ + if (i >= 3) + msleep(1); + } + ohci_err(ohci, "failed to read phy reg %d\n", addr); + dump_stack(); + + return -EBUSY; +} + +static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) +{ + int i; + + reg_write(ohci, OHCI1394_PhyControl, + OHCI1394_PhyControl_Write(addr, val)); + for (i = 0; i < 3 + 100; i++) { + val = reg_read(ohci, OHCI1394_PhyControl); + if (!~val) + return -ENODEV; /* Card was ejected. */ + + if (!(val & OHCI1394_PhyControl_WritePending)) + return 0; + + if (i >= 3) + msleep(1); + } + ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val); + dump_stack(); + + return -EBUSY; +} + +static int update_phy_reg(struct fw_ohci *ohci, int addr, + int clear_bits, int set_bits) +{ + int ret = read_phy_reg(ohci, addr); + if (ret < 0) + return ret; + + /* + * The interrupt status bits are cleared by writing a one bit. + * Avoid clearing them unless explicitly requested in set_bits. 
+ */ + if (addr == 5) + clear_bits |= PHY_INT_STATUS_BITS; + + return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); +} + +static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) +{ + int ret; + + ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); + if (ret < 0) + return ret; + + return read_phy_reg(ohci, addr); +} + +static int ohci_read_phy_reg(struct fw_card *card, int addr) +{ + struct fw_ohci *ohci = fw_ohci(card); + int ret; + + mutex_lock(&ohci->phy_reg_mutex); + ret = read_phy_reg(ohci, addr); + mutex_unlock(&ohci->phy_reg_mutex); + + return ret; +} + +static int ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) +{ + struct fw_ohci *ohci = fw_ohci(card); + int ret; + + mutex_lock(&ohci->phy_reg_mutex); + ret = update_phy_reg(ohci, addr, clear_bits, set_bits); + mutex_unlock(&ohci->phy_reg_mutex); + + return ret; +} + +static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i) +{ + return page_private(ctx->pages[i]); +} + +static void ar_context_link_page(struct ar_context *ctx, unsigned int index) +{ + struct descriptor *d; + + d = &ctx->descriptors[index]; + d->branch_address &= cpu_to_le32(~0xf); + d->res_count = cpu_to_le16(PAGE_SIZE); + d->transfer_status = 0; + + wmb(); /* finish init of new descriptors before branch_address update */ + d = &ctx->descriptors[ctx->last_buffer_index]; + d->branch_address |= cpu_to_le32(1); + + ctx->last_buffer_index = index; + + reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); +} + +static void ar_context_release(struct ar_context *ctx) +{ + unsigned int i; + + vunmap(ctx->buffer); + + for (i = 0; i < AR_BUFFERS; i++) + if (ctx->pages[i]) { + dma_unmap_page(ctx->ohci->card.device, + ar_buffer_bus(ctx, i), + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(ctx->pages[i]); + } +} + +static void ar_context_abort(struct ar_context *ctx, const char *error_msg) +{ + struct fw_ohci *ohci = ctx->ohci; + + if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { + reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); + flush_writes(ohci); + + ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg); + } + /* FIXME: restart? */ +} + +static inline unsigned int ar_next_buffer_index(unsigned int index) +{ + return (index + 1) % AR_BUFFERS; +} + +static inline unsigned int ar_first_buffer_index(struct ar_context *ctx) +{ + return ar_next_buffer_index(ctx->last_buffer_index); +} + +/* + * We search for the buffer that contains the last AR packet DMA data written + * by the controller. + */ +static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, + unsigned int *buffer_offset) +{ + unsigned int i, next_i, last = ctx->last_buffer_index; + __le16 res_count, next_res_count; + + i = ar_first_buffer_index(ctx); + res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); + + /* A buffer that is not yet completely filled must be the last one. */ + while (i != last && res_count == 0) { + + /* Peek at the next descriptor. */ + next_i = ar_next_buffer_index(i); + rmb(); /* read descriptors in order */ + next_res_count = ACCESS_ONCE( + ctx->descriptors[next_i].res_count); + /* + * If the next descriptor is still empty, we must stop at this + * descriptor. + */ + if (next_res_count == cpu_to_le16(PAGE_SIZE)) { + /* + * The exception is when the DMA data for one packet is + * split over three buffers; in this case, the middle + * buffer's descriptor might be never updated by the + * controller and look still empty, and we have to peek + * at the third one. 
+ */ + if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { + next_i = ar_next_buffer_index(next_i); + rmb(); + next_res_count = ACCESS_ONCE( + ctx->descriptors[next_i].res_count); + if (next_res_count != cpu_to_le16(PAGE_SIZE)) + goto next_buffer_is_active; + } + + break; + } + +next_buffer_is_active: + i = next_i; + res_count = next_res_count; + } + + rmb(); /* read res_count before the DMA data */ + + *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count); + if (*buffer_offset > PAGE_SIZE) { + *buffer_offset = 0; + ar_context_abort(ctx, "corrupted descriptor"); + } + + return i; +} + +static void ar_sync_buffers_for_cpu(struct ar_context *ctx, + unsigned int end_buffer_index, + unsigned int end_buffer_offset) +{ + unsigned int i; + + i = ar_first_buffer_index(ctx); + while (i != end_buffer_index) { + dma_sync_single_for_cpu(ctx->ohci->card.device, + ar_buffer_bus(ctx, i), + PAGE_SIZE, DMA_FROM_DEVICE); + i = ar_next_buffer_index(i); + } + if (end_buffer_offset > 0) + dma_sync_single_for_cpu(ctx->ohci->card.device, + ar_buffer_bus(ctx, i), + end_buffer_offset, DMA_FROM_DEVICE); +} + +#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) +#define cond_le32_to_cpu(v) \ + (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v)) +#else +#define cond_le32_to_cpu(v) le32_to_cpu(v) +#endif + +static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) +{ + struct fw_ohci *ohci = ctx->ohci; + struct fw_packet p; + u32 status, length, tcode; + int evt; + + p.header[0] = cond_le32_to_cpu(buffer[0]); + p.header[1] = cond_le32_to_cpu(buffer[1]); + p.header[2] = cond_le32_to_cpu(buffer[2]); + + tcode = (p.header[0] >> 4) & 0x0f; + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_READ_QUADLET_RESPONSE: + p.header[3] = (__force __u32) buffer[3]; + p.header_length = 16; + p.payload_length = 0; + break; + + case TCODE_READ_BLOCK_REQUEST : + p.header[3] = cond_le32_to_cpu(buffer[3]); + p.header_length = 16; + p.payload_length = 0; + break; + + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_BLOCK_RESPONSE: + case TCODE_LOCK_REQUEST: + case TCODE_LOCK_RESPONSE: + p.header[3] = cond_le32_to_cpu(buffer[3]); + p.header_length = 16; + p.payload_length = p.header[3] >> 16; + if (p.payload_length > MAX_ASYNC_PAYLOAD) { + ar_context_abort(ctx, "invalid packet length"); + return NULL; + } + break; + + case TCODE_WRITE_RESPONSE: + case TCODE_READ_QUADLET_REQUEST: + case OHCI_TCODE_PHY_PACKET: + p.header_length = 12; + p.payload_length = 0; + break; + + default: + ar_context_abort(ctx, "invalid tcode"); + return NULL; + } + + p.payload = (void *) buffer + p.header_length; + + /* FIXME: What to do about evt_* errors? */ + length = (p.header_length + p.payload_length + 3) / 4; + status = cond_le32_to_cpu(buffer[length]); + evt = (status >> 16) & 0x1f; + + p.ack = evt - 16; + p.speed = (status >> 21) & 0x7; + p.timestamp = status & 0xffff; + p.generation = ohci->request_generation; + + log_ar_at_event(ohci, 'R', p.speed, p.header, evt); + + /* + * Several controllers, notably from NEC and VIA, forget to + * write ack_complete status at PHY packet reception. + */ + if (evt == OHCI1394_evt_no_status && + (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4)) + p.ack = ACK_COMPLETE; + + /* + * The OHCI bus reset handler synthesizes a PHY packet with + * the new generation number when a bus reset happens (see + * section 8.4.2.3). This helps us determine when a request + * was received and make sure we send the response in the same + * generation. 
We only need this for requests; for responses + * we use the unique tlabel for finding the matching + * request. + * + * Alas some chips sometimes emit bus reset packets with a + * wrong generation. We set the correct generation for these + * at a slightly incorrect time (in bus_reset_work). + */ + if (evt == OHCI1394_evt_bus_reset) { + if (!(ohci->quirks & QUIRK_RESET_PACKET)) + ohci->request_generation = (p.header[2] >> 16) & 0xff; + } else if (ctx == &ohci->ar_request_ctx) { + fw_core_handle_request(&ohci->card, &p); + } else { + fw_core_handle_response(&ohci->card, &p); + } + + return buffer + length + 1; +} + +static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end) +{ + void *next; + + while (p < end) { + next = handle_ar_packet(ctx, p); + if (!next) + return p; + p = next; + } + + return p; +} + +static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) +{ + unsigned int i; + + i = ar_first_buffer_index(ctx); + while (i != end_buffer) { + dma_sync_single_for_device(ctx->ohci->card.device, + ar_buffer_bus(ctx, i), + PAGE_SIZE, DMA_FROM_DEVICE); + ar_context_link_page(ctx, i); + i = ar_next_buffer_index(i); + } +} + +static void ar_context_tasklet(unsigned long data) +{ + struct ar_context *ctx = (struct ar_context *)data; + unsigned int end_buffer_index, end_buffer_offset; + void *p, *end; + + p = ctx->pointer; + if (!p) + return; + + end_buffer_index = ar_search_last_active_buffer(ctx, + &end_buffer_offset); + ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset); + end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; + + if (end_buffer_index < ar_first_buffer_index(ctx)) { + /* + * The filled part of the overall buffer wraps around; handle + * all packets up to the buffer end here. If the last packet + * wraps around, its tail will be visible after the buffer end + * because the buffer start pages are mapped there again. 
+ */ + void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; + p = handle_ar_packets(ctx, p, buffer_end); + if (p < buffer_end) + goto error; + /* adjust p to point back into the actual buffer */ + p -= AR_BUFFERS * PAGE_SIZE; + } + + p = handle_ar_packets(ctx, p, end); + if (p != end) { + if (p > end) + ar_context_abort(ctx, "inconsistent descriptor"); + goto error; + } + + ctx->pointer = p; + ar_recycle_buffers(ctx, end_buffer_index); + + return; + +error: + ctx->pointer = NULL; +} + +static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, + unsigned int descriptors_offset, u32 regs) +{ + unsigned int i; + dma_addr_t dma_addr; + struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES]; + struct descriptor *d; + + ctx->regs = regs; + ctx->ohci = ohci; + tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); + + for (i = 0; i < AR_BUFFERS; i++) { + ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32); + if (!ctx->pages[i]) + goto out_of_memory; + dma_addr = dma_map_page(ohci->card.device, ctx->pages[i], + 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(ohci->card.device, dma_addr)) { + __free_page(ctx->pages[i]); + ctx->pages[i] = NULL; + goto out_of_memory; + } + set_page_private(ctx->pages[i], dma_addr); + } + + for (i = 0; i < AR_BUFFERS; i++) + pages[i] = ctx->pages[i]; + for (i = 0; i < AR_WRAPAROUND_PAGES; i++) + pages[AR_BUFFERS + i] = ctx->pages[i]; + ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); + if (!ctx->buffer) + goto out_of_memory; + + ctx->descriptors = ohci->misc_buffer + descriptors_offset; + ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; + + for (i = 0; i < AR_BUFFERS; i++) { + d = &ctx->descriptors[i]; + d->req_count = cpu_to_le16(PAGE_SIZE); + d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | + DESCRIPTOR_STATUS | + DESCRIPTOR_BRANCH_ALWAYS); + d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); + d->branch_address = cpu_to_le32(ctx->descriptors_bus + + ar_next_buffer_index(i) * sizeof(struct descriptor)); + } + + return 0; + +out_of_memory: + ar_context_release(ctx); + + return -ENOMEM; +} + +static void ar_context_run(struct ar_context *ctx) +{ + unsigned int i; + + for (i = 0; i < AR_BUFFERS; i++) + ar_context_link_page(ctx, i); + + ctx->pointer = ctx->buffer; + + reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); + reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); +} + +static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) +{ + __le16 branch; + + branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); + + /* figure out which descriptor the branch address goes in */ + if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) + return d; + else + return d + z - 1; +} + +static void context_tasklet(unsigned long data) +{ + struct context *ctx = (struct context *) data; + struct descriptor *d, *last; + u32 address; + int z; + struct descriptor_buffer *desc; + + desc = list_entry(ctx->buffer_list.next, + struct descriptor_buffer, list); + last = ctx->last; + while (last->branch_address != 0) { + struct descriptor_buffer *old_desc = desc; + address = le32_to_cpu(last->branch_address); + z = address & 0xf; + address &= ~0xf; + ctx->current_bus = address; + + /* If the branch address points to a buffer outside of the + * current buffer, advance to the next buffer. 
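The low nibble of every branchAddress carries Z, the descriptor count of the next block, which is why find_branch_descriptor() and the tasklet above mask with 0xf and ~0xf. A standalone sketch of that packing, assuming only the 16-byte descriptor alignment visible in the code; uint32_t stands in for dma_addr_t.

#include <assert.h>
#include <stdint.h>

static uint32_t pack_branch(uint32_t next_block_bus, unsigned int z)
{
	assert((next_block_bus & 0xf) == 0);	/* blocks are 16-byte aligned */
	assert(z <= 15);
	return next_block_bus | z;		/* reuse the low bits for Z */
}

static void unpack_branch(uint32_t branch, uint32_t *bus, unsigned int *z)
{
	*z = branch & 0xf;			/* descriptors in the next block */
	*bus = branch & ~(uint32_t)0xf;		/* address of the next block */
}

int main(void)
{
	uint32_t bus;
	unsigned int z;

	unpack_branch(pack_branch(0x12345670, 3), &bus, &z);
	assert(bus == 0x12345670 && z == 3);
	return 0;
}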
*/ + if (address < desc->buffer_bus || + address >= desc->buffer_bus + desc->used) + desc = list_entry(desc->list.next, + struct descriptor_buffer, list); + d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); + last = find_branch_descriptor(d, z); + + if (!ctx->callback(ctx, d, last)) + break; + + if (old_desc != desc) { + /* If we've advanced to the next buffer, move the + * previous buffer to the free list. */ + unsigned long flags; + old_desc->used = 0; + spin_lock_irqsave(&ctx->ohci->lock, flags); + list_move_tail(&old_desc->list, &ctx->buffer_list); + spin_unlock_irqrestore(&ctx->ohci->lock, flags); + } + ctx->last = last; + } +} + +/* + * Allocate a new buffer and add it to the list of free buffers for this + * context. Must be called with ohci->lock held. + */ +static int context_add_buffer(struct context *ctx) +{ + struct descriptor_buffer *desc; + dma_addr_t uninitialized_var(bus_addr); + int offset; + + /* + * 16MB of descriptors should be far more than enough for any DMA + * program. This will catch run-away userspace or DoS attacks. + */ + if (ctx->total_allocation >= 16*1024*1024) + return -ENOMEM; + + desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, + &bus_addr, GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + offset = (void *)&desc->buffer - (void *)desc; + desc->buffer_size = PAGE_SIZE - offset; + desc->buffer_bus = bus_addr + offset; + desc->used = 0; + + list_add_tail(&desc->list, &ctx->buffer_list); + ctx->total_allocation += PAGE_SIZE; + + return 0; +} + +static int context_init(struct context *ctx, struct fw_ohci *ohci, + u32 regs, descriptor_callback_t callback) +{ + ctx->ohci = ohci; + ctx->regs = regs; + ctx->total_allocation = 0; + + INIT_LIST_HEAD(&ctx->buffer_list); + if (context_add_buffer(ctx) < 0) + return -ENOMEM; + + ctx->buffer_tail = list_entry(ctx->buffer_list.next, + struct descriptor_buffer, list); + + tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); + ctx->callback = callback; + + /* + * We put a dummy descriptor in the buffer that has a NULL + * branch address and looks like it's been sent. That way we + * have a descriptor to append DMA programs to. + */ + memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); + ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); + ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); + ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); + ctx->last = ctx->buffer_tail->buffer; + ctx->prev = ctx->buffer_tail->buffer; + ctx->prev_z = 1; + + return 0; +} + +static void context_release(struct context *ctx) +{ + struct fw_card *card = &ctx->ohci->card; + struct descriptor_buffer *desc, *tmp; + + list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) + dma_free_coherent(card->device, PAGE_SIZE, desc, + desc->buffer_bus - + ((void *)&desc->buffer - (void *)desc)); +} + +/* Must be called with ohci->lock held */ +static struct descriptor *context_get_descriptors(struct context *ctx, + int z, dma_addr_t *d_bus) +{ + struct descriptor *d = NULL; + struct descriptor_buffer *desc = ctx->buffer_tail; + + if (z * sizeof(*d) > desc->buffer_size) + return NULL; + + if (z * sizeof(*d) > desc->buffer_size - desc->used) { + /* No room for the descriptor in this buffer, so advance to the + * next one. */ + + if (desc->list.next == &ctx->buffer_list) { + /* If there is no free buffer next in the list, + * allocate one. 
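A rough userspace model of the grow-on-demand descriptor memory used by context_add_buffer() and context_get_descriptors() above, assuming a 4 KiB chunk size and with malloc() standing in for dma_alloc_coherent(). Unlike the driver it does not keep filled chunks on a list for recycling; it only shows the capped growth and the per-chunk bump allocation. All names (pool_get, POOL_LIMIT) are invented for the sketch.

#include <stdlib.h>
#include <string.h>

#define POOL_CHUNK	4096			/* stands in for PAGE_SIZE */
#define POOL_LIMIT	(16 * 1024 * 1024)	/* runaway-DMA-program cap */

struct pool {
	char *chunk;		/* chunk currently being filled */
	size_t used;		/* bytes handed out from it */
	size_t total;		/* all chunks ever allocated */
};

/* hand out 'len' zeroed bytes, growing the pool if needed */
static void *pool_get(struct pool *p, size_t len)
{
	if (len > POOL_CHUNK)
		return NULL;
	if (!p->chunk || p->used + len > POOL_CHUNK) {
		if (p->total >= POOL_LIMIT)
			return NULL;		/* refuse further growth */
		p->chunk = malloc(POOL_CHUNK);
		if (!p->chunk)
			return NULL;
		p->total += POOL_CHUNK;
		p->used = 0;
	}
	void *d = p->chunk + p->used;
	p->used += len;
	return memset(d, 0, len);
}

int main(void)
{
	struct pool p = { 0 };
	return pool_get(&p, 64) ? 0 : 1;
}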
*/ + if (context_add_buffer(ctx) < 0) + return NULL; + } + desc = list_entry(desc->list.next, + struct descriptor_buffer, list); + ctx->buffer_tail = desc; + } + + d = desc->buffer + desc->used / sizeof(*d); + memset(d, 0, z * sizeof(*d)); + *d_bus = desc->buffer_bus + desc->used; + + return d; +} + +static void context_run(struct context *ctx, u32 extra) +{ + struct fw_ohci *ohci = ctx->ohci; + + reg_write(ohci, COMMAND_PTR(ctx->regs), + le32_to_cpu(ctx->last->branch_address)); + reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); + reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); + ctx->running = true; + flush_writes(ohci); +} + +static void context_append(struct context *ctx, + struct descriptor *d, int z, int extra) +{ + dma_addr_t d_bus; + struct descriptor_buffer *desc = ctx->buffer_tail; + struct descriptor *d_branch; + + d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); + + desc->used += (z + extra) * sizeof(*d); + + wmb(); /* finish init of new descriptors before branch_address update */ + + d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); + d_branch->branch_address = cpu_to_le32(d_bus | z); + + /* + * VT6306 incorrectly checks only the single descriptor at the + * CommandPtr when the wake bit is written, so if it's a + * multi-descriptor block starting with an INPUT_MORE, put a copy of + * the branch address in the first descriptor. + * + * Not doing this for transmit contexts since not sure how it interacts + * with skip addresses. + */ + if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && + d_branch != ctx->prev && + (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == + cpu_to_le16(DESCRIPTOR_INPUT_MORE)) { + ctx->prev->branch_address = cpu_to_le32(d_bus | z); + } + + ctx->prev = d; + ctx->prev_z = z; +} + +static void context_stop(struct context *ctx) +{ + struct fw_ohci *ohci = ctx->ohci; + u32 reg; + int i; + + reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); + ctx->running = false; + + for (i = 0; i < 1000; i++) { + reg = reg_read(ohci, CONTROL_SET(ctx->regs)); + if ((reg & CONTEXT_ACTIVE) == 0) + return; + + if (i) + udelay(10); + } + ohci_err(ohci, "DMA context still active (0x%08x)\n", reg); +} + +struct driver_data { + u8 inline_data[8]; + struct fw_packet *packet; +}; + +/* + * This function apppends a packet to the DMA queue for transmission. + * Must always be called with the ochi->lock held to ensure proper + * generation handling and locking around packet queue manipulation. + */ +static int at_context_queue_packet(struct context *ctx, + struct fw_packet *packet) +{ + struct fw_ohci *ohci = ctx->ohci; + dma_addr_t d_bus, uninitialized_var(payload_bus); + struct driver_data *driver_data; + struct descriptor *d, *last; + __le32 *header; + int z, tcode; + + d = context_get_descriptors(ctx, 4, &d_bus); + if (d == NULL) { + packet->ack = RCODE_SEND_ERROR; + return -1; + } + + d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); + d[0].res_count = cpu_to_le16(packet->timestamp); + + /* + * The DMA format for asynchronous link packets is different + * from the IEEE1394 layout, so shift the fields around + * accordingly. 
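A worked example of the header shuffle performed by the switch that follows, for a quadlet write request: the destination ID moves from the top half of quadlet 0 into quadlet 1, and the speed code takes its place. The field values below are made up; only the masking mirrors the driver code.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* IEEE 1394 layout: dest_id|tlabel|rt|tcode|pri, src_id|offset_hi, offset_lo */
	uint32_t h[3] = { 0xffc15500, 0xffc0ffff, 0xf0000234 };
	unsigned int speed = 2;				/* S400 */
	uint32_t at[3];

	at[0] = (h[0] & 0xffff) | (speed << 16);	/* speed replaces dest_id */
	at[1] = (h[1] & 0xffff) | (h[0] & 0xffff0000);	/* dest_id joins offset_hi */
	at[2] = h[2];					/* offset_lo unchanged */

	assert(at[0] == 0x00025500);
	assert(at[1] == 0xffc1ffff);
	return 0;
}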
+ */ + + tcode = (packet->header[0] >> 4) & 0x0f; + header = (__le32 *) &d[1]; + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_WRITE_RESPONSE: + case TCODE_READ_QUADLET_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_READ_QUADLET_RESPONSE: + case TCODE_READ_BLOCK_RESPONSE: + case TCODE_LOCK_REQUEST: + case TCODE_LOCK_RESPONSE: + header[0] = cpu_to_le32((packet->header[0] & 0xffff) | + (packet->speed << 16)); + header[1] = cpu_to_le32((packet->header[1] & 0xffff) | + (packet->header[0] & 0xffff0000)); + header[2] = cpu_to_le32(packet->header[2]); + + if (TCODE_IS_BLOCK_PACKET(tcode)) + header[3] = cpu_to_le32(packet->header[3]); + else + header[3] = (__force __le32) packet->header[3]; + + d[0].req_count = cpu_to_le16(packet->header_length); + break; + + case TCODE_LINK_INTERNAL: + header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | + (packet->speed << 16)); + header[1] = cpu_to_le32(packet->header[1]); + header[2] = cpu_to_le32(packet->header[2]); + d[0].req_count = cpu_to_le16(12); + + if (is_ping_packet(&packet->header[1])) + d[0].control |= cpu_to_le16(DESCRIPTOR_PING); + break; + + case TCODE_STREAM_DATA: + header[0] = cpu_to_le32((packet->header[0] & 0xffff) | + (packet->speed << 16)); + header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); + d[0].req_count = cpu_to_le16(8); + break; + + default: + /* BUG(); */ + packet->ack = RCODE_SEND_ERROR; + return -1; + } + + BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); + driver_data = (struct driver_data *) &d[3]; + driver_data->packet = packet; + packet->driver_data = driver_data; + + if (packet->payload_length > 0) { + if (packet->payload_length > sizeof(driver_data->inline_data)) { + payload_bus = dma_map_single(ohci->card.device, + packet->payload, + packet->payload_length, + DMA_TO_DEVICE); + if (dma_mapping_error(ohci->card.device, payload_bus)) { + packet->ack = RCODE_SEND_ERROR; + return -1; + } + packet->payload_bus = payload_bus; + packet->payload_mapped = true; + } else { + memcpy(driver_data->inline_data, packet->payload, + packet->payload_length); + payload_bus = d_bus + 3 * sizeof(*d); + } + + d[2].req_count = cpu_to_le16(packet->payload_length); + d[2].data_address = cpu_to_le32(payload_bus); + last = &d[2]; + z = 3; + } else { + last = &d[0]; + z = 2; + } + + last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | + DESCRIPTOR_IRQ_ALWAYS | + DESCRIPTOR_BRANCH_ALWAYS); + + /* FIXME: Document how the locking works. */ + if (ohci->generation != packet->generation) { + if (packet->payload_mapped) + dma_unmap_single(ohci->card.device, payload_bus, + packet->payload_length, DMA_TO_DEVICE); + packet->ack = RCODE_GENERATION; + return -1; + } + + context_append(ctx, d, z, 4 - z); + + if (ctx->running) + reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); + else + context_run(ctx, 0); + + return 0; +} + +static void at_context_flush(struct context *ctx) +{ + tasklet_disable(&ctx->tasklet); + + ctx->flushing = true; + context_tasklet((unsigned long)ctx); + ctx->flushing = false; + + tasklet_enable(&ctx->tasklet); +} + +static int handle_at_packet(struct context *context, + struct descriptor *d, + struct descriptor *last) +{ + struct driver_data *driver_data; + struct fw_packet *packet; + struct fw_ohci *ohci = context->ohci; + int evt; + + if (last->transfer_status == 0 && !context->flushing) + /* This descriptor isn't done yet, stop iteration. 
*/ + return 0; + + driver_data = (struct driver_data *) &d[3]; + packet = driver_data->packet; + if (packet == NULL) + /* This packet was cancelled, just continue. */ + return 1; + + if (packet->payload_mapped) + dma_unmap_single(ohci->card.device, packet->payload_bus, + packet->payload_length, DMA_TO_DEVICE); + + evt = le16_to_cpu(last->transfer_status) & 0x1f; + packet->timestamp = le16_to_cpu(last->res_count); + + log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); + + switch (evt) { + case OHCI1394_evt_timeout: + /* Async response transmit timed out. */ + packet->ack = RCODE_CANCELLED; + break; + + case OHCI1394_evt_flushed: + /* + * The packet was flushed should give same error as + * when we try to use a stale generation count. + */ + packet->ack = RCODE_GENERATION; + break; + + case OHCI1394_evt_missing_ack: + if (context->flushing) + packet->ack = RCODE_GENERATION; + else { + /* + * Using a valid (current) generation count, but the + * node is not on the bus or not sending acks. + */ + packet->ack = RCODE_NO_ACK; + } + break; + + case ACK_COMPLETE + 0x10: + case ACK_PENDING + 0x10: + case ACK_BUSY_X + 0x10: + case ACK_BUSY_A + 0x10: + case ACK_BUSY_B + 0x10: + case ACK_DATA_ERROR + 0x10: + case ACK_TYPE_ERROR + 0x10: + packet->ack = evt - 0x10; + break; + + case OHCI1394_evt_no_status: + if (context->flushing) { + packet->ack = RCODE_GENERATION; + break; + } + /* fall through */ + + default: + packet->ack = RCODE_SEND_ERROR; + break; + } + + packet->callback(packet, &ohci->card, packet->ack); + + return 1; +} + +#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) +#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) +#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) +#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) +#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) + +static void handle_local_rom(struct fw_ohci *ohci, + struct fw_packet *packet, u32 csr) +{ + struct fw_packet response; + int tcode, length, i; + + tcode = HEADER_GET_TCODE(packet->header[0]); + if (TCODE_IS_BLOCK_PACKET(tcode)) + length = HEADER_GET_DATA_LENGTH(packet->header[3]); + else + length = 4; + + i = csr - CSR_CONFIG_ROM; + if (i + length > CONFIG_ROM_SIZE) { + fw_fill_response(&response, packet->header, + RCODE_ADDRESS_ERROR, NULL, 0); + } else if (!TCODE_IS_READ_REQUEST(tcode)) { + fw_fill_response(&response, packet->header, + RCODE_TYPE_ERROR, NULL, 0); + } else { + fw_fill_response(&response, packet->header, RCODE_COMPLETE, + (void *) ohci->config_rom + i, length); + } + + fw_core_handle_response(&ohci->card, &response); +} + +static void handle_local_lock(struct fw_ohci *ohci, + struct fw_packet *packet, u32 csr) +{ + struct fw_packet response; + int tcode, length, ext_tcode, sel, try; + __be32 *payload, lock_old; + u32 lock_arg, lock_data; + + tcode = HEADER_GET_TCODE(packet->header[0]); + length = HEADER_GET_DATA_LENGTH(packet->header[3]); + payload = packet->payload; + ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); + + if (tcode == TCODE_LOCK_REQUEST && + ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { + lock_arg = be32_to_cpu(payload[0]); + lock_data = be32_to_cpu(payload[1]); + } else if (tcode == TCODE_READ_QUADLET_REQUEST) { + lock_arg = 0; + lock_data = 0; + } else { + fw_fill_response(&response, packet->header, + RCODE_TYPE_ERROR, NULL, 0); + goto out; + } + + sel = (csr - CSR_BUS_MANAGER_ID) / 4; + reg_write(ohci, OHCI1394_CSRData, lock_data); + reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); + reg_write(ohci, OHCI1394_CSRControl, sel); + + 
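The HEADER_GET_* helpers defined a little earlier are plain shift-and-mask accessors over the request header quadlets; a standalone worked example with made-up quadlets (the macros are re-declared locally so the snippet compiles on its own).

#include <assert.h>
#include <stdint.h>

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)

int main(void)
{
	uint32_t q0 = 0xffc25150;	/* dest 0xffc2, tcode 5 (read block request) */
	uint32_t q3 = 0x02000000;	/* data_length 512, extended tcode 0 */

	assert(HEADER_GET_DESTINATION(q0) == 0xffc2);
	assert(HEADER_GET_TCODE(q0) == 5);
	assert(HEADER_GET_DATA_LENGTH(q3) == 512);
	return 0;
}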
for (try = 0; try < 20; try++) + if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { + lock_old = cpu_to_be32(reg_read(ohci, + OHCI1394_CSRData)); + fw_fill_response(&response, packet->header, + RCODE_COMPLETE, + &lock_old, sizeof(lock_old)); + goto out; + } + + ohci_err(ohci, "swap not done (CSR lock timeout)\n"); + fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); + + out: + fw_core_handle_response(&ohci->card, &response); +} + +static void handle_local_request(struct context *ctx, struct fw_packet *packet) +{ + u64 offset, csr; + + if (ctx == &ctx->ohci->at_request_ctx) { + packet->ack = ACK_PENDING; + packet->callback(packet, &ctx->ohci->card, packet->ack); + } + + offset = + ((unsigned long long) + HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | + packet->header[2]; + csr = offset - CSR_REGISTER_BASE; + + /* Handle config rom reads. */ + if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) + handle_local_rom(ctx->ohci, packet, csr); + else switch (csr) { + case CSR_BUS_MANAGER_ID: + case CSR_BANDWIDTH_AVAILABLE: + case CSR_CHANNELS_AVAILABLE_HI: + case CSR_CHANNELS_AVAILABLE_LO: + handle_local_lock(ctx->ohci, packet, csr); + break; + default: + if (ctx == &ctx->ohci->at_request_ctx) + fw_core_handle_request(&ctx->ohci->card, packet); + else + fw_core_handle_response(&ctx->ohci->card, packet); + break; + } + + if (ctx == &ctx->ohci->at_response_ctx) { + packet->ack = ACK_COMPLETE; + packet->callback(packet, &ctx->ohci->card, packet->ack); + } +} + +static void at_context_transmit(struct context *ctx, struct fw_packet *packet) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&ctx->ohci->lock, flags); + + if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && + ctx->ohci->generation == packet->generation) { + spin_unlock_irqrestore(&ctx->ohci->lock, flags); + handle_local_request(ctx, packet); + return; + } + + ret = at_context_queue_packet(ctx, packet); + spin_unlock_irqrestore(&ctx->ohci->lock, flags); + + if (ret < 0) + packet->callback(packet, &ctx->ohci->card, packet->ack); + +} + +static void detect_dead_context(struct fw_ohci *ohci, + const char *name, unsigned int regs) +{ + u32 ctl; + + ctl = reg_read(ohci, CONTROL_SET(regs)); + if (ctl & CONTEXT_DEAD) + ohci_err(ohci, "DMA context %s has stopped, error code: %s\n", + name, evts[ctl & 0x1f]); +} + +static void handle_dead_contexts(struct fw_ohci *ohci) +{ + unsigned int i; + char name[8]; + + detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase); + detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase); + detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase); + detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase); + for (i = 0; i < 32; ++i) { + if (!(ohci->it_context_support & (1 << i))) + continue; + sprintf(name, "IT%u", i); + detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i)); + } + for (i = 0; i < 32; ++i) { + if (!(ohci->ir_context_support & (1 << i))) + continue; + sprintf(name, "IR%u", i); + detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i)); + } + /* TODO: maybe try to flush and restart the dead contexts */ +} + +static u32 cycle_timer_ticks(u32 cycle_timer) +{ + u32 ticks; + + ticks = cycle_timer & 0xfff; + ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); + ticks += (3072 * 8000) * (cycle_timer >> 25); + + return ticks; +} + +/* + * Some controllers exhibit one or more of the following bugs when updating the + * iso cycle timer register: + * - When the lowest six bits are wrapping around to zero, a read 
that happens + * at the same time will return garbage in the lowest ten bits. + * - When the cycleOffset field wraps around to zero, the cycleCount field is + * not incremented for about 60 ns. + * - Occasionally, the entire register reads zero. + * + * To catch these, we read the register three times and ensure that the + * difference between each two consecutive reads is approximately the same, i.e. + * less than twice the other. Furthermore, any negative difference indicates an + * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to + * execute, so we have enough precision to compute the ratio of the differences.) + */ +static u32 get_cycle_time(struct fw_ohci *ohci) +{ + u32 c0, c1, c2; + u32 t0, t1, t2; + s32 diff01, diff12; + int i; + + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + + if (ohci->quirks & QUIRK_CYCLE_TIMER) { + i = 0; + c1 = c2; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + do { + c0 = c1; + c1 = c2; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + t0 = cycle_timer_ticks(c0); + t1 = cycle_timer_ticks(c1); + t2 = cycle_timer_ticks(c2); + diff01 = t1 - t0; + diff12 = t2 - t1; + } while ((diff01 <= 0 || diff12 <= 0 || + diff01 / diff12 >= 2 || diff12 / diff01 >= 2) + && i++ < 20); + } + + return c2; +} + +/* + * This function has to be called at least every 64 seconds. The bus_time + * field stores not only the upper 25 bits of the BUS_TIME register but also + * the most significant bit of the cycle timer in bit 6 so that we can detect + * changes in this bit. + */ +static u32 update_bus_time(struct fw_ohci *ohci) +{ + u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; + + if (unlikely(!ohci->bus_time_running)) { + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds); + ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) | + (cycle_time_seconds & 0x40); + ohci->bus_time_running = true; + } + + if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) + ohci->bus_time += 0x40; + + return ohci->bus_time | cycle_time_seconds; +} + +static int get_status_for_port(struct fw_ohci *ohci, int port_index) +{ + int reg; + + mutex_lock(&ohci->phy_reg_mutex); + reg = write_phy_reg(ohci, 7, port_index); + if (reg >= 0) + reg = read_phy_reg(ohci, 8); + mutex_unlock(&ohci->phy_reg_mutex); + if (reg < 0) + return reg; + + switch (reg & 0x0f) { + case 0x06: + return 2; /* is child node (connected to parent node) */ + case 0x0e: + return 3; /* is parent node (connected to child node) */ + } + return 1; /* not connected */ +} + +static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id, + int self_id_count) +{ + int i; + u32 entry; + + for (i = 0; i < self_id_count; i++) { + entry = ohci->self_id_buffer[i]; + if ((self_id & 0xff000000) == (entry & 0xff000000)) + return -1; + if ((self_id & 0xff000000) < (entry & 0xff000000)) + return i; + } + return i; +} + +static int initiated_reset(struct fw_ohci *ohci) +{ + int reg; + int ret = 0; + + mutex_lock(&ohci->phy_reg_mutex); + reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */ + if (reg >= 0) { + reg = read_phy_reg(ohci, 8); + reg |= 0x40; + reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */ + if (reg >= 0) { + reg = read_phy_reg(ohci, 12); /* read register 12 */ + if (reg >= 0) { + if ((reg & 0x08) == 0x08) { + /* bit 3 indicates "initiated reset" */ + ret = 0x2; + } + } + } + } + mutex_unlock(&ohci->phy_reg_mutex); + return ret; +} + +/* + * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally + * attached TSB41BA3D phy; see 
http://www.ti.com/litv/pdf/sllz059. + * Construct the selfID from phy register contents. + */ +static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) +{ + int reg, i, pos, status; + /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */ + u32 self_id = 0x8040c800; + + reg = reg_read(ohci, OHCI1394_NodeID); + if (!(reg & OHCI1394_NodeID_idValid)) { + ohci_notice(ohci, + "node ID not valid, new bus reset in progress\n"); + return -EBUSY; + } + self_id |= ((reg & 0x3f) << 24); /* phy ID */ + + reg = ohci_read_phy_reg(&ohci->card, 4); + if (reg < 0) + return reg; + self_id |= ((reg & 0x07) << 8); /* power class */ + + reg = ohci_read_phy_reg(&ohci->card, 1); + if (reg < 0) + return reg; + self_id |= ((reg & 0x3f) << 16); /* gap count */ + + for (i = 0; i < 3; i++) { + status = get_status_for_port(ohci, i); + if (status < 0) + return status; + self_id |= ((status & 0x3) << (6 - (i * 2))); + } + + self_id |= initiated_reset(ohci); + + pos = get_self_id_pos(ohci, self_id, self_id_count); + if (pos >= 0) { + memmove(&(ohci->self_id_buffer[pos+1]), + &(ohci->self_id_buffer[pos]), + (self_id_count - pos) * sizeof(*ohci->self_id_buffer)); + ohci->self_id_buffer[pos] = self_id; + self_id_count++; + } + return self_id_count; +} + +static void bus_reset_work(struct work_struct *work) +{ + struct fw_ohci *ohci = + container_of(work, struct fw_ohci, bus_reset_work); + int self_id_count, generation, new_generation, i, j; + u32 reg; + void *free_rom = NULL; + dma_addr_t free_rom_bus = 0; + bool is_new_root; + + reg = reg_read(ohci, OHCI1394_NodeID); + if (!(reg & OHCI1394_NodeID_idValid)) { + ohci_notice(ohci, + "node ID not valid, new bus reset in progress\n"); + return; + } + if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { + ohci_notice(ohci, "malconfigured bus\n"); + return; + } + ohci->node_id = reg & (OHCI1394_NodeID_busNumber | + OHCI1394_NodeID_nodeNumber); + + is_new_root = (reg & OHCI1394_NodeID_root) != 0; + if (!(ohci->is_root && is_new_root)) + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_cycleMaster); + ohci->is_root = is_new_root; + + reg = reg_read(ohci, OHCI1394_SelfIDCount); + if (reg & OHCI1394_SelfIDCount_selfIDError) { + ohci_notice(ohci, "self ID receive error\n"); + return; + } + /* + * The count in the SelfIDCount register is the number of + * bytes in the self ID receive buffer. Since we also receive + * the inverted quadlets and a header quadlet, we shift one + * bit extra to get the actual number of self IDs. + */ + self_id_count = (reg >> 3) & 0xff; + + if (self_id_count > 252) { + ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg); + return; + } + + generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff; + rmb(); + + for (i = 1, j = 0; j < self_id_count; i += 2, j++) { + u32 id = cond_le32_to_cpu(ohci->self_id[i]); + u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]); + + if (id != ~id2) { + /* + * If the invalid data looks like a cycle start packet, + * it's likely to be the result of the cycle master + * having a wrong gap count. In this case, the self IDs + * so far are valid and should be processed so that the + * bus manager can then correct the gap count. 
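Each self-ID quadlet arrives together with its bitwise complement, which is what the id != ~id2 test above relies on. A standalone sketch of that consistency check, with made-up quadlet values.

#include <assert.h>
#include <stdint.h>

/* a received pair is consistent only if the second quadlet inverts the first */
static int self_id_pair_ok(uint32_t id, uint32_t id_inverted)
{
	return id == ~id_inverted;
}

int main(void)
{
	uint32_t good = 0x807f8c56;

	assert(self_id_pair_ok(good, ~good));
	assert(!self_id_pair_ok(good, 0xffff008f));	/* corrupted pair */
	return 0;
}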
+ */ + if (id == 0xffff008f) { + ohci_notice(ohci, "ignoring spurious self IDs\n"); + self_id_count = j; + break; + } + + ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n", + j, self_id_count, id, id2); + return; + } + ohci->self_id_buffer[j] = id; + } + + if (ohci->quirks & QUIRK_TI_SLLZ059) { + self_id_count = find_and_insert_self_id(ohci, self_id_count); + if (self_id_count < 0) { + ohci_notice(ohci, + "could not construct local self ID\n"); + return; + } + } + + if (self_id_count == 0) { + ohci_notice(ohci, "no self IDs\n"); + return; + } + rmb(); + + /* + * Check the consistency of the self IDs we just read. The + * problem we face is that a new bus reset can start while we + * read out the self IDs from the DMA buffer. If this happens, + * the DMA buffer will be overwritten with new self IDs and we + * will read out inconsistent data. The OHCI specification + * (section 11.2) recommends a technique similar to + * linux/seqlock.h, where we remember the generation of the + * self IDs in the buffer before reading them out and compare + * it to the current generation after reading them out. If + * the two generations match we know we have a consistent set + * of self IDs. + */ + + new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; + if (new_generation != generation) { + ohci_notice(ohci, "new bus reset, discarding self ids\n"); + return; + } + + /* FIXME: Document how the locking works. */ + spin_lock_irq(&ohci->lock); + + ohci->generation = -1; /* prevent AT packet queueing */ + context_stop(&ohci->at_request_ctx); + context_stop(&ohci->at_response_ctx); + + spin_unlock_irq(&ohci->lock); + + /* + * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent + * packets in the AT queues and software needs to drain them. + * Some OHCI 1.1 controllers (JMicron) apparently require this too. + */ + at_context_flush(&ohci->at_request_ctx); + at_context_flush(&ohci->at_response_ctx); + + spin_lock_irq(&ohci->lock); + + ohci->generation = generation; + reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); + + if (ohci->quirks & QUIRK_RESET_PACKET) + ohci->request_generation = generation; + + /* + * This next bit is unrelated to the AT context stuff but we + * have to do it under the spinlock also. If a new config rom + * was set up before this reset, the old one is now no longer + * in use and we can free it. Update the config rom pointers + * to point to the current config rom and clear the + * next_config_rom pointer so a new update can take place. + */ + + if (ohci->next_config_rom != NULL) { + if (ohci->next_config_rom != ohci->config_rom) { + free_rom = ohci->config_rom; + free_rom_bus = ohci->config_rom_bus; + } + ohci->config_rom = ohci->next_config_rom; + ohci->config_rom_bus = ohci->next_config_rom_bus; + ohci->next_config_rom = NULL; + + /* + * Restore config_rom image and manually update + * config_rom registers. Writing the header quadlet + * will indicate that the config rom is ready, so we + * do that last. 
+ */ + reg_write(ohci, OHCI1394_BusOptions, + be32_to_cpu(ohci->config_rom[2])); + ohci->config_rom[0] = ohci->next_header; + reg_write(ohci, OHCI1394_ConfigROMhdr, + be32_to_cpu(ohci->next_header)); + } + + if (param_remote_dma) { + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); + } + + spin_unlock_irq(&ohci->lock); + + if (free_rom) + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + free_rom, free_rom_bus); + + log_selfids(ohci, generation, self_id_count); + + fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, + self_id_count, ohci->self_id_buffer, + ohci->csr_state_setclear_abdicate); + ohci->csr_state_setclear_abdicate = false; +} + +static irqreturn_t irq_handler(int irq, void *data) +{ + struct fw_ohci *ohci = data; + u32 event, iso_event; + int i; + + event = reg_read(ohci, OHCI1394_IntEventClear); + + if (!event || !~event) + return IRQ_NONE; + + /* + * busReset and postedWriteErr must not be cleared yet + * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) + */ + reg_write(ohci, OHCI1394_IntEventClear, + event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); + log_irqs(ohci, event); + + if (event & OHCI1394_selfIDComplete) + queue_work(selfid_workqueue, &ohci->bus_reset_work); + + if (event & OHCI1394_RQPkt) + tasklet_schedule(&ohci->ar_request_ctx.tasklet); + + if (event & OHCI1394_RSPkt) + tasklet_schedule(&ohci->ar_response_ctx.tasklet); + + if (event & OHCI1394_reqTxComplete) + tasklet_schedule(&ohci->at_request_ctx.tasklet); + + if (event & OHCI1394_respTxComplete) + tasklet_schedule(&ohci->at_response_ctx.tasklet); + + if (event & OHCI1394_isochRx) { + iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); + reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); + + while (iso_event) { + i = ffs(iso_event) - 1; + tasklet_schedule( + &ohci->ir_context_list[i].context.tasklet); + iso_event &= ~(1 << i); + } + } + + if (event & OHCI1394_isochTx) { + iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); + reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); + + while (iso_event) { + i = ffs(iso_event) - 1; + tasklet_schedule( + &ohci->it_context_list[i].context.tasklet); + iso_event &= ~(1 << i); + } + } + + if (unlikely(event & OHCI1394_regAccessFail)) + ohci_err(ohci, "register access failure\n"); + + if (unlikely(event & OHCI1394_postedWriteErr)) { + reg_read(ohci, OHCI1394_PostedWriteAddressHi); + reg_read(ohci, OHCI1394_PostedWriteAddressLo); + reg_write(ohci, OHCI1394_IntEventClear, + OHCI1394_postedWriteErr); + if (printk_ratelimit()) + ohci_err(ohci, "PCI posted write error\n"); + } + + if (unlikely(event & OHCI1394_cycleTooLong)) { + if (printk_ratelimit()) + ohci_notice(ohci, "isochronous cycle too long\n"); + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_cycleMaster); + } + + if (unlikely(event & OHCI1394_cycleInconsistent)) { + /* + * We need to clear this event bit in order to make + * cycleMatch isochronous I/O work. In theory we should + * stop active cycleMatch iso contexts now and restart + * them at least two cycles later. (FIXME?) 
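The isochRx/isochTx dispatch in irq_handler() above walks the 32-bit event mask one set bit at a time with ffs(). A self-contained sketch of that pattern, with an invented mask and an array standing in for tasklet_schedule().

#include <assert.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint32_t iso_event = (1u << 0) | (1u << 5) | (1u << 30);
	int handled[3], n = 0;

	while (iso_event) {
		int i = ffs(iso_event) - 1;	/* index of the lowest set bit */
		handled[n++] = i;		/* "schedule" context i */
		iso_event &= ~(1u << i);	/* clear it and continue */
	}
	assert(n == 3 && handled[0] == 0 && handled[1] == 5 && handled[2] == 30);
	return 0;
}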
+ */ + if (printk_ratelimit()) + ohci_notice(ohci, "isochronous cycle inconsistent\n"); + } + + if (unlikely(event & OHCI1394_unrecoverableError)) + handle_dead_contexts(ohci); + + if (event & OHCI1394_cycle64Seconds) { + spin_lock(&ohci->lock); + update_bus_time(ohci); + spin_unlock(&ohci->lock); + } else + flush_writes(ohci); + + return IRQ_HANDLED; +} + +static int software_reset(struct fw_ohci *ohci) +{ + u32 val; + int i; + + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); + for (i = 0; i < 500; i++) { + val = reg_read(ohci, OHCI1394_HCControlSet); + if (!~val) + return -ENODEV; /* Card was ejected. */ + + if (!(val & OHCI1394_HCControl_softReset)) + return 0; + + msleep(1); + } + + return -EBUSY; +} + +static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) +{ + size_t size = length * 4; + + memcpy(dest, src, size); + if (size < CONFIG_ROM_SIZE) + memset(&dest[length], 0, CONFIG_ROM_SIZE - size); +} + +static int configure_1394a_enhancements(struct fw_ohci *ohci) +{ + bool enable_1394a; + int ret, clear, set, offset; + + /* Check if the driver should configure link and PHY. */ + if (!(reg_read(ohci, OHCI1394_HCControlSet) & + OHCI1394_HCControl_programPhyEnable)) + return 0; + + /* Paranoia: check whether the PHY supports 1394a, too. */ + enable_1394a = false; + ret = read_phy_reg(ohci, 2); + if (ret < 0) + return ret; + if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { + ret = read_paged_phy_reg(ohci, 1, 8); + if (ret < 0) + return ret; + if (ret >= 1) + enable_1394a = true; + } + + if (ohci->quirks & QUIRK_NO_1394A) + enable_1394a = false; + + /* Configure PHY and link consistently. */ + if (enable_1394a) { + clear = 0; + set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; + } else { + clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; + set = 0; + } + ret = update_phy_reg(ohci, 5, clear, set); + if (ret < 0) + return ret; + + if (enable_1394a) + offset = OHCI1394_HCControlSet; + else + offset = OHCI1394_HCControlClear; + reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); + + /* Clean up: configuration has been taken care of. */ + reg_write(ohci, OHCI1394_HCControlClear, + OHCI1394_HCControl_programPhyEnable); + + return 0; +} + +static int probe_tsb41ba3d(struct fw_ohci *ohci) +{ + /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */ + static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, }; + int reg, i; + + reg = read_phy_reg(ohci, 2); + if (reg < 0) + return reg; + if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS) + return 0; + + for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { + reg = read_paged_phy_reg(ohci, 1, i + 10); + if (reg < 0) + return reg; + if (reg != id[i]) + return 0; + } + return 1; +} + +static int ohci_enable(struct fw_card *card, + const __be32 *config_rom, size_t length) +{ + struct fw_ohci *ohci = fw_ohci(card); + u32 lps, version, irqs; + int i, ret; + + if (software_reset(ohci)) { + ohci_err(ohci, "failed to reset ohci card\n"); + return -EBUSY; + } + + /* + * Now enable LPS, which we need in order to start accessing + * most of the registers. In fact, on some cards (ALI M5251), + * accessing registers in the SClk domain without LPS enabled + * will lock up the machine. Wait 50msec to make sure we have + * full link enabled. However, with some cards (well, at least + * a JMicron PCIe card), we have to try again sometimes. + * + * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but + * cannot actually use the phy at that time. 
These need tens of + * millisecods pause between LPS write and first phy access too. + */ + + reg_write(ohci, OHCI1394_HCControlSet, + OHCI1394_HCControl_LPS | + OHCI1394_HCControl_postedWriteEnable); + flush_writes(ohci); + + for (lps = 0, i = 0; !lps && i < 3; i++) { + msleep(50); + lps = reg_read(ohci, OHCI1394_HCControlSet) & + OHCI1394_HCControl_LPS; + } + + if (!lps) { + ohci_err(ohci, "failed to set Link Power Status\n"); + return -EIO; + } + + if (ohci->quirks & QUIRK_TI_SLLZ059) { + ret = probe_tsb41ba3d(ohci); + if (ret < 0) + return ret; + if (ret) + ohci_notice(ohci, "local TSB41BA3D phy\n"); + else + ohci->quirks &= ~QUIRK_TI_SLLZ059; + } + + reg_write(ohci, OHCI1394_HCControlClear, + OHCI1394_HCControl_noByteSwapData); + + reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_cycleTimerEnable | + OHCI1394_LinkControl_cycleMaster); + + reg_write(ohci, OHCI1394_ATRetries, + OHCI1394_MAX_AT_REQ_RETRIES | + (OHCI1394_MAX_AT_RESP_RETRIES << 4) | + (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | + (200 << 16)); + + ohci->bus_time_running = false; + + for (i = 0; i < 32; i++) + if (ohci->ir_context_support & (1 << i)) + reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i), + IR_CONTEXT_MULTI_CHANNEL_MODE); + + version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; + if (version >= OHCI_VERSION_1_1) { + reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, + 0xfffffffe); + card->broadcast_channel_auto_allocated = true; + } + + /* Get implemented bits of the priority arbitration request counter. */ + reg_write(ohci, OHCI1394_FairnessControl, 0x3f); + ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; + reg_write(ohci, OHCI1394_FairnessControl, 0); + card->priority_budget_implemented = ohci->pri_req_max != 0; + + reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16); + reg_write(ohci, OHCI1394_IntEventClear, ~0); + reg_write(ohci, OHCI1394_IntMaskClear, ~0); + + ret = configure_1394a_enhancements(ohci); + if (ret < 0) + return ret; + + /* Activate link_on bit and contender bit in our self ID packets.*/ + ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); + if (ret < 0) + return ret; + + /* + * When the link is not yet enabled, the atomic config rom + * update mechanism described below in ohci_set_config_rom() + * is not active. We have to update ConfigRomHeader and + * BusOptions manually, and the write to ConfigROMmap takes + * effect immediately. We tie this to the enabling of the + * link, so we have a valid config rom before enabling - the + * OHCI requires that ConfigROMhdr and BusOptions have valid + * values before enabling. + * + * However, when the ConfigROMmap is written, some controllers + * always read back quadlets 0 and 2 from the config rom to + * the ConfigRomHeader and BusOptions registers on bus reset. + * They shouldn't do that in this initial case where the link + * isn't enabled. This means we have to use the same + * workaround here, setting the bus header to 0 and then write + * the right values in the bus reset tasklet. + */ + + if (config_rom) { + ohci->next_config_rom = + dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, + &ohci->next_config_rom_bus, + GFP_KERNEL); + if (ohci->next_config_rom == NULL) + return -ENOMEM; + + copy_config_rom(ohci->next_config_rom, config_rom, length); + } else { + /* + * In the suspend case, config_rom is NULL, which + * means that we just reuse the old config rom. 
+ */ + ohci->next_config_rom = ohci->config_rom; + ohci->next_config_rom_bus = ohci->config_rom_bus; + } + + ohci->next_header = ohci->next_config_rom[0]; + ohci->next_config_rom[0] = 0; + reg_write(ohci, OHCI1394_ConfigROMhdr, 0); + reg_write(ohci, OHCI1394_BusOptions, + be32_to_cpu(ohci->next_config_rom[2])); + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + + reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); + + irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | + OHCI1394_RQPkt | OHCI1394_RSPkt | + OHCI1394_isochTx | OHCI1394_isochRx | + OHCI1394_postedWriteErr | + OHCI1394_selfIDComplete | + OHCI1394_regAccessFail | + OHCI1394_cycleInconsistent | + OHCI1394_unrecoverableError | + OHCI1394_cycleTooLong | + OHCI1394_masterIntEnable; + if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) + irqs |= OHCI1394_busReset; + reg_write(ohci, OHCI1394_IntMaskSet, irqs); + + reg_write(ohci, OHCI1394_HCControlSet, + OHCI1394_HCControl_linkEnable | + OHCI1394_HCControl_BIBimageValid); + + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_rcvSelfID | + OHCI1394_LinkControl_rcvPhyPkt); + + ar_context_run(&ohci->ar_request_ctx); + ar_context_run(&ohci->ar_response_ctx); + + flush_writes(ohci); + + /* We are ready to go, reset bus to finish initialization. */ + fw_schedule_bus_reset(&ohci->card, false, true); + + return 0; +} + +static int ohci_set_config_rom(struct fw_card *card, + const __be32 *config_rom, size_t length) +{ + struct fw_ohci *ohci; + __be32 *next_config_rom; + dma_addr_t uninitialized_var(next_config_rom_bus); + + ohci = fw_ohci(card); + + /* + * When the OHCI controller is enabled, the config rom update + * mechanism is a bit tricky, but easy enough to use. See + * section 5.5.6 in the OHCI specification. + * + * The OHCI controller caches the new config rom address in a + * shadow register (ConfigROMmapNext) and needs a bus reset + * for the changes to take place. When the bus reset is + * detected, the controller loads the new values for the + * ConfigRomHeader and BusOptions registers from the specified + * config rom and loads ConfigROMmap from the ConfigROMmapNext + * shadow register. All automatically and atomically. + * + * Now, there's a twist to this story. The automatic load of + * ConfigRomHeader and BusOptions doesn't honor the + * noByteSwapData bit, so with a be32 config rom, the + * controller will load be32 values in to these registers + * during the atomic update, even on litte endian + * architectures. The workaround we use is to put a 0 in the + * header quadlet; 0 is endian agnostic and means that the + * config rom isn't ready yet. In the bus reset tasklet we + * then set up the real values for the two registers. + * + * We use ohci->lock to avoid racing with the code that sets + * ohci->next_config_rom to NULL (see bus_reset_work). + */ + + next_config_rom = + dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, + &next_config_rom_bus, GFP_KERNEL); + if (next_config_rom == NULL) + return -ENOMEM; + + spin_lock_irq(&ohci->lock); + + /* + * If there is not an already pending config_rom update, + * push our new allocation into the ohci->next_config_rom + * and then mark the local variable as null so that we + * won't deallocate the new buffer. + * + * OTOH, if there is a pending config_rom update, just + * use that buffer with the new config_rom data, and + * let this routine free the unused DMA allocation. 
+ */ + + if (ohci->next_config_rom == NULL) { + ohci->next_config_rom = next_config_rom; + ohci->next_config_rom_bus = next_config_rom_bus; + next_config_rom = NULL; + } + + copy_config_rom(ohci->next_config_rom, config_rom, length); + + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; + + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + + spin_unlock_irq(&ohci->lock); + + /* If we didn't use the DMA allocation, delete it. */ + if (next_config_rom != NULL) + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + next_config_rom, next_config_rom_bus); + + /* + * Now initiate a bus reset to have the changes take + * effect. We clean up the old config rom memory and DMA + * mappings in the bus reset tasklet, since the OHCI + * controller could need to access it before the bus reset + * takes effect. + */ + + fw_schedule_bus_reset(&ohci->card, true, true); + + return 0; +} + +static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) +{ + struct fw_ohci *ohci = fw_ohci(card); + + at_context_transmit(&ohci->at_request_ctx, packet); +} + +static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) +{ + struct fw_ohci *ohci = fw_ohci(card); + + at_context_transmit(&ohci->at_response_ctx, packet); +} + +static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) +{ + struct fw_ohci *ohci = fw_ohci(card); + struct context *ctx = &ohci->at_request_ctx; + struct driver_data *driver_data = packet->driver_data; + int ret = -ENOENT; + + tasklet_disable(&ctx->tasklet); + + if (packet->ack != 0) + goto out; + + if (packet->payload_mapped) + dma_unmap_single(ohci->card.device, packet->payload_bus, + packet->payload_length, DMA_TO_DEVICE); + + log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); + driver_data->packet = NULL; + packet->ack = RCODE_CANCELLED; + packet->callback(packet, &ohci->card, packet->ack); + ret = 0; + out: + tasklet_enable(&ctx->tasklet); + + return ret; +} + +static int ohci_enable_phys_dma(struct fw_card *card, + int node_id, int generation) +{ + struct fw_ohci *ohci = fw_ohci(card); + unsigned long flags; + int n, ret = 0; + + if (param_remote_dma) + return 0; + + /* + * FIXME: Make sure this bitmask is cleared when we clear the busReset + * interrupt bit. Clear physReqResourceAllBuses on bus reset. + */ + + spin_lock_irqsave(&ohci->lock, flags); + + if (ohci->generation != generation) { + ret = -ESTALE; + goto out; + } + + /* + * Note, if the node ID contains a non-local bus ID, physical DMA is + * enabled for _all_ nodes on remote buses. + */ + + n = (node_id & 0xffc0) == LOCAL_BUS ? 
node_id & 0x3f : 63; + if (n < 32) + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); + else + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); + + flush_writes(ohci); + out: + spin_unlock_irqrestore(&ohci->lock, flags); + + return ret; +} + +static u32 ohci_read_csr(struct fw_card *card, int csr_offset) +{ + struct fw_ohci *ohci = fw_ohci(card); + unsigned long flags; + u32 value; + + switch (csr_offset) { + case CSR_STATE_CLEAR: + case CSR_STATE_SET: + if (ohci->is_root && + (reg_read(ohci, OHCI1394_LinkControlSet) & + OHCI1394_LinkControl_cycleMaster)) + value = CSR_STATE_BIT_CMSTR; + else + value = 0; + if (ohci->csr_state_setclear_abdicate) + value |= CSR_STATE_BIT_ABDICATE; + + return value; + + case CSR_NODE_IDS: + return reg_read(ohci, OHCI1394_NodeID) << 16; + + case CSR_CYCLE_TIME: + return get_cycle_time(ohci); + + case CSR_BUS_TIME: + /* + * We might be called just after the cycle timer has wrapped + * around but just before the cycle64Seconds handler, so we + * better check here, too, if the bus time needs to be updated. + */ + spin_lock_irqsave(&ohci->lock, flags); + value = update_bus_time(ohci); + spin_unlock_irqrestore(&ohci->lock, flags); + return value; + + case CSR_BUSY_TIMEOUT: + value = reg_read(ohci, OHCI1394_ATRetries); + return (value >> 4) & 0x0ffff00f; + + case CSR_PRIORITY_BUDGET: + return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | + (ohci->pri_req_max << 8); + + default: + WARN_ON(1); + return 0; + } +} + +static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) +{ + struct fw_ohci *ohci = fw_ohci(card); + unsigned long flags; + + switch (csr_offset) { + case CSR_STATE_CLEAR: + if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { + reg_write(ohci, OHCI1394_LinkControlClear, + OHCI1394_LinkControl_cycleMaster); + flush_writes(ohci); + } + if (value & CSR_STATE_BIT_ABDICATE) + ohci->csr_state_setclear_abdicate = false; + break; + + case CSR_STATE_SET: + if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_cycleMaster); + flush_writes(ohci); + } + if (value & CSR_STATE_BIT_ABDICATE) + ohci->csr_state_setclear_abdicate = true; + break; + + case CSR_NODE_IDS: + reg_write(ohci, OHCI1394_NodeID, value >> 16); + flush_writes(ohci); + break; + + case CSR_CYCLE_TIME: + reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); + reg_write(ohci, OHCI1394_IntEventSet, + OHCI1394_cycleInconsistent); + flush_writes(ohci); + break; + + case CSR_BUS_TIME: + spin_lock_irqsave(&ohci->lock, flags); + ohci->bus_time = (update_bus_time(ohci) & 0x40) | + (value & ~0x7f); + spin_unlock_irqrestore(&ohci->lock, flags); + break; + + case CSR_BUSY_TIMEOUT: + value = (value & 0xf) | ((value & 0xf) << 4) | + ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); + reg_write(ohci, OHCI1394_ATRetries, value); + flush_writes(ohci); + break; + + case CSR_PRIORITY_BUDGET: + reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); + flush_writes(ohci); + break; + + default: + WARN_ON(1); + break; + } +} + +static void flush_iso_completions(struct iso_context *ctx) +{ + ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, + ctx->header_length, ctx->header, + ctx->base.callback_data); + ctx->header_length = 0; +} + +static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) +{ + u32 *ctx_hdr; + + if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { + if (ctx->base.drop_overflow_headers) + return; + flush_iso_completions(ctx); + } + + ctx_hdr = ctx->header + 
ctx->header_length; + ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); + + /* + * The two iso header quadlets are byteswapped to little + * endian by the controller, but we want to present them + * as big endian for consistency with the bus endianness. + */ + if (ctx->base.header_size > 0) + ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */ + if (ctx->base.header_size > 4) + ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */ + if (ctx->base.header_size > 8) + memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); + ctx->header_length += ctx->base.header_size; +} + +static int handle_ir_packet_per_buffer(struct context *context, + struct descriptor *d, + struct descriptor *last) +{ + struct iso_context *ctx = + container_of(context, struct iso_context, context); + struct descriptor *pd; + u32 buffer_dma; + + for (pd = d; pd <= last; pd++) + if (pd->transfer_status) + break; + if (pd > last) + /* Descriptor(s) not done yet, stop iteration */ + return 0; + + while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { + d++; + buffer_dma = le32_to_cpu(d->data_address); + dma_sync_single_range_for_cpu(context->ohci->card.device, + buffer_dma & PAGE_MASK, + buffer_dma & ~PAGE_MASK, + le16_to_cpu(d->req_count), + DMA_FROM_DEVICE); + } + + copy_iso_headers(ctx, (u32 *) (last + 1)); + + if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) + flush_iso_completions(ctx); + + return 1; +} + +/* d == last because each descriptor block is only a single descriptor. */ +static int handle_ir_buffer_fill(struct context *context, + struct descriptor *d, + struct descriptor *last) +{ + struct iso_context *ctx = + container_of(context, struct iso_context, context); + unsigned int req_count, res_count, completed; + u32 buffer_dma; + + req_count = le16_to_cpu(last->req_count); + res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); + completed = req_count - res_count; + buffer_dma = le32_to_cpu(last->data_address); + + if (completed > 0) { + ctx->mc_buffer_bus = buffer_dma; + ctx->mc_completed = completed; + } + + if (res_count != 0) + /* Descriptor(s) not done yet, stop iteration */ + return 0; + + dma_sync_single_range_for_cpu(context->ohci->card.device, + buffer_dma & PAGE_MASK, + buffer_dma & ~PAGE_MASK, + completed, DMA_FROM_DEVICE); + + if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { + ctx->base.callback.mc(&ctx->base, + buffer_dma + completed, + ctx->base.callback_data); + ctx->mc_completed = 0; + } + + return 1; +} + +static void flush_ir_buffer_fill(struct iso_context *ctx) +{ + dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, + ctx->mc_buffer_bus & PAGE_MASK, + ctx->mc_buffer_bus & ~PAGE_MASK, + ctx->mc_completed, DMA_FROM_DEVICE); + + ctx->base.callback.mc(&ctx->base, + ctx->mc_buffer_bus + ctx->mc_completed, + ctx->base.callback_data); + ctx->mc_completed = 0; +} + +static inline void sync_it_packet_for_cpu(struct context *context, + struct descriptor *pd) +{ + __le16 control; + u32 buffer_dma; + + /* only packets beginning with OUTPUT_MORE* have data buffers */ + if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) + return; + + /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */ + pd += 2; + + /* + * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's + * data buffer is in the context program's coherent page and must not + * be synced. 
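All of the ranged DMA syncs in these receive/transmit handlers split a bus address into a page-aligned base and an in-page offset with PAGE_MASK. A trivial standalone example, assuming a 4 KiB page; the DEMO_* names are invented so the snippet does not collide with the kernel macros.

#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE	4096u
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	uint32_t buffer_dma = 0x12345678;

	uint32_t base   = buffer_dma & DEMO_PAGE_MASK;	/* 0x12345000 */
	uint32_t offset = buffer_dma & ~DEMO_PAGE_MASK;	/* 0x678 */

	assert(base == 0x12345000 && offset == 0x678);
	assert(base + offset == buffer_dma);
	return 0;
}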
+ */ + if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == + (context->current_bus & PAGE_MASK)) { + if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) + return; + pd++; + } + + do { + buffer_dma = le32_to_cpu(pd->data_address); + dma_sync_single_range_for_cpu(context->ohci->card.device, + buffer_dma & PAGE_MASK, + buffer_dma & ~PAGE_MASK, + le16_to_cpu(pd->req_count), + DMA_TO_DEVICE); + control = pd->control; + pd++; + } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))); +} + +static int handle_it_packet(struct context *context, + struct descriptor *d, + struct descriptor *last) +{ + struct iso_context *ctx = + container_of(context, struct iso_context, context); + struct descriptor *pd; + __be32 *ctx_hdr; + + for (pd = d; pd <= last; pd++) + if (pd->transfer_status) + break; + if (pd > last) + /* Descriptor(s) not done yet, stop iteration */ + return 0; + + sync_it_packet_for_cpu(context, d); + + if (ctx->header_length + 4 > PAGE_SIZE) { + if (ctx->base.drop_overflow_headers) + return 1; + flush_iso_completions(ctx); + } + + ctx_hdr = ctx->header + ctx->header_length; + ctx->last_timestamp = le16_to_cpu(last->res_count); + /* Present this value as big-endian to match the receive code */ + *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | + le16_to_cpu(pd->res_count)); + ctx->header_length += 4; + + if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) + flush_iso_completions(ctx); + + return 1; +} + +static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) +{ + u32 hi = channels >> 32, lo = channels; + + reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); + reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); + reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); + reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); + mmiowb(); + ohci->mc_channels = channels; +} + +static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, + int type, int channel, size_t header_size) +{ + struct fw_ohci *ohci = fw_ohci(card); + struct iso_context *uninitialized_var(ctx); + descriptor_callback_t uninitialized_var(callback); + u64 *uninitialized_var(channels); + u32 *uninitialized_var(mask), uninitialized_var(regs); + int index, ret = -EBUSY; + + spin_lock_irq(&ohci->lock); + + switch (type) { + case FW_ISO_CONTEXT_TRANSMIT: + mask = &ohci->it_context_mask; + callback = handle_it_packet; + index = ffs(*mask) - 1; + if (index >= 0) { + *mask &= ~(1 << index); + regs = OHCI1394_IsoXmitContextBase(index); + ctx = &ohci->it_context_list[index]; + } + break; + + case FW_ISO_CONTEXT_RECEIVE: + channels = &ohci->ir_context_channels; + mask = &ohci->ir_context_mask; + callback = handle_ir_packet_per_buffer; + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; + if (index >= 0) { + *channels &= ~(1ULL << channel); + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + mask = &ohci->ir_context_mask; + callback = handle_ir_buffer_fill; + index = !ohci->mc_allocated ? 
ffs(*mask) - 1 : -1; + if (index >= 0) { + ohci->mc_allocated = true; + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; + + default: + index = -1; + ret = -ENOSYS; + } + + spin_unlock_irq(&ohci->lock); + + if (index < 0) + return ERR_PTR(ret); + + memset(ctx, 0, sizeof(*ctx)); + ctx->header_length = 0; + ctx->header = (void *) __get_free_page(GFP_KERNEL); + if (ctx->header == NULL) { + ret = -ENOMEM; + goto out; + } + ret = context_init(&ctx->context, ohci, regs, callback); + if (ret < 0) + goto out_with_header; + + if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { + set_multichannel_mask(ohci, 0); + ctx->mc_completed = 0; + } + + return &ctx->base; + + out_with_header: + free_page((unsigned long)ctx->header); + out: + spin_lock_irq(&ohci->lock); + + switch (type) { + case FW_ISO_CONTEXT_RECEIVE: + *channels |= 1ULL << channel; + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + ohci->mc_allocated = false; + break; + } + *mask |= 1 << index; + + spin_unlock_irq(&ohci->lock); + + return ERR_PTR(ret); +} + +static int ohci_start_iso(struct fw_iso_context *base, + s32 cycle, u32 sync, u32 tags) +{ + struct iso_context *ctx = container_of(base, struct iso_context, base); + struct fw_ohci *ohci = ctx->context.ohci; + u32 control = IR_CONTEXT_ISOCH_HEADER, match; + int index; + + /* the controller cannot start without any queued packets */ + if (ctx->context.last->branch_address == 0) + return -ENODATA; + + switch (ctx->base.type) { + case FW_ISO_CONTEXT_TRANSMIT: + index = ctx - ohci->it_context_list; + match = 0; + if (cycle >= 0) + match = IT_CONTEXT_CYCLE_MATCH_ENABLE | + (cycle & 0x7fff) << 16; + + reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); + context_run(&ctx->context, match); + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; + /* fall through */ + case FW_ISO_CONTEXT_RECEIVE: + index = ctx - ohci->ir_context_list; + match = (tags << 28) | (sync << 8) | ctx->base.channel; + if (cycle >= 0) { + match |= (cycle & 0x07fff) << 12; + control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; + } + + reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); + reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); + reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); + context_run(&ctx->context, control); + + ctx->sync = sync; + ctx->tags = tags; + + break; + } + + return 0; +} + +static int ohci_stop_iso(struct fw_iso_context *base) +{ + struct fw_ohci *ohci = fw_ohci(base->card); + struct iso_context *ctx = container_of(base, struct iso_context, base); + int index; + + switch (ctx->base.type) { + case FW_ISO_CONTEXT_TRANSMIT: + index = ctx - ohci->it_context_list; + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); + break; + + case FW_ISO_CONTEXT_RECEIVE: + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + index = ctx - ohci->ir_context_list; + reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); + break; + } + flush_writes(ohci); + context_stop(&ctx->context); + tasklet_kill(&ctx->context.tasklet); + + return 0; +} + +static void ohci_free_iso_context(struct fw_iso_context *base) +{ + struct fw_ohci *ohci = fw_ohci(base->card); + struct iso_context *ctx = container_of(base, struct iso_context, base); + unsigned long flags; + int index; + + ohci_stop_iso(base); + context_release(&ctx->context); + free_page((unsigned long)ctx->header); + + spin_lock_irqsave(&ohci->lock, 
flags); + + switch (base->type) { + case FW_ISO_CONTEXT_TRANSMIT: + index = ctx - ohci->it_context_list; + ohci->it_context_mask |= 1 << index; + break; + + case FW_ISO_CONTEXT_RECEIVE: + index = ctx - ohci->ir_context_list; + ohci->ir_context_mask |= 1 << index; + ohci->ir_context_channels |= 1ULL << base->channel; + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + index = ctx - ohci->ir_context_list; + ohci->ir_context_mask |= 1 << index; + ohci->ir_context_channels |= ohci->mc_channels; + ohci->mc_channels = 0; + ohci->mc_allocated = false; + break; + } + + spin_unlock_irqrestore(&ohci->lock, flags); +} + +static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) +{ + struct fw_ohci *ohci = fw_ohci(base->card); + unsigned long flags; + int ret; + + switch (base->type) { + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + + spin_lock_irqsave(&ohci->lock, flags); + + /* Don't allow multichannel to grab other contexts' channels. */ + if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { + *channels = ohci->ir_context_channels; + ret = -EBUSY; + } else { + set_multichannel_mask(ohci, *channels); + ret = 0; + } + + spin_unlock_irqrestore(&ohci->lock, flags); + + break; + default: + ret = -EINVAL; + } + + return ret; +} + +#ifdef CONFIG_PM +static void ohci_resume_iso_dma(struct fw_ohci *ohci) +{ + int i; + struct iso_context *ctx; + + for (i = 0 ; i < ohci->n_ir ; i++) { + ctx = &ohci->ir_context_list[i]; + if (ctx->context.running) + ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); + } + + for (i = 0 ; i < ohci->n_it ; i++) { + ctx = &ohci->it_context_list[i]; + if (ctx->context.running) + ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); + } +} +#endif + +static int queue_iso_transmit(struct iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) +{ + struct descriptor *d, *last, *pd; + struct fw_iso_packet *p; + __le32 *header; + dma_addr_t d_bus, page_bus; + u32 z, header_z, payload_z, irq; + u32 payload_index, payload_end_index, next_page_index; + int page, end_page, i, length, offset; + + p = packet; + payload_index = payload; + + if (p->skip) + z = 1; + else + z = 2; + if (p->header_length > 0) + z++; + + /* Determine the first page the payload isn't contained in. */ + end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; + if (p->payload_length > 0) + payload_z = end_page - (payload_index >> PAGE_SHIFT); + else + payload_z = 0; + + z += payload_z; + + /* Get header size in number of descriptors. */ + header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); + + d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); + if (d == NULL) + return -ENOMEM; + + if (!p->skip) { + d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); + d[0].req_count = cpu_to_le16(8); + /* + * Link the skip address to this descriptor itself. This causes + * a context to skip a cycle whenever lost cycles or FIFO + * overruns occur, without dropping the data. The application + * should then decide whether this is an error condition or not. + * FIXME: Make the context's cycle-lost behaviour configurable? 
+ */ + d[0].branch_address = cpu_to_le32(d_bus | z); + + header = (__le32 *) &d[1]; + header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | + IT_HEADER_TAG(p->tag) | + IT_HEADER_TCODE(TCODE_STREAM_DATA) | + IT_HEADER_CHANNEL(ctx->base.channel) | + IT_HEADER_SPEED(ctx->base.speed)); + header[1] = + cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + + p->payload_length)); + } + + if (p->header_length > 0) { + d[2].req_count = cpu_to_le16(p->header_length); + d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d)); + memcpy(&d[z], p->header, p->header_length); + } + + pd = d + z - payload_z; + payload_end_index = payload_index + p->payload_length; + for (i = 0; i < payload_z; i++) { + page = payload_index >> PAGE_SHIFT; + offset = payload_index & ~PAGE_MASK; + next_page_index = (page + 1) << PAGE_SHIFT; + length = + min(next_page_index, payload_end_index) - payload_index; + pd[i].req_count = cpu_to_le16(length); + + page_bus = page_private(buffer->pages[page]); + pd[i].data_address = cpu_to_le32(page_bus + offset); + + dma_sync_single_range_for_device(ctx->context.ohci->card.device, + page_bus, offset, length, + DMA_TO_DEVICE); + + payload_index += length; + } + + if (p->interrupt) + irq = DESCRIPTOR_IRQ_ALWAYS; + else + irq = DESCRIPTOR_NO_IRQ; + + last = z == 2 ? d : d + z - 1; + last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | + DESCRIPTOR_STATUS | + DESCRIPTOR_BRANCH_ALWAYS | + irq); + + context_append(&ctx->context, d, z, header_z); + + return 0; +} + +static int queue_iso_packet_per_buffer(struct iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) +{ + struct device *device = ctx->context.ohci->card.device; + struct descriptor *d, *pd; + dma_addr_t d_bus, page_bus; + u32 z, header_z, rest; + int i, j, length; + int page, offset, packet_count, header_size, payload_per_buffer; + + /* + * The OHCI controller puts the isochronous header and trailer in the + * buffer, so we need at least 8 bytes. + */ + packet_count = packet->header_length / ctx->base.header_size; + header_size = max(ctx->base.header_size, (size_t)8); + + /* Get header size in number of descriptors. 
*/ + header_z = DIV_ROUND_UP(header_size, sizeof(*d)); + page = payload >> PAGE_SHIFT; + offset = payload & ~PAGE_MASK; + payload_per_buffer = packet->payload_length / packet_count; + + for (i = 0; i < packet_count; i++) { + /* d points to the header descriptor */ + z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1; + d = context_get_descriptors(&ctx->context, + z + header_z, &d_bus); + if (d == NULL) + return -ENOMEM; + + d->control = cpu_to_le16(DESCRIPTOR_STATUS | + DESCRIPTOR_INPUT_MORE); + if (packet->skip && i == 0) + d->control |= cpu_to_le16(DESCRIPTOR_WAIT); + d->req_count = cpu_to_le16(header_size); + d->res_count = d->req_count; + d->transfer_status = 0; + d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); + + rest = payload_per_buffer; + pd = d; + for (j = 1; j < z; j++) { + pd++; + pd->control = cpu_to_le16(DESCRIPTOR_STATUS | + DESCRIPTOR_INPUT_MORE); + + if (offset + rest < PAGE_SIZE) + length = rest; + else + length = PAGE_SIZE - offset; + pd->req_count = cpu_to_le16(length); + pd->res_count = pd->req_count; + pd->transfer_status = 0; + + page_bus = page_private(buffer->pages[page]); + pd->data_address = cpu_to_le32(page_bus + offset); + + dma_sync_single_range_for_device(device, page_bus, + offset, length, + DMA_FROM_DEVICE); + + offset = (offset + length) & ~PAGE_MASK; + rest -= length; + if (offset == 0) + page++; + } + pd->control = cpu_to_le16(DESCRIPTOR_STATUS | + DESCRIPTOR_INPUT_LAST | + DESCRIPTOR_BRANCH_ALWAYS); + if (packet->interrupt && i == packet_count - 1) + pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); + + context_append(&ctx->context, d, z, header_z); + } + + return 0; +} + +static int queue_iso_buffer_fill(struct iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) +{ + struct descriptor *d; + dma_addr_t d_bus, page_bus; + int page, offset, rest, z, i, length; + + page = payload >> PAGE_SHIFT; + offset = payload & ~PAGE_MASK; + rest = packet->payload_length; + + /* We need one descriptor for each page in the buffer. 
*/ + z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); + + if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) + return -EFAULT; + + for (i = 0; i < z; i++) { + d = context_get_descriptors(&ctx->context, 1, &d_bus); + if (d == NULL) + return -ENOMEM; + + d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | + DESCRIPTOR_BRANCH_ALWAYS); + if (packet->skip && i == 0) + d->control |= cpu_to_le16(DESCRIPTOR_WAIT); + if (packet->interrupt && i == z - 1) + d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); + + if (offset + rest < PAGE_SIZE) + length = rest; + else + length = PAGE_SIZE - offset; + d->req_count = cpu_to_le16(length); + d->res_count = d->req_count; + d->transfer_status = 0; + + page_bus = page_private(buffer->pages[page]); + d->data_address = cpu_to_le32(page_bus + offset); + + dma_sync_single_range_for_device(ctx->context.ohci->card.device, + page_bus, offset, length, + DMA_FROM_DEVICE); + + rest -= length; + offset = 0; + page++; + + context_append(&ctx->context, d, 1, 0); + } + + return 0; +} + +static int ohci_queue_iso(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) +{ + struct iso_context *ctx = container_of(base, struct iso_context, base); + unsigned long flags; + int ret = -ENOSYS; + + spin_lock_irqsave(&ctx->context.ohci->lock, flags); + switch (base->type) { + case FW_ISO_CONTEXT_TRANSMIT: + ret = queue_iso_transmit(ctx, packet, buffer, payload); + break; + case FW_ISO_CONTEXT_RECEIVE: + ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); + break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); + break; + } + spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); + + return ret; +} + +static void ohci_flush_queue_iso(struct fw_iso_context *base) +{ + struct context *ctx = + &container_of(base, struct iso_context, base)->context; + + reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); +} + +static int ohci_flush_iso_completions(struct fw_iso_context *base) +{ + struct iso_context *ctx = container_of(base, struct iso_context, base); + int ret = 0; + + tasklet_disable(&ctx->context.tasklet); + + if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { + context_tasklet((unsigned long)&ctx->context); + + switch (base->type) { + case FW_ISO_CONTEXT_TRANSMIT: + case FW_ISO_CONTEXT_RECEIVE: + if (ctx->header_length != 0) + flush_iso_completions(ctx); + break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + if (ctx->mc_completed != 0) + flush_ir_buffer_fill(ctx); + break; + default: + ret = -ENOSYS; + } + + clear_bit_unlock(0, &ctx->flushing_completions); + smp_mb__after_atomic(); + } + + tasklet_enable(&ctx->context.tasklet); + + return ret; +} + +static const struct fw_card_driver ohci_driver = { + .enable = ohci_enable, + .read_phy_reg = ohci_read_phy_reg, + .update_phy_reg = ohci_update_phy_reg, + .set_config_rom = ohci_set_config_rom, + .send_request = ohci_send_request, + .send_response = ohci_send_response, + .cancel_packet = ohci_cancel_packet, + .enable_phys_dma = ohci_enable_phys_dma, + .read_csr = ohci_read_csr, + .write_csr = ohci_write_csr, + + .allocate_iso_context = ohci_allocate_iso_context, + .free_iso_context = ohci_free_iso_context, + .set_iso_channels = ohci_set_iso_channels, + .queue_iso = ohci_queue_iso, + .flush_queue_iso = ohci_flush_queue_iso, + .flush_iso_completions = ohci_flush_iso_completions, + .start_iso = ohci_start_iso, + .stop_iso = ohci_stop_iso, +}; + +#ifdef CONFIG_PPC_PMAC +static void 
pmac_ohci_on(struct pci_dev *dev) +{ + if (machine_is(powermac)) { + struct device_node *ofn = pci_device_to_OF_node(dev); + + if (ofn) { + pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); + pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); + } + } +} + +static void pmac_ohci_off(struct pci_dev *dev) +{ + if (machine_is(powermac)) { + struct device_node *ofn = pci_device_to_OF_node(dev); + + if (ofn) { + pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); + pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); + } + } +} +#else +static inline void pmac_ohci_on(struct pci_dev *dev) {} +static inline void pmac_ohci_off(struct pci_dev *dev) {} +#endif /* CONFIG_PPC_PMAC */ + +static int pci_probe(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + struct fw_ohci *ohci; + u32 bus_options, max_receive, link_speed, version; + u64 guid; + int i, err; + size_t size; + + if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { + dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); + return -ENOSYS; + } + + ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); + if (ohci == NULL) { + err = -ENOMEM; + goto fail; + } + + fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); + + pmac_ohci_on(dev); + + err = pci_enable_device(dev); + if (err) { + dev_err(&dev->dev, "failed to enable OHCI hardware\n"); + goto fail_free; + } + + pci_set_master(dev); + pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); + pci_set_drvdata(dev, ohci); + + spin_lock_init(&ohci->lock); + mutex_init(&ohci->phy_reg_mutex); + + INIT_WORK(&ohci->bus_reset_work, bus_reset_work); + + if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || + pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { + ohci_err(ohci, "invalid MMIO resource\n"); + err = -ENXIO; + goto fail_disable; + } + + err = pci_request_region(dev, 0, ohci_driver_name); + if (err) { + ohci_err(ohci, "MMIO resource unavailable\n"); + goto fail_disable; + } + + ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); + if (ohci->registers == NULL) { + ohci_err(ohci, "failed to remap registers\n"); + err = -ENXIO; + goto fail_iomem; + } + + for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) + if ((ohci_quirks[i].vendor == dev->vendor) && + (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || + ohci_quirks[i].device == dev->device) && + (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || + ohci_quirks[i].revision >= dev->revision)) { + ohci->quirks = ohci_quirks[i].flags; + break; + } + if (param_quirks) + ohci->quirks = param_quirks; + + /* + * Because dma_alloc_coherent() allocates at least one page, + * we save space by using a common buffer for the AR request/ + * response descriptors and the self IDs buffer. 
+ */ + BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); + BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); + ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, + PAGE_SIZE, + &ohci->misc_buffer_bus, + GFP_KERNEL); + if (!ohci->misc_buffer) { + err = -ENOMEM; + goto fail_iounmap; + } + + err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, + OHCI1394_AsReqRcvContextControlSet); + if (err < 0) + goto fail_misc_buf; + + err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, + OHCI1394_AsRspRcvContextControlSet); + if (err < 0) + goto fail_arreq_ctx; + + err = context_init(&ohci->at_request_ctx, ohci, + OHCI1394_AsReqTrContextControlSet, handle_at_packet); + if (err < 0) + goto fail_arrsp_ctx; + + err = context_init(&ohci->at_response_ctx, ohci, + OHCI1394_AsRspTrContextControlSet, handle_at_packet); + if (err < 0) + goto fail_atreq_ctx; + + reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); + ohci->ir_context_channels = ~0ULL; + ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); + reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); + ohci->ir_context_mask = ohci->ir_context_support; + ohci->n_ir = hweight32(ohci->ir_context_mask); + size = sizeof(struct iso_context) * ohci->n_ir; + ohci->ir_context_list = kzalloc(size, GFP_KERNEL); + + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); + ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); + ohci->it_context_mask = ohci->it_context_support; + ohci->n_it = hweight32(ohci->it_context_mask); + size = sizeof(struct iso_context) * ohci->n_it; + ohci->it_context_list = kzalloc(size, GFP_KERNEL); + + if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { + err = -ENOMEM; + goto fail_contexts; + } + + ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; + ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; + + bus_options = reg_read(ohci, OHCI1394_BusOptions); + max_receive = (bus_options >> 12) & 0xf; + link_speed = bus_options & 0x7; + guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | + reg_read(ohci, OHCI1394_GUIDLo); + + if (!(ohci->quirks & QUIRK_NO_MSI)) + pci_enable_msi(dev); + if (request_irq(dev->irq, irq_handler, + pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, + ohci_driver_name, ohci)) { + ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); + err = -EIO; + goto fail_msi; + } + + err = fw_card_add(&ohci->card, max_receive, link_speed, guid); + if (err) + goto fail_irq; + + version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; + ohci_notice(ohci, + "added OHCI v%x.%x device as card %d, " + "%d IR + %d IT contexts, quirks 0x%x%s\n", + version >> 16, version & 0xff, ohci->card.index, + ohci->n_ir, ohci->n_it, ohci->quirks, + reg_read(ohci, OHCI1394_PhyUpperBound) ? 
+ ", physUB" : ""); + + return 0; + + fail_irq: + free_irq(dev->irq, ohci); + fail_msi: + pci_disable_msi(dev); + fail_contexts: + kfree(ohci->ir_context_list); + kfree(ohci->it_context_list); + context_release(&ohci->at_response_ctx); + fail_atreq_ctx: + context_release(&ohci->at_request_ctx); + fail_arrsp_ctx: + ar_context_release(&ohci->ar_response_ctx); + fail_arreq_ctx: + ar_context_release(&ohci->ar_request_ctx); + fail_misc_buf: + dma_free_coherent(ohci->card.device, PAGE_SIZE, + ohci->misc_buffer, ohci->misc_buffer_bus); + fail_iounmap: + pci_iounmap(dev, ohci->registers); + fail_iomem: + pci_release_region(dev, 0); + fail_disable: + pci_disable_device(dev); + fail_free: + kfree(ohci); + pmac_ohci_off(dev); + fail: + return err; +} + +static void pci_remove(struct pci_dev *dev) +{ + struct fw_ohci *ohci = pci_get_drvdata(dev); + + /* + * If the removal is happening from the suspend state, LPS won't be + * enabled and host registers (eg., IntMaskClear) won't be accessible. + */ + if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) { + reg_write(ohci, OHCI1394_IntMaskClear, ~0); + flush_writes(ohci); + } + cancel_work_sync(&ohci->bus_reset_work); + fw_core_remove_card(&ohci->card); + + /* + * FIXME: Fail all pending packets here, now that the upper + * layers can't queue any more. + */ + + software_reset(ohci); + free_irq(dev->irq, ohci); + + if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + ohci->next_config_rom, ohci->next_config_rom_bus); + if (ohci->config_rom) + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + ohci->config_rom, ohci->config_rom_bus); + ar_context_release(&ohci->ar_request_ctx); + ar_context_release(&ohci->ar_response_ctx); + dma_free_coherent(ohci->card.device, PAGE_SIZE, + ohci->misc_buffer, ohci->misc_buffer_bus); + context_release(&ohci->at_request_ctx); + context_release(&ohci->at_response_ctx); + kfree(ohci->it_context_list); + kfree(ohci->ir_context_list); + pci_disable_msi(dev); + pci_iounmap(dev, ohci->registers); + pci_release_region(dev, 0); + pci_disable_device(dev); + kfree(ohci); + pmac_ohci_off(dev); + + dev_notice(&dev->dev, "removed fw-ohci device\n"); +} + +#ifdef CONFIG_PM +static int pci_suspend(struct pci_dev *dev, pm_message_t state) +{ + struct fw_ohci *ohci = pci_get_drvdata(dev); + int err; + + software_reset(ohci); + err = pci_save_state(dev); + if (err) { + ohci_err(ohci, "pci_save_state failed\n"); + return err; + } + err = pci_set_power_state(dev, pci_choose_state(dev, state)); + if (err) + ohci_err(ohci, "pci_set_power_state failed with %d\n", err); + pmac_ohci_off(dev); + + return 0; +} + +static int pci_resume(struct pci_dev *dev) +{ + struct fw_ohci *ohci = pci_get_drvdata(dev); + int err; + + pmac_ohci_on(dev); + pci_set_power_state(dev, PCI_D0); + pci_restore_state(dev); + err = pci_enable_device(dev); + if (err) { + ohci_err(ohci, "pci_enable_device failed\n"); + return err; + } + + /* Some systems don't setup GUID register on resume from ram */ + if (!reg_read(ohci, OHCI1394_GUIDLo) && + !reg_read(ohci, OHCI1394_GUIDHi)) { + reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); + reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); + } + + err = ohci_enable(&ohci->card, NULL, 0); + if (err) + return err; + + ohci_resume_iso_dma(ohci); + + return 0; +} +#endif + +static const struct pci_device_id pci_table[] = { + { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, + { } +}; + +MODULE_DEVICE_TABLE(pci, 
pci_table); + +static struct pci_driver fw_ohci_pci_driver = { + .name = ohci_driver_name, + .id_table = pci_table, + .probe = pci_probe, + .remove = pci_remove, +#ifdef CONFIG_PM + .resume = pci_resume, + .suspend = pci_suspend, +#endif +}; + +static int __init fw_ohci_init(void) +{ + selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0); + if (!selfid_workqueue) + return -ENOMEM; + + return pci_register_driver(&fw_ohci_pci_driver); +} + +static void __exit fw_ohci_cleanup(void) +{ + pci_unregister_driver(&fw_ohci_pci_driver); + destroy_workqueue(selfid_workqueue); +} + +module_init(fw_ohci_init); +module_exit(fw_ohci_cleanup); + +MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); +MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); +MODULE_LICENSE("GPL"); + +/* Provide a module alias so root-on-sbp2 initrds don't break. */ +MODULE_ALIAS("ohci1394"); diff --git a/kernel/drivers/firewire/ohci.h b/kernel/drivers/firewire/ohci.h new file mode 100644 index 000000000..ef5e7336d --- /dev/null +++ b/kernel/drivers/firewire/ohci.h @@ -0,0 +1,158 @@ +#ifndef _FIREWIRE_OHCI_H +#define _FIREWIRE_OHCI_H + +/* OHCI register map */ + +#define OHCI1394_Version 0x000 +#define OHCI1394_GUID_ROM 0x004 +#define OHCI1394_ATRetries 0x008 +#define OHCI1394_CSRData 0x00C +#define OHCI1394_CSRCompareData 0x010 +#define OHCI1394_CSRControl 0x014 +#define OHCI1394_ConfigROMhdr 0x018 +#define OHCI1394_BusID 0x01C +#define OHCI1394_BusOptions 0x020 +#define OHCI1394_GUIDHi 0x024 +#define OHCI1394_GUIDLo 0x028 +#define OHCI1394_ConfigROMmap 0x034 +#define OHCI1394_PostedWriteAddressLo 0x038 +#define OHCI1394_PostedWriteAddressHi 0x03C +#define OHCI1394_VendorID 0x040 +#define OHCI1394_HCControlSet 0x050 +#define OHCI1394_HCControlClear 0x054 +#define OHCI1394_HCControl_BIBimageValid 0x80000000 +#define OHCI1394_HCControl_noByteSwapData 0x40000000 +#define OHCI1394_HCControl_programPhyEnable 0x00800000 +#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000 +#define OHCI1394_HCControl_LPS 0x00080000 +#define OHCI1394_HCControl_postedWriteEnable 0x00040000 +#define OHCI1394_HCControl_linkEnable 0x00020000 +#define OHCI1394_HCControl_softReset 0x00010000 +#define OHCI1394_SelfIDBuffer 0x064 +#define OHCI1394_SelfIDCount 0x068 +#define OHCI1394_SelfIDCount_selfIDError 0x80000000 +#define OHCI1394_IRMultiChanMaskHiSet 0x070 +#define OHCI1394_IRMultiChanMaskHiClear 0x074 +#define OHCI1394_IRMultiChanMaskLoSet 0x078 +#define OHCI1394_IRMultiChanMaskLoClear 0x07C +#define OHCI1394_IntEventSet 0x080 +#define OHCI1394_IntEventClear 0x084 +#define OHCI1394_IntMaskSet 0x088 +#define OHCI1394_IntMaskClear 0x08C +#define OHCI1394_IsoXmitIntEventSet 0x090 +#define OHCI1394_IsoXmitIntEventClear 0x094 +#define OHCI1394_IsoXmitIntMaskSet 0x098 +#define OHCI1394_IsoXmitIntMaskClear 0x09C +#define OHCI1394_IsoRecvIntEventSet 0x0A0 +#define OHCI1394_IsoRecvIntEventClear 0x0A4 +#define OHCI1394_IsoRecvIntMaskSet 0x0A8 +#define OHCI1394_IsoRecvIntMaskClear 0x0AC +#define OHCI1394_InitialBandwidthAvailable 0x0B0 +#define OHCI1394_InitialChannelsAvailableHi 0x0B4 +#define OHCI1394_InitialChannelsAvailableLo 0x0B8 +#define OHCI1394_FairnessControl 0x0DC +#define OHCI1394_LinkControlSet 0x0E0 +#define OHCI1394_LinkControlClear 0x0E4 +#define OHCI1394_LinkControl_rcvSelfID (1 << 9) +#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10) +#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20) +#define OHCI1394_LinkControl_cycleMaster (1 << 21) +#define OHCI1394_LinkControl_cycleSource (1 << 22) +#define 
OHCI1394_NodeID 0x0E8 +#define OHCI1394_NodeID_idValid 0x80000000 +#define OHCI1394_NodeID_root 0x40000000 +#define OHCI1394_NodeID_nodeNumber 0x0000003f +#define OHCI1394_NodeID_busNumber 0x0000ffc0 +#define OHCI1394_PhyControl 0x0EC +#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000) +#define OHCI1394_PhyControl_ReadDone 0x80000000 +#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16) +#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000) +#define OHCI1394_PhyControl_WritePending 0x00004000 +#define OHCI1394_IsochronousCycleTimer 0x0F0 +#define OHCI1394_AsReqFilterHiSet 0x100 +#define OHCI1394_AsReqFilterHiClear 0x104 +#define OHCI1394_AsReqFilterLoSet 0x108 +#define OHCI1394_AsReqFilterLoClear 0x10C +#define OHCI1394_PhyReqFilterHiSet 0x110 +#define OHCI1394_PhyReqFilterHiClear 0x114 +#define OHCI1394_PhyReqFilterLoSet 0x118 +#define OHCI1394_PhyReqFilterLoClear 0x11C +#define OHCI1394_PhyUpperBound 0x120 + +#define OHCI1394_AsReqTrContextBase 0x180 +#define OHCI1394_AsReqTrContextControlSet 0x180 +#define OHCI1394_AsReqTrContextControlClear 0x184 +#define OHCI1394_AsReqTrCommandPtr 0x18C + +#define OHCI1394_AsRspTrContextBase 0x1A0 +#define OHCI1394_AsRspTrContextControlSet 0x1A0 +#define OHCI1394_AsRspTrContextControlClear 0x1A4 +#define OHCI1394_AsRspTrCommandPtr 0x1AC + +#define OHCI1394_AsReqRcvContextBase 0x1C0 +#define OHCI1394_AsReqRcvContextControlSet 0x1C0 +#define OHCI1394_AsReqRcvContextControlClear 0x1C4 +#define OHCI1394_AsReqRcvCommandPtr 0x1CC + +#define OHCI1394_AsRspRcvContextBase 0x1E0 +#define OHCI1394_AsRspRcvContextControlSet 0x1E0 +#define OHCI1394_AsRspRcvContextControlClear 0x1E4 +#define OHCI1394_AsRspRcvCommandPtr 0x1EC + +/* Isochronous transmit registers */ +#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n)) +#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n)) +#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n)) +#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n)) + +/* Isochronous receive registers */ +#define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n)) +#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n)) +#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n)) +#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n)) +#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n)) + +/* Interrupts Mask/Events */ +#define OHCI1394_reqTxComplete 0x00000001 +#define OHCI1394_respTxComplete 0x00000002 +#define OHCI1394_ARRQ 0x00000004 +#define OHCI1394_ARRS 0x00000008 +#define OHCI1394_RQPkt 0x00000010 +#define OHCI1394_RSPkt 0x00000020 +#define OHCI1394_isochTx 0x00000040 +#define OHCI1394_isochRx 0x00000080 +#define OHCI1394_postedWriteErr 0x00000100 +#define OHCI1394_lockRespErr 0x00000200 +#define OHCI1394_selfIDComplete 0x00010000 +#define OHCI1394_busReset 0x00020000 +#define OHCI1394_regAccessFail 0x00040000 +#define OHCI1394_phy 0x00080000 +#define OHCI1394_cycleSynch 0x00100000 +#define OHCI1394_cycle64Seconds 0x00200000 +#define OHCI1394_cycleLost 0x00400000 +#define OHCI1394_cycleInconsistent 0x00800000 +#define OHCI1394_unrecoverableError 0x01000000 +#define OHCI1394_cycleTooLong 0x02000000 +#define OHCI1394_phyRegRcvd 0x04000000 +#define OHCI1394_masterIntEnable 0x80000000 + +#define OHCI1394_evt_no_status 0x0 +#define OHCI1394_evt_long_packet 0x2 +#define OHCI1394_evt_missing_ack 0x3 +#define OHCI1394_evt_underrun 0x4 +#define OHCI1394_evt_overrun 0x5 +#define OHCI1394_evt_descriptor_read 0x6 +#define OHCI1394_evt_data_read 0x7 
+#define OHCI1394_evt_data_write 0x8 +#define OHCI1394_evt_bus_reset 0x9 +#define OHCI1394_evt_timeout 0xa +#define OHCI1394_evt_tcode_err 0xb +#define OHCI1394_evt_reserved_b 0xc +#define OHCI1394_evt_reserved_c 0xd +#define OHCI1394_evt_unknown 0xe +#define OHCI1394_evt_flushed 0xf + +#define OHCI1394_phy_tcode 0xe + +#endif /* _FIREWIRE_OHCI_H */ diff --git a/kernel/drivers/firewire/sbp2.c b/kernel/drivers/firewire/sbp2.c new file mode 100644 index 000000000..c22606fe3 --- /dev/null +++ b/kernel/drivers/firewire/sbp2.c @@ -0,0 +1,1638 @@ +/* + * SBP2 driver (SCSI over IEEE1394) + * + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * The basic structure of this driver is based on the old storage driver, + * drivers/ieee1394/sbp2.c, originally written by + * James Goodwin <jamesg@filanet.com> + * with later contributions and ongoing maintenance from + * Ben Collins <bcollins@debian.org>, + * Stefan Richter <stefanr@s5r6.in-berlin.de> + * and many others. + */ + +#include <linux/blkdev.h> +#include <linux/bug.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/stringify.h> +#include <linux/workqueue.h> + +#include <asm/byteorder.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> + +/* + * So far only bridges from Oxford Semiconductor are known to support + * concurrent logins. Depending on firmware, four or two concurrent logins + * are possible on OXFW911 and newer Oxsemi bridges. + * + * Concurrent logins are useful together with cluster filesystems. + */ +static bool sbp2_param_exclusive_login = 1; +module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644); +MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " + "(default = Y, use N for concurrent initiators)"); + +/* + * Flags for firmware oddities + * + * - 128kB max transfer + * Limit transfer size. Necessary for some old bridges. + * + * - 36 byte inquiry + * When scsi_mod probes the device, let the inquiry command look like that + * from MS Windows. + * + * - skip mode page 8 + * Suppress sending of mode_sense for mode page 8 if the device pretends to + * support the SCSI Primary Block commands instead of Reduced Block Commands. 
+ * + * - fix capacity + * Tell sd_mod to correct the last sector number reported by read_capacity. + * Avoids access beyond actual disk limits on devices with an off-by-one bug. + * Don't use this with devices which don't have this bug. + * + * - delay inquiry + * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. + * + * - power condition + * Set the power condition field in the START STOP UNIT commands sent by + * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on). + * Some disks need this to spin down or to resume properly. + * + * - override internal blacklist + * Instead of adding to the built-in blacklist, use only the workarounds + * specified in the module load parameter. + * Useful if a blacklist entry interfered with a non-broken device. + */ +#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 +#define SBP2_WORKAROUND_INQUIRY_36 0x2 +#define SBP2_WORKAROUND_MODE_SENSE_8 0x4 +#define SBP2_WORKAROUND_FIX_CAPACITY 0x8 +#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 +#define SBP2_INQUIRY_DELAY 12 +#define SBP2_WORKAROUND_POWER_CONDITION 0x20 +#define SBP2_WORKAROUND_OVERRIDE 0x100 + +static int sbp2_param_workarounds; +module_param_named(workarounds, sbp2_param_workarounds, int, 0644); +MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" + ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS) + ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) + ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) + ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) + ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) + ", set power condition in start stop unit = " + __stringify(SBP2_WORKAROUND_POWER_CONDITION) + ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) + ", or a combination)"); + +/* + * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry + * and one struct scsi_device per sbp2_logical_unit. + */ +struct sbp2_logical_unit { + struct sbp2_target *tgt; + struct list_head link; + struct fw_address_handler address_handler; + struct list_head orb_list; + + u64 command_block_agent_address; + u16 lun; + int login_id; + + /* + * The generation is updated once we've logged in or reconnected + * to the logical unit. Thus, I/O to the device will automatically + * fail and get retried if it happens in a window where the device + * is not ready, e.g. after a bus reset but before we reconnect. + */ + int generation; + int retries; + work_func_t workfn; + struct delayed_work work; + bool has_sdev; + bool blocked; +}; + +static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) +{ + queue_delayed_work(fw_workqueue, &lu->work, delay); +} + +/* + * We create one struct sbp2_target per IEEE 1212 Unit Directory + * and one struct Scsi_Host per sbp2_target. 
+ */ +struct sbp2_target { + struct fw_unit *unit; + struct list_head lu_list; + + u64 management_agent_address; + u64 guid; + int directory_id; + int node_id; + int address_high; + unsigned int workarounds; + unsigned int mgt_orb_timeout; + unsigned int max_payload; + + spinlock_t lock; + int dont_block; /* counter for each logical unit */ + int blocked; /* ditto */ +}; + +static struct fw_device *target_parent_device(struct sbp2_target *tgt) +{ + return fw_parent_device(tgt->unit); +} + +static const struct device *tgt_dev(const struct sbp2_target *tgt) +{ + return &tgt->unit->device; +} + +static const struct device *lu_dev(const struct sbp2_logical_unit *lu) +{ + return &lu->tgt->unit->device; +} + +/* Impossible login_id, to detect logout attempt before successful login */ +#define INVALID_LOGIN_ID 0x10000 + +#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ +#define SBP2_ORB_NULL 0x80000000 +#define SBP2_RETRY_LIMIT 0xf /* 15 retries */ +#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ + +/* + * There is no transport protocol limit to the CDB length, but we implement + * a fixed length only. 16 bytes is enough for disks larger than 2 TB. + */ +#define SBP2_MAX_CDB_SIZE 16 + +/* + * The maximum SBP-2 data buffer size is 0xffff. We quadlet-align this + * for compatibility with earlier versions of this driver. + */ +#define SBP2_MAX_SEG_SIZE 0xfffc + +/* Unit directory keys */ +#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a +#define SBP2_CSR_FIRMWARE_REVISION 0x3c +#define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14 +#define SBP2_CSR_UNIT_UNIQUE_ID 0x8d +#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4 + +/* Management orb opcodes */ +#define SBP2_LOGIN_REQUEST 0x0 +#define SBP2_QUERY_LOGINS_REQUEST 0x1 +#define SBP2_RECONNECT_REQUEST 0x3 +#define SBP2_SET_PASSWORD_REQUEST 0x4 +#define SBP2_LOGOUT_REQUEST 0x7 +#define SBP2_ABORT_TASK_REQUEST 0xb +#define SBP2_ABORT_TASK_SET 0xc +#define SBP2_LOGICAL_UNIT_RESET 0xe +#define SBP2_TARGET_RESET_REQUEST 0xf + +/* Offsets for command block agent registers */ +#define SBP2_AGENT_STATE 0x00 +#define SBP2_AGENT_RESET 0x04 +#define SBP2_ORB_POINTER 0x08 +#define SBP2_DOORBELL 0x10 +#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14 + +/* Status write response codes */ +#define SBP2_STATUS_REQUEST_COMPLETE 0x0 +#define SBP2_STATUS_TRANSPORT_FAILURE 0x1 +#define SBP2_STATUS_ILLEGAL_REQUEST 0x2 +#define SBP2_STATUS_VENDOR_DEPENDENT 0x3 + +#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff) +#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff) +#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07) +#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01) +#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03) +#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03) +#define STATUS_GET_ORB_LOW(v) ((v).orb_low) +#define STATUS_GET_DATA(v) ((v).data) + +struct sbp2_status { + u32 status; + u32 orb_low; + u8 data[24]; +}; + +struct sbp2_pointer { + __be32 high; + __be32 low; +}; + +struct sbp2_orb { + struct fw_transaction t; + struct kref kref; + dma_addr_t request_bus; + int rcode; + void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); + struct sbp2_logical_unit *lu; + struct list_head link; +}; + +#define MANAGEMENT_ORB_LUN(v) ((v)) +#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16) +#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20) +#define MANAGEMENT_ORB_EXCLUSIVE(v) ((v) ? 
1 << 28 : 0) +#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29) +#define MANAGEMENT_ORB_NOTIFY ((1) << 31) + +#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v)) +#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16) + +struct sbp2_management_orb { + struct sbp2_orb base; + struct { + struct sbp2_pointer password; + struct sbp2_pointer response; + __be32 misc; + __be32 length; + struct sbp2_pointer status_fifo; + } request; + __be32 response[4]; + dma_addr_t response_bus; + struct completion done; + struct sbp2_status status; +}; + +struct sbp2_login_response { + __be32 misc; + struct sbp2_pointer command_block_agent; + __be32 reconnect_hold; +}; +#define COMMAND_ORB_DATA_SIZE(v) ((v)) +#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16) +#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19) +#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20) +#define COMMAND_ORB_SPEED(v) ((v) << 24) +#define COMMAND_ORB_DIRECTION ((1) << 27) +#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29) +#define COMMAND_ORB_NOTIFY ((1) << 31) + +struct sbp2_command_orb { + struct sbp2_orb base; + struct { + struct sbp2_pointer next; + struct sbp2_pointer data_descriptor; + __be32 misc; + u8 command_block[SBP2_MAX_CDB_SIZE]; + } request; + struct scsi_cmnd *cmd; + + struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); + dma_addr_t page_table_bus; +}; + +#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */ +#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */ + +/* + * List of devices with known bugs. + * + * The firmware_revision field, masked with 0xffff00, is the best + * indicator for the type of bridge chip of a device. It yields a few + * false positives but this did not break correctly behaving devices + * so far. + */ +static const struct { + u32 firmware_revision; + u32 model; + unsigned int workarounds; +} sbp2_workarounds_table[] = { + /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { + .firmware_revision = 0x002800, + .model = 0x001010, + .workarounds = SBP2_WORKAROUND_INQUIRY_36 | + SBP2_WORKAROUND_MODE_SENSE_8 | + SBP2_WORKAROUND_POWER_CONDITION, + }, + /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { + .firmware_revision = 0x002800, + .model = 0x000000, + .workarounds = SBP2_WORKAROUND_POWER_CONDITION, + }, + /* Initio bridges, actually only needed for some older ones */ { + .firmware_revision = 0x000200, + .model = SBP2_ROM_VALUE_WILDCARD, + .workarounds = SBP2_WORKAROUND_INQUIRY_36, + }, + /* PL-3507 bridge with Prolific firmware */ { + .firmware_revision = 0x012800, + .model = SBP2_ROM_VALUE_WILDCARD, + .workarounds = SBP2_WORKAROUND_POWER_CONDITION, + }, + /* Symbios bridge */ { + .firmware_revision = 0xa0b800, + .model = SBP2_ROM_VALUE_WILDCARD, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, + }, + /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { + .firmware_revision = 0x002600, + .model = SBP2_ROM_VALUE_WILDCARD, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, + }, + /* + * iPod 2nd generation: needs 128k max transfer size workaround + * iPod 3rd generation: needs fix capacity workaround + */ + { + .firmware_revision = 0x0a2700, + .model = 0x000000, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS | + SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod 4th generation */ { + .firmware_revision = 0x0a2700, + .model = 0x000021, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod mini */ { + .firmware_revision = 0x0a2700, + .model = 0x000022, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod mini */ { + .firmware_revision = 0x0a2700, + .model = 
0x000023, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod Photo */ { + .firmware_revision = 0x0a2700, + .model = 0x00007e, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + } +}; + +static void free_orb(struct kref *kref) +{ + struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); + + kfree(orb); +} + +static void sbp2_status_write(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, + int generation, unsigned long long offset, + void *payload, size_t length, void *callback_data) +{ + struct sbp2_logical_unit *lu = callback_data; + struct sbp2_orb *orb; + struct sbp2_status status; + unsigned long flags; + + if (tcode != TCODE_WRITE_BLOCK_REQUEST || + length < 8 || length > sizeof(status)) { + fw_send_response(card, request, RCODE_TYPE_ERROR); + return; + } + + status.status = be32_to_cpup(payload); + status.orb_low = be32_to_cpup(payload + 4); + memset(status.data, 0, sizeof(status.data)); + if (length > 8) + memcpy(status.data, payload + 8, length - 8); + + if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { + dev_notice(lu_dev(lu), + "non-ORB related status write, not handled\n"); + fw_send_response(card, request, RCODE_COMPLETE); + return; + } + + /* Lookup the orb corresponding to this status write. */ + spin_lock_irqsave(&lu->tgt->lock, flags); + list_for_each_entry(orb, &lu->orb_list, link) { + if (STATUS_GET_ORB_HIGH(status) == 0 && + STATUS_GET_ORB_LOW(status) == orb->request_bus) { + orb->rcode = RCODE_COMPLETE; + list_del(&orb->link); + break; + } + } + spin_unlock_irqrestore(&lu->tgt->lock, flags); + + if (&orb->link != &lu->orb_list) { + orb->callback(orb, &status); + kref_put(&orb->kref, free_orb); /* orb callback reference */ + } else { + dev_err(lu_dev(lu), "status write for unknown ORB\n"); + } + + fw_send_response(card, request, RCODE_COMPLETE); +} + +static void complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct sbp2_orb *orb = data; + unsigned long flags; + + /* + * This is a little tricky. We can get the status write for + * the orb before we get this callback. The status write + * handler above will assume the orb pointer transaction was + * successful and set the rcode to RCODE_COMPLETE for the orb. + * So this callback only sets the rcode if it hasn't already + * been set and only does the cleanup if the transaction + * failed and we didn't already get a status write. 
+ */ + spin_lock_irqsave(&orb->lu->tgt->lock, flags); + + if (orb->rcode == -1) + orb->rcode = rcode; + if (orb->rcode != RCODE_COMPLETE) { + list_del(&orb->link); + spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); + + orb->callback(orb, NULL); + kref_put(&orb->kref, free_orb); /* orb callback reference */ + } else { + spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); + } + + kref_put(&orb->kref, free_orb); /* transaction callback reference */ +} + +static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, + int node_id, int generation, u64 offset) +{ + struct fw_device *device = target_parent_device(lu->tgt); + struct sbp2_pointer orb_pointer; + unsigned long flags; + + orb_pointer.high = 0; + orb_pointer.low = cpu_to_be32(orb->request_bus); + + orb->lu = lu; + spin_lock_irqsave(&lu->tgt->lock, flags); + list_add_tail(&orb->link, &lu->orb_list); + spin_unlock_irqrestore(&lu->tgt->lock, flags); + + kref_get(&orb->kref); /* transaction callback reference */ + kref_get(&orb->kref); /* orb callback reference */ + + fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, + node_id, generation, device->max_speed, offset, + &orb_pointer, 8, complete_transaction, orb); +} + +static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) +{ + struct fw_device *device = target_parent_device(lu->tgt); + struct sbp2_orb *orb, *next; + struct list_head list; + int retval = -ENOENT; + + INIT_LIST_HEAD(&list); + spin_lock_irq(&lu->tgt->lock); + list_splice_init(&lu->orb_list, &list); + spin_unlock_irq(&lu->tgt->lock); + + list_for_each_entry_safe(orb, next, &list, link) { + retval = 0; + if (fw_cancel_transaction(device->card, &orb->t) == 0) + continue; + + orb->rcode = RCODE_CANCELLED; + orb->callback(orb, NULL); + kref_put(&orb->kref, free_orb); /* orb callback reference */ + } + + return retval; +} + +static void complete_management_orb(struct sbp2_orb *base_orb, + struct sbp2_status *status) +{ + struct sbp2_management_orb *orb = + container_of(base_orb, struct sbp2_management_orb, base); + + if (status) + memcpy(&orb->status, status, sizeof(*status)); + complete(&orb->done); +} + +static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, + int generation, int function, + int lun_or_login_id, void *response) +{ + struct fw_device *device = target_parent_device(lu->tgt); + struct sbp2_management_orb *orb; + unsigned int timeout; + int retval = -ENOMEM; + + if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) + return 0; + + orb = kzalloc(sizeof(*orb), GFP_NOIO); + if (orb == NULL) + return -ENOMEM; + + kref_init(&orb->base.kref); + orb->response_bus = + dma_map_single(device->card->device, &orb->response, + sizeof(orb->response), DMA_FROM_DEVICE); + if (dma_mapping_error(device->card->device, orb->response_bus)) + goto fail_mapping_response; + + orb->request.response.high = 0; + orb->request.response.low = cpu_to_be32(orb->response_bus); + + orb->request.misc = cpu_to_be32( + MANAGEMENT_ORB_NOTIFY | + MANAGEMENT_ORB_FUNCTION(function) | + MANAGEMENT_ORB_LUN(lun_or_login_id)); + orb->request.length = cpu_to_be32( + MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response))); + + orb->request.status_fifo.high = + cpu_to_be32(lu->address_handler.offset >> 32); + orb->request.status_fifo.low = + cpu_to_be32(lu->address_handler.offset); + + if (function == SBP2_LOGIN_REQUEST) { + /* Ask for 2^2 == 4 seconds reconnect grace period */ + orb->request.misc |= cpu_to_be32( + MANAGEMENT_ORB_RECONNECT(2) | + 
MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login)); + timeout = lu->tgt->mgt_orb_timeout; + } else { + timeout = SBP2_ORB_TIMEOUT; + } + + init_completion(&orb->done); + orb->base.callback = complete_management_orb; + + orb->base.request_bus = + dma_map_single(device->card->device, &orb->request, + sizeof(orb->request), DMA_TO_DEVICE); + if (dma_mapping_error(device->card->device, orb->base.request_bus)) + goto fail_mapping_request; + + sbp2_send_orb(&orb->base, lu, node_id, generation, + lu->tgt->management_agent_address); + + wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout)); + + retval = -EIO; + if (sbp2_cancel_orbs(lu) == 0) { + dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n", + orb->base.rcode); + goto out; + } + + if (orb->base.rcode != RCODE_COMPLETE) { + dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n", + orb->base.rcode); + goto out; + } + + if (STATUS_GET_RESPONSE(orb->status) != 0 || + STATUS_GET_SBP_STATUS(orb->status) != 0) { + dev_err(lu_dev(lu), "error status: %d:%d\n", + STATUS_GET_RESPONSE(orb->status), + STATUS_GET_SBP_STATUS(orb->status)); + goto out; + } + + retval = 0; + out: + dma_unmap_single(device->card->device, orb->base.request_bus, + sizeof(orb->request), DMA_TO_DEVICE); + fail_mapping_request: + dma_unmap_single(device->card->device, orb->response_bus, + sizeof(orb->response), DMA_FROM_DEVICE); + fail_mapping_response: + if (response) + memcpy(response, orb->response, sizeof(orb->response)); + kref_put(&orb->base.kref, free_orb); + + return retval; +} + +static void sbp2_agent_reset(struct sbp2_logical_unit *lu) +{ + struct fw_device *device = target_parent_device(lu->tgt); + __be32 d = 0; + + fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, + lu->tgt->node_id, lu->generation, device->max_speed, + lu->command_block_agent_address + SBP2_AGENT_RESET, + &d, 4); +} + +static void complete_agent_reset_write_no_wait(struct fw_card *card, + int rcode, void *payload, size_t length, void *data) +{ + kfree(data); +} + +static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) +{ + struct fw_device *device = target_parent_device(lu->tgt); + struct fw_transaction *t; + static __be32 d; + + t = kmalloc(sizeof(*t), GFP_ATOMIC); + if (t == NULL) + return; + + fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, + lu->tgt->node_id, lu->generation, device->max_speed, + lu->command_block_agent_address + SBP2_AGENT_RESET, + &d, 4, complete_agent_reset_write_no_wait, t); +} + +static inline void sbp2_allow_block(struct sbp2_target *tgt) +{ + spin_lock_irq(&tgt->lock); + --tgt->dont_block; + spin_unlock_irq(&tgt->lock); +} + +/* + * Blocks lu->tgt if all of the following conditions are met: + * - Login, INQUIRY, and high-level SCSI setup of all of the target's + * logical units have been finished (indicated by dont_block == 0). + * - lu->generation is stale. + * + * Note, scsi_block_requests() must be called while holding tgt->lock, + * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to + * unblock the target. 
+ */ +static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) +{ + struct sbp2_target *tgt = lu->tgt; + struct fw_card *card = target_parent_device(tgt)->card; + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + unsigned long flags; + + spin_lock_irqsave(&tgt->lock, flags); + if (!tgt->dont_block && !lu->blocked && + lu->generation != card->generation) { + lu->blocked = true; + if (++tgt->blocked == 1) + scsi_block_requests(shost); + } + spin_unlock_irqrestore(&tgt->lock, flags); +} + +/* + * Unblocks lu->tgt as soon as all its logical units can be unblocked. + * Note, it is harmless to run scsi_unblock_requests() outside the + * tgt->lock protected section. On the other hand, running it inside + * the section might clash with shost->host_lock. + */ +static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) +{ + struct sbp2_target *tgt = lu->tgt; + struct fw_card *card = target_parent_device(tgt)->card; + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + bool unblock = false; + + spin_lock_irq(&tgt->lock); + if (lu->blocked && lu->generation == card->generation) { + lu->blocked = false; + unblock = --tgt->blocked == 0; + } + spin_unlock_irq(&tgt->lock); + + if (unblock) + scsi_unblock_requests(shost); +} + +/* + * Prevents future blocking of tgt and unblocks it. + * Note, it is harmless to run scsi_unblock_requests() outside the + * tgt->lock protected section. On the other hand, running it inside + * the section might clash with shost->host_lock. + */ +static void sbp2_unblock(struct sbp2_target *tgt) +{ + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + + spin_lock_irq(&tgt->lock); + ++tgt->dont_block; + spin_unlock_irq(&tgt->lock); + + scsi_unblock_requests(shost); +} + +static int sbp2_lun2int(u16 lun) +{ + struct scsi_lun eight_bytes_lun; + + memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); + eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; + eight_bytes_lun.scsi_lun[1] = lun & 0xff; + + return scsilun_to_int(&eight_bytes_lun); +} + +/* + * Write retransmit retry values into the BUSY_TIMEOUT register. + * - The single-phase retry protocol is supported by all SBP-2 devices, but the + * default retry_limit value is 0 (i.e. never retry transmission). We write a + * saner value after logging into the device. + * - The dual-phase retry protocol is optional to implement, and if not + * supported, writes to the dual-phase portion of the register will be + * ignored. We try to write the original 1394-1995 default here. + * - In the case of devices that are also SBP-3-compliant, all writes are + * ignored, as the register is read-only, but contains single-phase retry of + * 15, which is what we're trying to set for all SBP-2 device anyway, so this + * write attempt is safe and yields more consistent behavior for all devices. + * + * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec, + * and section 6.4 of the SBP-3 spec for further details. 
+ */ +static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) +{ + struct fw_device *device = target_parent_device(lu->tgt); + __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); + + fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, + lu->tgt->node_id, lu->generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4); +} + +static void sbp2_reconnect(struct work_struct *work); + +static void sbp2_login(struct work_struct *work) +{ + struct sbp2_logical_unit *lu = + container_of(work, struct sbp2_logical_unit, work.work); + struct sbp2_target *tgt = lu->tgt; + struct fw_device *device = target_parent_device(tgt); + struct Scsi_Host *shost; + struct scsi_device *sdev; + struct sbp2_login_response response; + int generation, node_id, local_node_id; + + if (fw_device_is_shutdown(device)) + return; + + generation = device->generation; + smp_rmb(); /* node IDs must not be older than generation */ + node_id = device->node_id; + local_node_id = device->card->node_id; + + /* If this is a re-login attempt, log out, or we might be rejected. */ + if (lu->has_sdev) + sbp2_send_management_orb(lu, device->node_id, generation, + SBP2_LOGOUT_REQUEST, lu->login_id, NULL); + + if (sbp2_send_management_orb(lu, node_id, generation, + SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { + if (lu->retries++ < 5) { + sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); + } else { + dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n", + lu->lun); + /* Let any waiting I/O fail from now on. */ + sbp2_unblock(lu->tgt); + } + return; + } + + tgt->node_id = node_id; + tgt->address_high = local_node_id << 16; + smp_wmb(); /* node IDs must not be older than generation */ + lu->generation = generation; + + lu->command_block_agent_address = + ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff) + << 32) | be32_to_cpu(response.command_block_agent.low); + lu->login_id = be32_to_cpu(response.misc) & 0xffff; + + dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n", + lu->lun, lu->retries); + + /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ + sbp2_set_busy_timeout(lu); + + lu->workfn = sbp2_reconnect; + sbp2_agent_reset(lu); + + /* This was a re-login. */ + if (lu->has_sdev) { + sbp2_cancel_orbs(lu); + sbp2_conditionally_unblock(lu); + + return; + } + + if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) + ssleep(SBP2_INQUIRY_DELAY); + + shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); + /* + * FIXME: We are unable to perform reconnects while in sbp2_login(). + * Therefore __scsi_add_device() will get into trouble if a bus reset + * happens in parallel. It will either fail or leave us with an + * unusable sdev. As a workaround we check for this and retry the + * whole login and SCSI probing. 
+ */ + + /* Reported error during __scsi_add_device() */ + if (IS_ERR(sdev)) + goto out_logout_login; + + /* Unreported error during __scsi_add_device() */ + smp_rmb(); /* get current card generation */ + if (generation != device->card->generation) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + goto out_logout_login; + } + + /* No error during __scsi_add_device() */ + lu->has_sdev = true; + scsi_device_put(sdev); + sbp2_allow_block(tgt); + + return; + + out_logout_login: + smp_rmb(); /* generation may have changed */ + generation = device->generation; + smp_rmb(); /* node_id must not be older than generation */ + + sbp2_send_management_orb(lu, device->node_id, generation, + SBP2_LOGOUT_REQUEST, lu->login_id, NULL); + /* + * If a bus reset happened, sbp2_update will have requeued + * lu->work already. Reset the work from reconnect to login. + */ + lu->workfn = sbp2_login; +} + +static void sbp2_reconnect(struct work_struct *work) +{ + struct sbp2_logical_unit *lu = + container_of(work, struct sbp2_logical_unit, work.work); + struct sbp2_target *tgt = lu->tgt; + struct fw_device *device = target_parent_device(tgt); + int generation, node_id, local_node_id; + + if (fw_device_is_shutdown(device)) + return; + + generation = device->generation; + smp_rmb(); /* node IDs must not be older than generation */ + node_id = device->node_id; + local_node_id = device->card->node_id; + + if (sbp2_send_management_orb(lu, node_id, generation, + SBP2_RECONNECT_REQUEST, + lu->login_id, NULL) < 0) { + /* + * If reconnect was impossible even though we are in the + * current generation, fall back and try to log in again. + * + * We could check for "Function rejected" status, but + * looking at the bus generation as simpler and more general. + */ + smp_rmb(); /* get current card generation */ + if (generation == device->card->generation || + lu->retries++ >= 5) { + dev_err(tgt_dev(tgt), "failed to reconnect\n"); + lu->retries = 0; + lu->workfn = sbp2_login; + } + sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); + + return; + } + + tgt->node_id = node_id; + tgt->address_high = local_node_id << 16; + smp_wmb(); /* node IDs must not be older than generation */ + lu->generation = generation; + + dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n", + lu->lun, lu->retries); + + sbp2_agent_reset(lu); + sbp2_cancel_orbs(lu); + sbp2_conditionally_unblock(lu); +} + +static void sbp2_lu_workfn(struct work_struct *work) +{ + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), + struct sbp2_logical_unit, work); + lu->workfn(work); +} + +static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) +{ + struct sbp2_logical_unit *lu; + + lu = kmalloc(sizeof(*lu), GFP_KERNEL); + if (!lu) + return -ENOMEM; + + lu->address_handler.length = 0x100; + lu->address_handler.address_callback = sbp2_status_write; + lu->address_handler.callback_data = lu; + + if (fw_core_add_address_handler(&lu->address_handler, + &fw_high_memory_region) < 0) { + kfree(lu); + return -ENOMEM; + } + + lu->tgt = tgt; + lu->lun = lun_entry & 0xffff; + lu->login_id = INVALID_LOGIN_ID; + lu->retries = 0; + lu->has_sdev = false; + lu->blocked = false; + ++tgt->dont_block; + INIT_LIST_HEAD(&lu->orb_list); + lu->workfn = sbp2_login; + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); + + list_add_tail(&lu->link, &tgt->lu_list); + return 0; +} + +static void sbp2_get_unit_unique_id(struct sbp2_target *tgt, + const u32 *leaf) +{ + if ((leaf[0] & 0xffff0000) == 0x00020000) + tgt->guid = (u64)leaf[1] << 32 | leaf[2]; +} + +static 
int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, + const u32 *directory) +{ + struct fw_csr_iterator ci; + int key, value; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) + if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER && + sbp2_add_logical_unit(tgt, value) < 0) + return -ENOMEM; + return 0; +} + +static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory, + u32 *model, u32 *firmware_revision) +{ + struct fw_csr_iterator ci; + int key, value; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + switch (key) { + + case CSR_DEPENDENT_INFO | CSR_OFFSET: + tgt->management_agent_address = + CSR_REGISTER_BASE + 4 * value; + break; + + case CSR_DIRECTORY_ID: + tgt->directory_id = value; + break; + + case CSR_MODEL: + *model = value; + break; + + case SBP2_CSR_FIRMWARE_REVISION: + *firmware_revision = value; + break; + + case SBP2_CSR_UNIT_CHARACTERISTICS: + /* the timeout value is stored in 500ms units */ + tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500; + break; + + case SBP2_CSR_LOGICAL_UNIT_NUMBER: + if (sbp2_add_logical_unit(tgt, value) < 0) + return -ENOMEM; + break; + + case SBP2_CSR_UNIT_UNIQUE_ID: + sbp2_get_unit_unique_id(tgt, ci.p - 1 + value); + break; + + case SBP2_CSR_LOGICAL_UNIT_DIRECTORY: + /* Adjust for the increment in the iterator */ + if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) + return -ENOMEM; + break; + } + } + return 0; +} + +/* + * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be + * provided in the config rom. Most devices do provide a value, which + * we'll use for login management orbs, but with some sane limits. + */ +static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) +{ + unsigned int timeout = tgt->mgt_orb_timeout; + + if (timeout > 40000) + dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n", + timeout / 1000); + + tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); +} + +static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, + u32 firmware_revision) +{ + int i; + unsigned int w = sbp2_param_workarounds; + + if (w) + dev_notice(tgt_dev(tgt), + "Please notify linux1394-devel@lists.sf.net " + "if you need the workarounds parameter\n"); + + if (w & SBP2_WORKAROUND_OVERRIDE) + goto out; + + for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { + + if (sbp2_workarounds_table[i].firmware_revision != + (firmware_revision & 0xffffff00)) + continue; + + if (sbp2_workarounds_table[i].model != model && + sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD) + continue; + + w |= sbp2_workarounds_table[i].workarounds; + break; + } + out: + if (w) + dev_notice(tgt_dev(tgt), "workarounds 0x%x " + "(firmware_revision 0x%06x, model_id 0x%06x)\n", + w, firmware_revision, model); + tgt->workarounds = w; +} + +static struct scsi_host_template scsi_driver_template; +static void sbp2_remove(struct fw_unit *unit); + +static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) +{ + struct fw_device *device = fw_parent_device(unit); + struct sbp2_target *tgt; + struct sbp2_logical_unit *lu; + struct Scsi_Host *shost; + u32 model, firmware_revision; + + /* cannot (or should not) handle targets on the local node */ + if (device->is_local) + return -ENODEV; + + if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) + WARN_ON(dma_set_max_seg_size(device->card->device, + SBP2_MAX_SEG_SIZE)); + + shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); + if 
(shost == NULL) + return -ENOMEM; + + tgt = (struct sbp2_target *)shost->hostdata; + dev_set_drvdata(&unit->device, tgt); + tgt->unit = unit; + INIT_LIST_HEAD(&tgt->lu_list); + spin_lock_init(&tgt->lock); + tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; + + if (fw_device_enable_phys_dma(device) < 0) + goto fail_shost_put; + + shost->max_cmd_len = SBP2_MAX_CDB_SIZE; + + if (scsi_add_host_with_dma(shost, &unit->device, + device->card->device) < 0) + goto fail_shost_put; + + /* implicit directory ID */ + tgt->directory_id = ((unit->directory - device->config_rom) * 4 + + CSR_CONFIG_ROM) & 0xffffff; + + firmware_revision = SBP2_ROM_VALUE_MISSING; + model = SBP2_ROM_VALUE_MISSING; + + if (sbp2_scan_unit_dir(tgt, unit->directory, &model, + &firmware_revision) < 0) + goto fail_remove; + + sbp2_clamp_management_orb_timeout(tgt); + sbp2_init_workarounds(tgt, model, firmware_revision); + + /* + * At S100 we can do 512 bytes per packet, at S200 1024 bytes, + * and so on up to 4096 bytes. The SBP-2 max_payload field + * specifies the max payload size as 2 ^ (max_payload + 2), so + * if we set this to max_speed + 7, we get the right value. + */ + tgt->max_payload = min3(device->max_speed + 7, 10U, + device->card->max_receive - 1); + + /* Do the login in a workqueue so we can easily reschedule retries. */ + list_for_each_entry(lu, &tgt->lu_list, link) + sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); + + return 0; + + fail_remove: + sbp2_remove(unit); + return -ENOMEM; + + fail_shost_put: + scsi_host_put(shost); + return -ENOMEM; +} + +static void sbp2_update(struct fw_unit *unit) +{ + struct sbp2_target *tgt = dev_get_drvdata(&unit->device); + struct sbp2_logical_unit *lu; + + fw_device_enable_phys_dma(fw_parent_device(unit)); + + /* + * Fw-core serializes sbp2_update() against sbp2_remove(). + * Iteration over tgt->lu_list is therefore safe here. + */ + list_for_each_entry(lu, &tgt->lu_list, link) { + sbp2_conditionally_block(lu); + lu->retries = 0; + sbp2_queue_work(lu, 0); + } +} + +static void sbp2_remove(struct fw_unit *unit) +{ + struct fw_device *device = fw_parent_device(unit); + struct sbp2_target *tgt = dev_get_drvdata(&unit->device); + struct sbp2_logical_unit *lu, *next; + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + struct scsi_device *sdev; + + /* prevent deadlocks */ + sbp2_unblock(tgt); + + list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { + cancel_delayed_work_sync(&lu->work); + sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + if (lu->login_id != INVALID_LOGIN_ID) { + int generation, node_id; + /* + * tgt->node_id may be obsolete here if we failed + * during initial login or after a bus reset where + * the topology changed. + */ + generation = device->generation; + smp_rmb(); /* node_id vs. 
generation */ + node_id = device->node_id; + sbp2_send_management_orb(lu, node_id, generation, + SBP2_LOGOUT_REQUEST, + lu->login_id, NULL); + } + fw_core_remove_address_handler(&lu->address_handler); + list_del(&lu->link); + kfree(lu); + } + scsi_remove_host(shost); + dev_notice(&unit->device, "released target %d:0:0\n", shost->host_no); + + scsi_host_put(shost); +} + +#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e +#define SBP2_SW_VERSION_ENTRY 0x00010483 + +static const struct ieee1394_device_id sbp2_id_table[] = { + { + .match_flags = IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY, + .version = SBP2_SW_VERSION_ENTRY, + }, + { } +}; + +static struct fw_driver sbp2_driver = { + .driver = { + .owner = THIS_MODULE, + .name = KBUILD_MODNAME, + .bus = &fw_bus_type, + }, + .probe = sbp2_probe, + .update = sbp2_update, + .remove = sbp2_remove, + .id_table = sbp2_id_table, +}; + +static void sbp2_unmap_scatterlist(struct device *card_device, + struct sbp2_command_orb *orb) +{ + scsi_dma_unmap(orb->cmd); + + if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT)) + dma_unmap_single(card_device, orb->page_table_bus, + sizeof(orb->page_table), DMA_TO_DEVICE); +} + +static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) +{ + int sam_status; + int sfmt = (sbp2_status[0] >> 6) & 0x03; + + if (sfmt == 2 || sfmt == 3) { + /* + * Reserved for future standardization (2) or + * Status block format vendor-dependent (3) + */ + return DID_ERROR << 16; + } + + sense_data[0] = 0x70 | sfmt | (sbp2_status[1] & 0x80); + sense_data[1] = 0x0; + sense_data[2] = ((sbp2_status[1] << 1) & 0xe0) | (sbp2_status[1] & 0x0f); + sense_data[3] = sbp2_status[4]; + sense_data[4] = sbp2_status[5]; + sense_data[5] = sbp2_status[6]; + sense_data[6] = sbp2_status[7]; + sense_data[7] = 10; + sense_data[8] = sbp2_status[8]; + sense_data[9] = sbp2_status[9]; + sense_data[10] = sbp2_status[10]; + sense_data[11] = sbp2_status[11]; + sense_data[12] = sbp2_status[2]; + sense_data[13] = sbp2_status[3]; + sense_data[14] = sbp2_status[12]; + sense_data[15] = sbp2_status[13]; + + sam_status = sbp2_status[0] & 0x3f; + + switch (sam_status) { + case SAM_STAT_GOOD: + case SAM_STAT_CHECK_CONDITION: + case SAM_STAT_CONDITION_MET: + case SAM_STAT_BUSY: + case SAM_STAT_RESERVATION_CONFLICT: + case SAM_STAT_COMMAND_TERMINATED: + return DID_OK << 16 | sam_status; + + default: + return DID_ERROR << 16; + } +} + +static void complete_command_orb(struct sbp2_orb *base_orb, + struct sbp2_status *status) +{ + struct sbp2_command_orb *orb = + container_of(base_orb, struct sbp2_command_orb, base); + struct fw_device *device = target_parent_device(base_orb->lu->tgt); + int result; + + if (status != NULL) { + if (STATUS_GET_DEAD(*status)) + sbp2_agent_reset_no_wait(base_orb->lu); + + switch (STATUS_GET_RESPONSE(*status)) { + case SBP2_STATUS_REQUEST_COMPLETE: + result = DID_OK << 16; + break; + case SBP2_STATUS_TRANSPORT_FAILURE: + result = DID_BUS_BUSY << 16; + break; + case SBP2_STATUS_ILLEGAL_REQUEST: + case SBP2_STATUS_VENDOR_DEPENDENT: + default: + result = DID_ERROR << 16; + break; + } + + if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1) + result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status), + orb->cmd->sense_buffer); + } else { + /* + * If the orb completes with status == NULL, something + * went wrong, typically a bus reset happened mid-orb + * or when sending the write (less likely). 
+ */ + result = DID_BUS_BUSY << 16; + sbp2_conditionally_block(base_orb->lu); + } + + dma_unmap_single(device->card->device, orb->base.request_bus, + sizeof(orb->request), DMA_TO_DEVICE); + sbp2_unmap_scatterlist(device->card->device, orb); + + orb->cmd->result = result; + orb->cmd->scsi_done(orb->cmd); +} + +static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, + struct fw_device *device, struct sbp2_logical_unit *lu) +{ + struct scatterlist *sg = scsi_sglist(orb->cmd); + int i, n; + + n = scsi_dma_map(orb->cmd); + if (n <= 0) + goto fail; + + /* + * Handle the special case where there is only one element in + * the scatter list by converting it to an immediate block + * request. This is also a workaround for broken devices such + * as the second generation iPod which doesn't support page + * tables. + */ + if (n == 1) { + orb->request.data_descriptor.high = + cpu_to_be32(lu->tgt->address_high); + orb->request.data_descriptor.low = + cpu_to_be32(sg_dma_address(sg)); + orb->request.misc |= + cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg))); + return 0; + } + + for_each_sg(sg, sg, n, i) { + orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16); + orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg)); + } + + orb->page_table_bus = + dma_map_single(device->card->device, orb->page_table, + sizeof(orb->page_table), DMA_TO_DEVICE); + if (dma_mapping_error(device->card->device, orb->page_table_bus)) + goto fail_page_table; + + /* + * The data_descriptor pointer is the one case where we need + * to fill in the node ID part of the address. All other + * pointers assume that the data referenced reside on the + * initiator (i.e. us), but data_descriptor can refer to data + * on other nodes so we need to put our ID in descriptor.high. + */ + orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); + orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus); + orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT | + COMMAND_ORB_DATA_SIZE(n)); + + return 0; + + fail_page_table: + scsi_dma_unmap(orb->cmd); + fail: + return -ENOMEM; +} + +/* SCSI stack integration */ + +static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *cmd) +{ + struct sbp2_logical_unit *lu = cmd->device->hostdata; + struct fw_device *device = target_parent_device(lu->tgt); + struct sbp2_command_orb *orb; + int generation, retval = SCSI_MLQUEUE_HOST_BUSY; + + orb = kzalloc(sizeof(*orb), GFP_ATOMIC); + if (orb == NULL) + return SCSI_MLQUEUE_HOST_BUSY; + + /* Initialize rcode to something not RCODE_COMPLETE. 
*/ + orb->base.rcode = -1; + kref_init(&orb->base.kref); + orb->cmd = cmd; + orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); + orb->request.misc = cpu_to_be32( + COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | + COMMAND_ORB_SPEED(device->max_speed) | + COMMAND_ORB_NOTIFY); + + if (cmd->sc_data_direction == DMA_FROM_DEVICE) + orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION); + + generation = device->generation; + smp_rmb(); /* sbp2_map_scatterlist looks at tgt->address_high */ + + if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) + goto out; + + memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len); + + orb->base.callback = complete_command_orb; + orb->base.request_bus = + dma_map_single(device->card->device, &orb->request, + sizeof(orb->request), DMA_TO_DEVICE); + if (dma_mapping_error(device->card->device, orb->base.request_bus)) { + sbp2_unmap_scatterlist(device->card->device, orb); + goto out; + } + + sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, + lu->command_block_agent_address + SBP2_ORB_POINTER); + retval = 0; + out: + kref_put(&orb->base.kref, free_orb); + return retval; +} + +static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) +{ + struct sbp2_logical_unit *lu = sdev->hostdata; + + /* (Re-)Adding logical units via the SCSI stack is not supported. */ + if (!lu) + return -ENOSYS; + + sdev->allow_restart = 1; + + /* + * SBP-2 does not require any alignment, but we set it anyway + * for compatibility with earlier versions of this driver. + */ + blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1); + + if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) + sdev->inquiry_len = 36; + + return 0; +} + +static int sbp2_scsi_slave_configure(struct scsi_device *sdev) +{ + struct sbp2_logical_unit *lu = sdev->hostdata; + + sdev->use_10_for_rw = 1; + + if (sbp2_param_exclusive_login) + sdev->manage_start_stop = 1; + + if (sdev->type == TYPE_ROM) + sdev->use_10_for_ms = 1; + + if (sdev->type == TYPE_DISK && + lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) + sdev->skip_ms_page_8 = 1; + + if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) + sdev->fix_capacity = 1; + + if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) + sdev->start_stop_pwr_cond = 1; + + if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) + blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512); + + return 0; +} + +/* + * Called by scsi stack when something has really gone wrong. Usually + * called when a command has timed-out for some reason. + */ +static int sbp2_scsi_abort(struct scsi_cmnd *cmd) +{ + struct sbp2_logical_unit *lu = cmd->device->hostdata; + + dev_notice(lu_dev(lu), "sbp2_scsi_abort\n"); + sbp2_agent_reset(lu); + sbp2_cancel_orbs(lu); + + return SUCCESS; +} + +/* + * Format of /sys/bus/scsi/devices/.../ieee1394_id: + * u64 EUI-64 : u24 directory_ID : u16 LUN (all printed in hexadecimal) + * + * This is the concatenation of target port identifier and logical unit + * identifier as per SAM-2...SAM-4 annex A. 
+ */
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct sbp2_logical_unit *lu;
+
+       if (!sdev)
+               return 0;
+
+       lu = sdev->hostdata;
+
+       return sprintf(buf, "%016llx:%06x:%04x\n",
+                       (unsigned long long)lu->tgt->guid,
+                       lu->tgt->directory_id, lu->lun);
+}
+
+static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
+
+static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
+       &dev_attr_ieee1394_id,
+       NULL
+};
+
+static struct scsi_host_template scsi_driver_template = {
+       .module = THIS_MODULE,
+       .name = "SBP-2 IEEE-1394",
+       .proc_name = "sbp2",
+       .queuecommand = sbp2_scsi_queuecommand,
+       .slave_alloc = sbp2_scsi_slave_alloc,
+       .slave_configure = sbp2_scsi_slave_configure,
+       .eh_abort_handler = sbp2_scsi_abort,
+       .this_id = -1,
+       .sg_tablesize = SG_ALL,
+       .use_clustering = ENABLE_CLUSTERING,
+       .cmd_per_lun = 1,
+       .can_queue = 1,
+       .sdev_attrs = sbp2_scsi_sysfs_attrs,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("SCSI over IEEE1394");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+MODULE_ALIAS("sbp2");
+
+static int __init sbp2_init(void)
+{
+       return driver_register(&sbp2_driver.driver);
+}
+
+static void __exit sbp2_cleanup(void)
+{
+       driver_unregister(&sbp2_driver.driver);
+}
+
+module_init(sbp2_init);
+module_exit(sbp2_cleanup);
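
The login/reconnect logic in this file swaps a function pointer, lu->workfn, between sbp2_login and sbp2_reconnect, and every delayed-work invocation is funnelled through the single entry point sbp2_lu_workfn(). Below is a minimal user-space sketch of that dispatch pattern only; the names my_unit, my_login, my_reconnect and my_workfn are hypothetical and do not appear in the driver.

/* Sketch of the workfn dispatch pattern used by sbp2_lu_workfn().
 * All names here are made up for illustration. */
#include <stdio.h>

struct my_unit {
        void (*workfn)(struct my_unit *lu);     /* like lu->workfn */
        int retries;
};

static void my_reconnect(struct my_unit *lu);

static void my_login(struct my_unit *lu)
{
        printf("login attempt, retries=%d\n", lu->retries);
        lu->workfn = my_reconnect;      /* once logged in, later work reconnects */
}

static void my_reconnect(struct my_unit *lu)
{
        printf("reconnect attempt %d\n", lu->retries);
        if (++lu->retries >= 5) {       /* give up and fall back to a fresh login */
                lu->retries = 0;
                lu->workfn = my_login;
        }
}

/* Single entry point, analogous to sbp2_lu_workfn() queued as delayed work. */
static void my_workfn(struct my_unit *lu)
{
        lu->workfn(lu);
}

int main(void)
{
        struct my_unit lu = { .workfn = my_login, .retries = 0 };

        for (int i = 0; i < 7; i++)
                my_workfn(&lu);
        return 0;
}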
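
The max_payload comment in sbp2_probe() states that SBP-2 encodes the maximum payload as 2 ^ (max_payload + 2) bytes, so max_speed + 7 (capped at 10) yields 512 bytes at S100, 1024 at S200, and so on up to 4096. A standalone arithmetic check of that claim follows; the speed codes are the IEEE 1394 values (S100 = 0, S200 = 1, ...), and nothing here is driver API.

/* Verify: payload = 2 ^ (max_payload + 2), with max_payload = speed + 7,
 * capped at 10 (4096 bytes), as done by the min3() in sbp2_probe(). */
#include <stdio.h>

int main(void)
{
        for (int speed = 0; speed <= 4; speed++) {
                unsigned int max_payload = speed + 7;

                if (max_payload > 10)           /* same effect as min3(..., 10U, ...) */
                        max_payload = 10;
                printf("speed code %d -> max_payload %u -> %u bytes\n",
                       speed, max_payload, 1u << (max_payload + 2));
        }
        return 0;
}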
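
sbp2_scan_unit_dir() reads mgt_ORB_timeout from the unit-characteristics entry in 500 ms units ((value >> 8) & 0xff), and sbp2_clamp_management_orb_timeout() then bounds the result to 5000..40000 ms. A small worked example of that conversion and clamping; clamp_ms() is a stand-in for the kernel's clamp_val(), and the CSR values are made up.

/* mgt_ORB_timeout handling: bits 8..15 of the CSR value carry the timeout
 * in 500 ms units; the driver clamps the result to 5..40 seconds. */
#include <stdio.h>

static unsigned int clamp_ms(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        unsigned int csr_values[] = { 0x0000, 0x0a00, 0xff00 };

        for (unsigned int i = 0; i < sizeof(csr_values) / sizeof(csr_values[0]); i++) {
                unsigned int raw = (csr_values[i] >> 8 & 0xff) * 500;   /* ms */

                printf("CSR 0x%04x -> %u ms raw -> %u ms clamped\n",
                       csr_values[i], raw, clamp_ms(raw, 5000, 40000));
        }
        return 0;
}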
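
The ieee1394_id attribute described above concatenates the 64-bit EUI-64, the 24-bit directory ID and the 16-bit LUN using the format string "%016llx:%06x:%04x". A tiny example of the resulting sysfs string; the GUID and directory ID below are illustrative values, not taken from any real device.

/* Format of /sys/bus/scsi/devices/.../ieee1394_id, reproduced with
 * made-up values; only the printf format matches the driver. */
#include <stdio.h>

int main(void)
{
        unsigned long long guid = 0x0001041012345678ULL;        /* EUI-64 (made up) */
        unsigned int directory_id = 0x00d104;                   /* u24 (made up) */
        unsigned int lun = 0x0000;                              /* u16 */

        printf("%016llx:%06x:%04x\n", guid, directory_id, lun);
        /* prints: 0001041012345678:00d104:0000 */
        return 0;
}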