Diffstat (limited to 'kernel/arch/cris/arch-v32/drivers')
17 files changed, 9801 insertions, 0 deletions
diff --git a/kernel/arch/cris/arch-v32/drivers/Kconfig b/kernel/arch/cris/arch-v32/drivers/Kconfig
new file mode 100644
index 000000000..4fc16b44f
--- /dev/null
+++ b/kernel/arch/cris/arch-v32/drivers/Kconfig
@@ -0,0 +1,430 @@
+if ETRAX_ARCH_V32
+
+config ETRAX_ETHERNET
+	bool "Ethernet support"
+	depends on ETRAX_ARCH_V32 && NETDEVICES
+	select MII
+	help
+	  This option enables the ETRAX FS built-in 10/100Mbit Ethernet
+	  controller.
+
+config ETRAX_NO_PHY
+	bool "PHY not present"
+	depends on ETRAX_ETHERNET
+	help
+	  This option disables all MDIO communication with an ethernet
+	  transceiver connected to the MII interface. This option shall
+	  typically be enabled if the MII interface is connected to a
+	  switch. This option should normally be disabled. If enabled,
+	  speed and duplex will be locked to 100 Mbit and full duplex.
+
+config ETRAXFS_SERIAL
+	bool "Serial-port support"
+	depends on ETRAX_ARCH_V32
+	select SERIAL_CORE
+	select SERIAL_CORE_CONSOLE
+	help
+	  Enables the ETRAX FS serial driver for ser0 (ttyS0).
+	  You probably want this enabled.
+
+config ETRAX_RS485
+	bool "RS-485 support"
+	depends on ETRAXFS_SERIAL
+	help
+	  Enables support for RS-485 serial communication.
+
+config ETRAX_RS485_DISABLE_RECEIVER
+	bool "Disable serial receiver"
+	depends on ETRAX_RS485
+	help
+	  It is necessary to disable the serial receiver to avoid serial
+	  loopback. Not all products are able to do this in software only.
+
+config ETRAX_SERIAL_PORT0
+	bool "Serial port 0 enabled"
+	depends on ETRAXFS_SERIAL
+	help
+	  Enables the ETRAX FS serial driver for ser0 (ttyS0).
+	  Normally you want this on. You can control what DMA channels to use
+	  if you do not need DMA to something else.
+	  ser0 can use dma4 or dma6 for output and dma5 or dma7 for input.
+
+config ETRAX_SERIAL_PORT1
+	bool "Serial port 1 enabled"
+	depends on ETRAXFS_SERIAL
+	help
+	  Enables the ETRAX FS serial driver for ser1 (ttyS1).
+
+config ETRAX_SERIAL_PORT2
+	bool "Serial port 2 enabled"
+	depends on ETRAXFS_SERIAL
+	help
+	  Enables the ETRAX FS serial driver for ser2 (ttyS2).
+
+config ETRAX_SERIAL_PORT3
+	bool "Serial port 3 enabled"
+	depends on ETRAXFS_SERIAL
+	help
+	  Enables the ETRAX FS serial driver for ser3 (ttyS3).
+
+config ETRAX_SYNCHRONOUS_SERIAL
+	bool "Synchronous serial-port support"
+	depends on ETRAX_ARCH_V32
+	help
+	  Enables the ETRAX FS synchronous serial driver.
+
+config ETRAX_SYNCHRONOUS_SERIAL_PORT0
+	bool "Synchronous serial port 0 enabled"
+	depends on ETRAX_SYNCHRONOUS_SERIAL
+	help
+	  Enables synchronous serial port 0.
+
+config ETRAX_SYNCHRONOUS_SERIAL0_DMA
+	bool "Enable DMA on synchronous serial port 0."
+	depends on ETRAX_SYNCHRONOUS_SERIAL_PORT0
+	help
+	  A synchronous serial port can run in manual or DMA mode.
+	  Selecting this option will make it run in DMA mode.
+
+config ETRAX_SYNCHRONOUS_SERIAL_PORT1
+	bool "Synchronous serial port 1 enabled"
+	depends on ETRAX_SYNCHRONOUS_SERIAL && ETRAXFS
+	help
+	  Enables synchronous serial port 1.
+
+config ETRAX_SYNCHRONOUS_SERIAL1_DMA
+	bool "Enable DMA on synchronous serial port 1."
+	depends on ETRAX_SYNCHRONOUS_SERIAL_PORT1
+	help
+	  A synchronous serial port can run in manual or DMA mode.
+	  Selecting this option will make it run in DMA mode.
+
+config ETRAX_AXISFLASHMAP
+	bool "Axis flash-map support"
+	depends on ETRAX_ARCH_V32
+	select MTD
+	select MTD_CFI
+	select MTD_CFI_AMDSTD
+	select MTD_JEDECPROBE
+	select MTD_BLOCK
+	select MTD_COMPLEX_MAPPINGS
+	select MTD_MTDRAM
+	help
+	  This option enables MTD mapping of flash devices. Needed to use
+	  flash memories.
+	  If unsure, say Y.
+
+config ETRAX_AXISFLASHMAP_MTD0WHOLE
+	bool "MTD0 is whole boot flash device"
+	depends on ETRAX_AXISFLASHMAP
+	help
+	  When this option is not set, mtd0 refers to the first partition
+	  on the boot flash device. When set, mtd0 refers to the whole
+	  device, with mtd1 referring to the first partition etc.
+
+config ETRAX_PTABLE_SECTOR
+	int "Byte-offset of partition table sector"
+	depends on ETRAX_AXISFLASHMAP
+	default "65536"
+	help
+	  Byte-offset of the partition table in the first flash chip.
+	  The default value is 64kB and should not be changed unless
+	  you know exactly what you are doing. The only valid reason
+	  for changing this is when the flash block size is bigger
+	  than 64kB (e.g. when using two parallel 16 bit flashes).
+
+config ETRAX_NANDFLASH
+	bool "NAND flash support"
+	depends on ETRAX_ARCH_V32
+	select MTD_NAND
+	select MTD_NAND_IDS
+	help
+	  This option enables MTD mapping of NAND flash devices. Needed to use
+	  NAND flash memories. If unsure, say Y.
+
+config ETRAX_NANDBOOT
+	bool "Boot from NAND flash"
+	depends on ETRAX_NANDFLASH
+	help
+	  This option enables booting from NAND flash devices.
+	  Say Y if your boot code, kernel and root file system are in
+	  NAND flash. Say N if they are in NOR flash.
+
+config ETRAX_I2C
+	bool "I2C driver"
+	depends on ETRAX_ARCH_V32
+	help
+	  This option enables the I2C driver used by e.g. the RTC driver.
+
+config ETRAX_V32_I2C_DATA_PORT
+	string "I2C data pin"
+	depends on ETRAX_I2C
+	help
+	  The pin to use for I2C data.
+
+config ETRAX_V32_I2C_CLK_PORT
+	string "I2C clock pin"
+	depends on ETRAX_I2C
+	help
+	  The pin to use for I2C clock.
+
+config ETRAX_GPIO
+	bool "GPIO support"
+	depends on ETRAX_ARCH_V32
+	---help---
+	  Enables the ETRAX general port device (major 120, minors 0-4).
+	  You can use this driver to access the general port bits. It supports
+	  these ioctl's (a fuller user-space example follows below):
+	  #include <linux/etraxgpio.h>
+	  fd = open("/dev/gpioa", O_RDWR); // or /dev/gpiob
+	  ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), bits_to_set);
+	  ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), bits_to_clear);
+	  err = ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val);
+	  Remember that you need to set up the port directions appropriately
+	  in the General configuration.
+
+config ETRAX_VIRTUAL_GPIO
+	bool "Virtual GPIO support"
+	depends on ETRAX_GPIO
+	help
+	  Enables the virtual Etrax general port device (major 120, minor 6).
+	  It uses an I/O expander for the I2C-bus.
+
+config ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN
+	int "Virtual GPIO interrupt pin on PA pin"
+	range 0 7
+	depends on ETRAX_VIRTUAL_GPIO
+	help
+	  The pin to use on PA for virtual gpio interrupt.
+
+config ETRAX_PA_CHANGEABLE_DIR
+	hex "PA user changeable dir mask"
+	depends on ETRAX_GPIO
+	default "0x00" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
+	help
+	  This is a bitmask (8 bits) with information on which bits in PA
+	  a user can change direction on using ioctl's.
+	  Bit set = changeable.
+	  You probably want 0 here, but it depends on your hardware.
+
+config ETRAX_PA_CHANGEABLE_BITS
+	hex "PA user changeable bits mask"
+	depends on ETRAX_GPIO
+	default "0x00" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
+	help
+	  This is a bitmask (8 bits) with information on which bits in PA
+	  a user can change the value on using ioctl's.
+	  Bit set = changeable.
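A minimal user-space sketch of the ioctl sequence described in the ETRAX_GPIO help above. Only the header name, ETRAXGPIO_IOCTYPE and the IO_* command names come from the driver; the bit masks and the choice of /dev/gpioa are illustrative assumptions.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/etraxgpio.h>

	int main(void)
	{
		unsigned long val;
		int fd = open("/dev/gpioa", O_RDWR); /* or /dev/gpiob */

		if (fd < 0)
			return 1;
		/* Drive bit 0 high and bit 1 low; the masks are made-up examples. */
		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 0x01);
		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), 0x02);
		/* Read the input bits back into val. */
		if (ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val) == 0)
			printf("inbits: 0x%lx\n", val);
		close(fd);
		return 0;
	}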
+
+config ETRAX_PB_CHANGEABLE_DIR
+	hex "PB user changeable dir mask"
+	depends on ETRAX_GPIO
+	default "0x00000" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
+	help
+	  This is a bitmask (18 bits) with information on which bits in PB
+	  a user can change direction on using ioctl's.
+	  Bit set = changeable.
+	  You probably want 0 here, but it depends on your hardware.
+
+config ETRAX_PB_CHANGEABLE_BITS
+	hex "PB user changeable bits mask"
+	depends on ETRAX_GPIO
+	default "0x00000" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
+	help
+	  This is a bitmask (18 bits) with information on which bits in PB
+	  a user can change the value on using ioctl's.
+	  Bit set = changeable.
+
+config ETRAX_PC_CHANGEABLE_DIR
+	hex "PC user changeable dir mask"
+	depends on ETRAX_GPIO
+	default "0x00000" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
+	help
+	  This is a bitmask (18 bits) with information on which bits in PC
+	  a user can change direction on using ioctl's.
+	  Bit set = changeable.
+	  You probably want 0 here, but it depends on your hardware.
+
+config ETRAX_PC_CHANGEABLE_BITS
+	hex "PC user changeable bits mask"
+	depends on ETRAX_GPIO
+	default "0x00000" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
+	help
+	  This is a bitmask (18 bits) with information on which bits in PC
+	  a user can change the value on using ioctl's.
+	  Bit set = changeable.
+
+config ETRAX_PD_CHANGEABLE_DIR
+	hex "PD user changeable dir mask"
+	depends on ETRAX_GPIO && ETRAXFS
+	default "0x00000"
+	help
+	  This is a bitmask (18 bits) with information on which bits in PD
+	  a user can change direction on using ioctl's.
+	  Bit set = changeable.
+	  You probably want 0x00000 here, but it depends on your hardware.
+
+config ETRAX_PD_CHANGEABLE_BITS
+	hex "PD user changeable bits mask"
+	depends on ETRAX_GPIO && ETRAXFS
+	default "0x00000"
+	help
+	  This is a bitmask (18 bits) with information on which bits in PD
+	  a user can change the value on using ioctl's.
+	  Bit set = changeable.
+
+config ETRAX_PE_CHANGEABLE_DIR
+	hex "PE user changeable dir mask"
+	depends on ETRAX_GPIO && ETRAXFS
+	default "0x00000"
+	help
+	  This is a bitmask (18 bits) with information on which bits in PE
+	  a user can change direction on using ioctl's.
+	  Bit set = changeable.
+	  You probably want 0x00000 here, but it depends on your hardware.
+
+config ETRAX_PE_CHANGEABLE_BITS
+	hex "PE user changeable bits mask"
+	depends on ETRAX_GPIO && ETRAXFS
+	default "0x00000"
+	help
+	  This is a bitmask (18 bits) with information on which bits in PE
+	  a user can change the value on using ioctl's.
+	  Bit set = changeable.
+
+config ETRAX_PV_CHANGEABLE_DIR
+	hex "PV user changeable dir mask"
+	depends on ETRAX_VIRTUAL_GPIO
+	default "0x0000"
+	help
+	  This is a bitmask (16 bits) with information on which bits in PV
+	  a user can change direction on using ioctl's.
+	  Bit set = changeable.
+	  You probably want 0x0000 here, but it depends on your hardware.
+
+config ETRAX_PV_CHANGEABLE_BITS
+	hex "PV user changeable bits mask"
+	depends on ETRAX_VIRTUAL_GPIO
+	default "0x0000"
+	help
+	  This is a bitmask (16 bits) with information on which bits in PV
+	  a user can change the value on using ioctl's.
+	  Bit set = changeable.
+
+config ETRAX_CARDBUS
+	bool "Cardbus support"
+	depends on ETRAX_ARCH_V32
+	help
+	  Enables the ETRAX Cardbus driver.
+
+config PCI
+	bool
+	depends on ETRAX_CARDBUS
+	default y
+	select HAVE_GENERIC_DMA_COHERENT
+
+config ETRAX_IOP_FW_LOAD
+	tristate "IO-processor hotplug firmware loading support"
+	depends on ETRAX_ARCH_V32
+	select FW_LOADER
+	help
+	  Enables IO-processor hotplug firmware loading support.
+
+config ETRAX_STREAMCOPROC
+	tristate "Stream co-processor driver enabled"
+	depends on ETRAX_ARCH_V32
+	help
+	  This option enables a driver for the stream co-processor
+	  for cryptographic operations.
+
+config ETRAX_MMC_IOP
+	tristate "MMC/SD host driver using IO-processor"
+	depends on ETRAX_ARCH_V32 && MMC
+	help
+	  This option enables the SD/MMC host controller interface.
+	  The host controller is implemented using the built-in
+	  IO-processor. Only the SPU is used in this implementation.
+
+config ETRAX_SPI_MMC
+# Make this one of several "choices" (possible simultaneously but
+# suggested uniquely) when an IOP driver emerges for "real" MMC/SD
+# protocol support.
+	tristate
+	depends on !ETRAX_MMC_IOP
+	default MMC
+	select SPI
+	select MMC_SPI
+
+# While the board info is MMC_SPI only, the drivers are written to be
+# independent of MMC_SPI, so we'll keep SPI non-dependent on the
+# MMC_SPI config choices (well, except for a single depends-on-line
+# for the board-info file until a separate non-MMC SPI board file
+# emerges).
+# FIXME: When that happens, we'll need to be able to ask for and
+# configure non-MMC SPI ports together with MMC_SPI ports (if multiple
+# SPI ports are enabled).
+# A sketch of such a board-info registration follows after this file.
+
+config SPI_ETRAX_SSER
+	tristate
+	depends on SPI_MASTER && ETRAX_ARCH_V32
+	select SPI_BITBANG
+	help
+	  This enables using a synchronous serial (sser) port as an
+	  SPI master controller on Axis ETRAX FS and later. The
+	  driver can be configured to use any sser port.
+
+config SPI_ETRAX_GPIO
+	tristate
+	depends on SPI_MASTER && ETRAX_ARCH_V32
+	select SPI_BITBANG
+	help
+	  This enables using GPIO pins as an SPI master controller
+	  on Axis ETRAX FS and later. The driver can be configured to
+	  use any GPIO pins.
+
+config ETRAX_SPI_SSER0
+	tristate "SPI using synchronous serial port 0 (sser0)"
+	depends on ETRAX_SPI_MMC
+	default m if MMC_SPI=m
+	default y if MMC_SPI=y
+	default y if MMC_SPI=n
+	select SPI_ETRAX_SSER
+	help
+	  Say Y for an MMC/SD socket connected to synchronous serial port 0,
+	  or for devices using the SPI protocol on that port. Say m if you
+	  want to build it as a module, which will be named spi_crisv32_sser.
+	  (You need to select MMC separately.)
+
+config ETRAX_SPI_SSER1
+	tristate "SPI using synchronous serial port 1 (sser1)"
+	depends on ETRAX_SPI_MMC
+	default m if MMC_SPI=m && ETRAX_SPI_SSER0=n
+	default y if MMC_SPI=y && ETRAX_SPI_SSER0=n
+	default y if MMC_SPI=n && ETRAX_SPI_SSER0=n
+	select SPI_ETRAX_SSER
+	help
+	  Say Y for an MMC/SD socket connected to synchronous serial port 1,
+	  or for devices using the SPI protocol on that port. Say m if you
+	  want to build it as a module, which will be named spi_crisv32_sser.
+	  (You need to select MMC separately.)
+
+config ETRAX_SPI_GPIO
+	tristate "Bitbanged SPI using gpio pins"
+	depends on ETRAX_SPI_MMC
+	select SPI_ETRAX_GPIO
+	default m if MMC_SPI=m && ETRAX_SPI_SSER0=n && ETRAX_SPI_SSER1=n
+	default y if MMC_SPI=y && ETRAX_SPI_SSER0=n && ETRAX_SPI_SSER1=n
+	default y if MMC_SPI=n && ETRAX_SPI_SSER0=n && ETRAX_SPI_SSER1=n
+	help
+	  Say Y for an MMC/SD socket connected to general I/O pins (but not
+	  a complete synchronous serial port), or for devices using the SPI
+	  protocol on general I/O pins. This is slow and slows down the
+	  system.
+	  Say m to build it as a module, which will be called spi_crisv32_gpio.
+	  (You need to select MMC separately.)
+
+endif
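As promised in the comment above, here is a hedged sketch of how the MMC_SPI board info these options refer to is typically registered from board setup code. The bus number, chip select and clock rate are invented placeholders, and the actual CRIS board file may differ; only struct spi_board_info, spi_register_board_info() and the "mmc_spi" modalias are standard kernel interfaces.

	#include <linux/init.h>
	#include <linux/spi/spi.h>

	/* Hypothetical board-info entry binding the mmc_spi protocol driver to
	 * chip select 0 on the sser/GPIO SPI bus registered by the drivers
	 * selected above. All numbers are placeholders. */
	static struct spi_board_info crisv32_spi_devices[] __initdata = {
		{
			.modalias	= "mmc_spi",
			.max_speed_hz	= 20 * 1000 * 1000,
			.bus_num	= 0,
			.chip_select	= 0,
		},
	};

	/* Typically called once from board setup code: */
	/* spi_register_board_info(crisv32_spi_devices,
				   ARRAY_SIZE(crisv32_spi_devices)); */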
diff --git a/kernel/arch/cris/arch-v32/drivers/Makefile b/kernel/arch/cris/arch-v32/drivers/Makefile
new file mode 100644
index 000000000..15fbfefce
--- /dev/null
+++ b/kernel/arch/cris/arch-v32/drivers/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for Etrax-specific drivers
+#
+
+obj-$(CONFIG_ETRAX_STREAMCOPROC) += cryptocop.o
+obj-$(CONFIG_ETRAX_AXISFLASHMAP) += axisflashmap.o
+obj-$(CONFIG_ETRAXFS) += mach-fs/
+obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/
+obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
+obj-$(CONFIG_ETRAX_I2C) += i2c.o
+obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
+obj-$(CONFIG_PCI) += pci/
diff --git a/kernel/arch/cris/arch-v32/drivers/axisflashmap.c b/kernel/arch/cris/arch-v32/drivers/axisflashmap.c
new file mode 100644
index 000000000..28dd77144
--- /dev/null
+++ b/kernel/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -0,0 +1,619 @@
+/*
+ * Physical mapping layer for MTD using the Axis partitiontable format
+ *
+ * Copyright (c) 2001-2007 Axis Communications AB
+ *
+ * This file is under the GPL.
+ *
+ * The first partition is always sector 0, regardless of whether we find a
+ * partition table or not. At the start of the next sector there can be a
+ * partition table that tells us what other partitions to define. If there
+ * isn't one, we use a default partition split defined below.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/concat.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtdram.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/axisflashmap.h>
+#include <asm/mmu.h>
+
+#define MEM_CSE0_SIZE (0x04000000)
+#define MEM_CSE1_SIZE (0x04000000)
+
+#define FLASH_UNCACHED_ADDR KSEG_E
+#define FLASH_CACHED_ADDR KSEG_F
+
+#define PAGESIZE (512)
+
+#if CONFIG_ETRAX_FLASH_BUSWIDTH==1
+#define flash_data __u8
+#elif CONFIG_ETRAX_FLASH_BUSWIDTH==2
+#define flash_data __u16
+#elif CONFIG_ETRAX_FLASH_BUSWIDTH==4
+#define flash_data __u32
+#endif
+
+/* From head.S */
+extern unsigned long romfs_in_flash; /* 1 when romfs_start, _length in flash */
+extern unsigned long romfs_start, romfs_length;
+extern unsigned long nand_boot; /* 1 when booted from nand flash */
+
+struct partition_name {
+	char name[6];
+};
+
+/* The master mtd for the entire flash. */
+struct mtd_info *axisflash_mtd = NULL;
+
+/* Map driver functions. */
+
+static map_word flash_read(struct map_info *map, unsigned long ofs)
+{
+	map_word tmp;
+	tmp.x[0] = *(flash_data *)(map->map_priv_1 + ofs);
+	return tmp;
+}
+
+static void flash_copy_from(struct map_info *map, void *to,
+			    unsigned long from, ssize_t len)
+{
+	memcpy(to, (void *)(map->map_priv_1 + from), len);
+}
+
+static void flash_write(struct map_info *map, map_word d, unsigned long adr)
+{
+	*(flash_data *)(map->map_priv_1 + adr) = (flash_data)d.x[0];
+}
+
+/*
+ * The map for chip select e0.
+ *
+ * We run into tricky coherence situations if we mix cached with uncached
+ * accesses, so we only use the uncached version here.
+ *
+ * The size field is the total size where the flash chips may be mapped on the
+ * chip select. MTD probes should find all devices there and it does not matter
+ * if there are unmapped gaps or aliases (mirrors of flash devices). The MTD
+ * probes will ignore them.
+ *
+ * The start address in map_priv_1 is in virtual memory, so we cannot use
+ * MEM_CSE0_START but must rely on FLASH_UNCACHED_ADDR being the start
+ * address of cse0.
+ */
+static struct map_info map_cse0 = {
+	.name = "cse0",
+	.size = MEM_CSE0_SIZE,
+	.bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH,
+	.read = flash_read,
+	.copy_from = flash_copy_from,
+	.write = flash_write,
+	.map_priv_1 = FLASH_UNCACHED_ADDR
+};
+
+/*
+ * The map for chip select e1.
+ *
+ * If there was a gap between cse0 and cse1, map_priv_1 would get the wrong
+ * address, but there isn't.
+ */
+static struct map_info map_cse1 = {
+	.name = "cse1",
+	.size = MEM_CSE1_SIZE,
+	.bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH,
+	.read = flash_read,
+	.copy_from = flash_copy_from,
+	.write = flash_write,
+	.map_priv_1 = FLASH_UNCACHED_ADDR + MEM_CSE0_SIZE
+};
+
+#define MAX_PARTITIONS 7
+#ifdef CONFIG_ETRAX_NANDBOOT
+#define NUM_DEFAULT_PARTITIONS 4
+#define DEFAULT_ROOTFS_PARTITION_NO 2
+#define DEFAULT_MEDIA_SIZE 0x2000000 /* 32 megs */
+#else
+#define NUM_DEFAULT_PARTITIONS 3
+#define DEFAULT_ROOTFS_PARTITION_NO (-1)
+#define DEFAULT_MEDIA_SIZE 0x800000 /* 8 megs */
+#endif
+
+#if (MAX_PARTITIONS < NUM_DEFAULT_PARTITIONS)
+#error MAX_PARTITIONS must be >= NUM_DEFAULT_PARTITIONS
+#endif
+
+/* Initialize the ones normally used. */
+static struct mtd_partition axis_partitions[MAX_PARTITIONS] = {
+	{
+		.name = "part0",
+		.size = CONFIG_ETRAX_PTABLE_SECTOR,
+		.offset = 0
+	},
+	{
+		.name = "part1",
+		.size = 0,
+		.offset = 0
+	},
+	{
+		.name = "part2",
+		.size = 0,
+		.offset = 0
+	},
+	{
+		.name = "part3",
+		.size = 0,
+		.offset = 0
+	},
+	{
+		.name = "part4",
+		.size = 0,
+		.offset = 0
+	},
+	{
+		.name = "part5",
+		.size = 0,
+		.offset = 0
+	},
+	{
+		.name = "part6",
+		.size = 0,
+		.offset = 0
+	},
+};
+
+
+/* If no partition table was found, we use this default set.
+ * The default flash size is 8MB (NOR). CONFIG_ETRAX_PTABLE_SECTOR is most
+ * likely the size of one flash block and the "filesystem" partition needs
+ * to be >=5 blocks to be able to use JFFS.
+ */
+static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = {
+	{
+		.name = "boot firmware",
+		.size = CONFIG_ETRAX_PTABLE_SECTOR,
+		.offset = 0
+	},
+	{
+		.name = "kernel",
+		.size = 10 * CONFIG_ETRAX_PTABLE_SECTOR,
+		.offset = CONFIG_ETRAX_PTABLE_SECTOR
+	},
+#define FILESYSTEM_SECTOR (11 * CONFIG_ETRAX_PTABLE_SECTOR)
+#ifdef CONFIG_ETRAX_NANDBOOT
+	{
+		.name = "rootfs",
+		.size = 10 * CONFIG_ETRAX_PTABLE_SECTOR,
+		.offset = FILESYSTEM_SECTOR
+	},
+#undef FILESYSTEM_SECTOR
+#define FILESYSTEM_SECTOR (21 * CONFIG_ETRAX_PTABLE_SECTOR)
+#endif
+	{
+		.name = "rwfs",
+		.size = DEFAULT_MEDIA_SIZE - FILESYSTEM_SECTOR,
+		.offset = FILESYSTEM_SECTOR
+	}
+};
+
+#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
+/* Main flash device */
+static struct mtd_partition main_partition = {
+	.name = "main",
+	.size = 0,
+	.offset = 0
+};
+#endif
+
+/* Auxiliary partition if we find another flash */
+static struct mtd_partition aux_partition = {
+	.name = "aux",
+	.size = 0,
+	.offset = 0
+};
+
+/*
+ * Probe a chip select for AMD-compatible (JEDEC) or CFI-compatible flash
+ * chips, in that order (because the amd_flash driver is faster).
+ */
+static struct mtd_info *probe_cs(struct map_info *map_cs)
+{
+	struct mtd_info *mtd_cs = NULL;
+
+	printk(KERN_INFO
+	       "%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n",
+	       map_cs->name, map_cs->size, map_cs->map_priv_1);
+
+#ifdef CONFIG_MTD_CFI
+	mtd_cs = do_map_probe("cfi_probe", map_cs);
+#endif
+#ifdef CONFIG_MTD_JEDECPROBE
+	if (!mtd_cs)
+		mtd_cs = do_map_probe("jedec_probe", map_cs);
+#endif
+
+	return mtd_cs;
+}
+
+/*
+ * Probe each chip select individually for flash chips. If there are chips on
+ * both cse0 and cse1, the mtd_info structs will be concatenated to one struct
+ * so that MTD partitions can cross chip boundaries.
+ *
+ * The only known restriction to how you can mount your chips is that each
+ * chip select must hold similar flash chips. But you need external hardware
+ * to do that anyway and you can put totally different chips on cse0 and cse1,
+ * so it isn't really much of a restriction.
+ */
+extern struct mtd_info *__init crisv32_nand_flash_probe(void);
+static struct mtd_info *flash_probe(void)
+{
+	struct mtd_info *mtd_cse0;
+	struct mtd_info *mtd_cse1;
+	struct mtd_info *mtd_total;
+	struct mtd_info *mtds[2];
+	int count = 0;
+
+	if ((mtd_cse0 = probe_cs(&map_cse0)) != NULL)
+		mtds[count++] = mtd_cse0;
+	if ((mtd_cse1 = probe_cs(&map_cse1)) != NULL)
+		mtds[count++] = mtd_cse1;
+
+	if (!mtd_cse0 && !mtd_cse1) {
+		/* No chip found. */
+		return NULL;
+	}
+
+	if (count > 1) {
+		/* Since the concatenation layer adds a small overhead we
+		 * could try to figure out if the chips in cse0 and cse1 are
+		 * identical and reprobe the whole cse0+cse1 window. But since
+		 * flash chips are slow, the overhead is relatively small.
+		 * So we use the MTD concatenation layer instead of further
+		 * complicating the probing procedure.
+		 */
+		mtd_total = mtd_concat_create(mtds, count, "cse0+cse1");
+		if (!mtd_total) {
+			printk(KERN_ERR "%s and %s: Concatenation failed!\n",
+			       map_cse0.name, map_cse1.name);
+
+			/* The best we can do now is to only use what we found
+			 * at cse0. */
+			mtd_total = mtd_cse0;
+			map_destroy(mtd_cse1);
+		}
+	} else
+		mtd_total = mtd_cse0 ? mtd_cse0 : mtd_cse1;
+
+	return mtd_total;
+}
+
+/*
+ * Probe the flash chip(s) and, if it succeeds, read the partition table
+ * and register the partitions with MTD.
+ */
+static int __init init_axis_flash(void)
+{
+	struct mtd_info *main_mtd;
+	struct mtd_info *aux_mtd = NULL;
+	int err = 0;
+	int pidx = 0;
+	struct partitiontable_head *ptable_head = NULL;
+	struct partitiontable_entry *ptable;
+	int ptable_ok = 0;
+	static char page[PAGESIZE];
+	size_t len;
+	int ram_rootfs_partition = -1; /* -1 => no RAM rootfs partition */
+	int part;
+
+	/* We need a root fs. If it resides in RAM, we need to use an
+	 * MTDRAM device, so it must be enabled in the kernel config,
+	 * but its size must be configured as 0 so as not to conflict
+	 * with our usage.
+	 */
+#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
+	if (!romfs_in_flash && !nand_boot) {
+		printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
+		       "device; configure CONFIG_MTD_MTDRAM with size = 0!\n");
+		panic("This kernel cannot boot from RAM!\n");
+	}
+#endif
+
+	main_mtd = flash_probe();
+	if (main_mtd)
+		printk(KERN_INFO "%s: 0x%08x bytes of NOR flash memory.\n",
+		       main_mtd->name, main_mtd->size);
+
+#ifdef CONFIG_ETRAX_NANDFLASH
+	aux_mtd = crisv32_nand_flash_probe();
+	if (aux_mtd)
+		printk(KERN_INFO "%s: 0x%08x bytes of NAND flash memory.\n",
+		       aux_mtd->name, aux_mtd->size);
+
+#ifdef CONFIG_ETRAX_NANDBOOT
+	{
+		struct mtd_info *tmp_mtd;
+
+		printk(KERN_INFO "axisflashmap: Set to boot from NAND flash, "
+		       "making NAND flash primary device.\n");
+		tmp_mtd = main_mtd;
+		main_mtd = aux_mtd;
+		aux_mtd = tmp_mtd;
+	}
+#endif /* CONFIG_ETRAX_NANDBOOT */
+#endif /* CONFIG_ETRAX_NANDFLASH */
+
+	if (!main_mtd && !aux_mtd) {
+		/* There's no reason to use this module if no flash chip can
+		 * be identified. Make sure that's understood.
+		 */
+		printk(KERN_INFO "axisflashmap: Found no flash chip.\n");
+	}
+
+#if 0 /* Dump flash memory so we can see what is going on */
+	if (main_mtd) {
+		int sectoraddr, i;
+		for (sectoraddr = 0; sectoraddr < 2*65536+4096;
+		     sectoraddr += PAGESIZE) {
+			main_mtd->read(main_mtd, sectoraddr, PAGESIZE, &len,
+				       page);
+			printk(KERN_INFO
+			       "Sector at %d (length %d):\n",
+			       sectoraddr, len);
+			for (i = 0; i < PAGESIZE; i += 16) {
+				printk(KERN_INFO
+				       "%02x %02x %02x %02x "
+				       "%02x %02x %02x %02x "
+				       "%02x %02x %02x %02x "
+				       "%02x %02x %02x %02x\n",
+				       page[i] & 255, page[i+1] & 255,
+				       page[i+2] & 255, page[i+3] & 255,
+				       page[i+4] & 255, page[i+5] & 255,
+				       page[i+6] & 255, page[i+7] & 255,
+				       page[i+8] & 255, page[i+9] & 255,
+				       page[i+10] & 255, page[i+11] & 255,
+				       page[i+12] & 255, page[i+13] & 255,
+				       page[i+14] & 255, page[i+15] & 255);
+			}
+		}
+	}
+#endif
+
+	if (main_mtd) {
+		main_mtd->owner = THIS_MODULE;
+		axisflash_mtd = main_mtd;
+
+		loff_t ptable_sector = CONFIG_ETRAX_PTABLE_SECTOR;
+
+		/* First partition (rescue) is always set to the default. */
+		pidx++;
+#ifdef CONFIG_ETRAX_NANDBOOT
+		/* We know where the partition table should be located;
+		 * it will be in the first good block after that.
+		 */
+		int blockstat;
+		do {
+			blockstat = mtd_block_isbad(main_mtd, ptable_sector);
+			if (blockstat < 0)
+				ptable_sector = 0; /* read error */
+			else if (blockstat)
+				ptable_sector += main_mtd->erasesize;
+		} while (blockstat && ptable_sector);
+#endif
+		if (ptable_sector) {
+			mtd_read(main_mtd, ptable_sector, PAGESIZE, &len,
+				 page);
+			ptable_head = &((struct partitiontable *) page)->head;
+		}
+
+#if 0 /* Dump partition table so we can see what is going on */
+		printk(KERN_INFO
+		       "axisflashmap: flash read %d bytes at 0x%08x, data: "
+		       "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+		       len, CONFIG_ETRAX_PTABLE_SECTOR,
+		       page[0] & 255, page[1] & 255,
+		       page[2] & 255, page[3] & 255,
+		       page[4] & 255, page[5] & 255,
+		       page[6] & 255, page[7] & 255);
+		printk(KERN_INFO
+		       "axisflashmap: partition table offset %d, data: "
+		       "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+		       PARTITION_TABLE_OFFSET,
+		       page[PARTITION_TABLE_OFFSET+0] & 255,
+		       page[PARTITION_TABLE_OFFSET+1] & 255,
+		       page[PARTITION_TABLE_OFFSET+2] & 255,
+		       page[PARTITION_TABLE_OFFSET+3] & 255,
+		       page[PARTITION_TABLE_OFFSET+4] & 255,
+		       page[PARTITION_TABLE_OFFSET+5] & 255,
+		       page[PARTITION_TABLE_OFFSET+6] & 255,
+		       page[PARTITION_TABLE_OFFSET+7] & 255);
+#endif
+	}
+
+	if (ptable_head && (ptable_head->magic == PARTITION_TABLE_MAGIC)
+	    && (ptable_head->size <
+		(MAX_PARTITIONS * sizeof(struct partitiontable_entry) +
+		 PARTITIONTABLE_END_MARKER_SIZE))
+	    && (*(unsigned long*)((void*)ptable_head + sizeof(*ptable_head) +
+				  ptable_head->size -
+				  PARTITIONTABLE_END_MARKER_SIZE)
+		== PARTITIONTABLE_END_MARKER)) {
+		/* Looks like the start, sane length and end of a
+		 * partition table; let's check the csum etc.
+		 */
+		struct partitiontable_entry *max_addr =
+			(struct partitiontable_entry *)
+			((unsigned long)ptable_head + sizeof(*ptable_head) +
+			 ptable_head->size);
+		unsigned long offset = CONFIG_ETRAX_PTABLE_SECTOR;
+		unsigned char *p;
+		unsigned long csum = 0;
+
+		ptable = (struct partitiontable_entry *)
+			((unsigned long)ptable_head + sizeof(*ptable_head));
+
+		/* Let's be PARANOID, and check the checksum. */
+		p = (unsigned char*) ptable;
+
+		while (p <= (unsigned char*)max_addr) {
+			csum += *p++;
+			csum += *p++;
+			csum += *p++;
+			csum += *p++;
+		}
+		ptable_ok = (csum == ptable_head->checksum);
+
+		/* Read the entries and use/show the info. */
+		printk(KERN_INFO "axisflashmap: "
+		       "Found a%s partition table at 0x%p-0x%p.\n",
+		       (ptable_ok ? " valid" : "n invalid"), ptable_head,
+		       max_addr);
+
+		/* We have found a working bootblock. Now read the
+		 * partition table. Scan the table. It ends with 0xffffffff.
+		 */
+		while (ptable_ok
+		       && ptable->offset != PARTITIONTABLE_END_MARKER
+		       && ptable < max_addr
+		       && pidx < MAX_PARTITIONS - 1) {
+
+			axis_partitions[pidx].offset = offset + ptable->offset;
+#ifdef CONFIG_ETRAX_NANDFLASH
+			if (main_mtd->type == MTD_NANDFLASH) {
+				axis_partitions[pidx].size =
+					(((ptable+1)->offset ==
+					  PARTITIONTABLE_END_MARKER) ?
+					 main_mtd->size :
+					 ((ptable+1)->offset + offset)) -
+					(ptable->offset + offset);
+
+			} else
+#endif /* CONFIG_ETRAX_NANDFLASH */
+				axis_partitions[pidx].size = ptable->size;
+#ifdef CONFIG_ETRAX_NANDBOOT
+			/* Save the partition number of the jffs2 ro partition.
+			 * Needed if RAM booting or root file system in RAM.
+			 */
+			if (!nand_boot &&
+			    ram_rootfs_partition < 0 && /* not already set */
+			    ptable->type == PARTITION_TYPE_JFFS2 &&
+			    (ptable->flags & PARTITION_FLAGS_READONLY_MASK) ==
+				PARTITION_FLAGS_READONLY)
+				ram_rootfs_partition = pidx;
+#endif /* CONFIG_ETRAX_NANDBOOT */
+			pidx++;
+			ptable++;
+		}
+	}
+
+	/* Decide whether to use the default partition table. */
+	/* Only use the default table if we actually have a device (main_mtd) */
+
+	struct mtd_partition *partition = &axis_partitions[0];
+	if (main_mtd && !ptable_ok) {
+		memcpy(axis_partitions, axis_default_partitions,
+		       sizeof(axis_default_partitions));
+		pidx = NUM_DEFAULT_PARTITIONS;
+		ram_rootfs_partition = DEFAULT_ROOTFS_PARTITION_NO;
+	}
+
+	/* Add artificial partitions for rootfs if necessary */
+	if (romfs_in_flash) {
+		/* rootfs is in directly accessible flash memory = NOR flash.
+		   Add an overlapping device for the rootfs partition. */
+		printk(KERN_INFO "axisflashmap: Adding partition for "
+		       "overlapping root file system image\n");
+		axis_partitions[pidx].size = romfs_length;
+		axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR;
+		axis_partitions[pidx].name = "romfs";
+		axis_partitions[pidx].mask_flags |= MTD_WRITEABLE;
+		ram_rootfs_partition = -1;
+		pidx++;
+	} else if (romfs_length && !nand_boot) {
+		/* romfs exists in memory, but not in flash, so it must be in
+		 * RAM. Configure an MTDRAM partition. */
+		if (ram_rootfs_partition < 0) {
+			/* None set yet, put it at the end */
+			ram_rootfs_partition = pidx;
+			pidx++;
+		}
+		printk(KERN_INFO "axisflashmap: Adding partition for "
+		       "root file system image in RAM\n");
+		axis_partitions[ram_rootfs_partition].size = romfs_length;
+		axis_partitions[ram_rootfs_partition].offset = romfs_start;
+		axis_partitions[ram_rootfs_partition].name = "romfs";
+		axis_partitions[ram_rootfs_partition].mask_flags |=
+			MTD_WRITEABLE;
+	}
+
+#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
+	if (main_mtd) {
+		main_partition.size = main_mtd->size;
+		err = mtd_device_register(main_mtd, &main_partition, 1);
+		if (err)
+			panic("axisflashmap: Could not initialize "
+			      "partition for whole main mtd device!\n");
+	}
+#endif
+
+	/* Now, register all partitions with mtd.
+	 * We do this one at a time so we can slip in an MTDRAM device
+	 * in the proper place if required. */
+
+	for (part = 0; part < pidx; part++) {
+		if (part == ram_rootfs_partition) {
+			/* add MTDRAM partition here */
+			struct mtd_info *mtd_ram;
+
+			mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
+			if (!mtd_ram)
+				panic("axisflashmap: Couldn't allocate memory "
+				      "for mtd_info!\n");
+			printk(KERN_INFO "axisflashmap: Adding RAM partition "
+			       "for rootfs image.\n");
+			err = mtdram_init_device(mtd_ram,
+						 (void *)partition[part].offset,
+						 partition[part].size,
+						 partition[part].name);
+			if (err)
+				panic("axisflashmap: Could not initialize "
+				      "MTD RAM device!\n");
+			/* JFFS2 likes to have an erasesize. Keep a potential
+			 * JFFS2 rootfs happy by providing one. Since the image
+			 * was most likely created for the main mtd, use that
+			 * erasesize, if available. Otherwise, make a guess. */
+			mtd_ram->erasesize = (main_mtd ?
+					      main_mtd->erasesize :
+					      CONFIG_ETRAX_PTABLE_SECTOR);
+		} else {
+			err = mtd_device_register(main_mtd, &partition[part],
+						  1);
+			if (err)
+				panic("axisflashmap: Could not add mtd "
+				      "partition %d\n", part);
+		}
+	}
+
+	if (aux_mtd) {
+		aux_partition.size = aux_mtd->size;
+		err = mtd_device_register(aux_mtd, &aux_partition, 1);
+		if (err)
+			panic("axisflashmap: Could not initialize "
+			      "aux mtd device!\n");
+
+	}
+
+	return err;
+}
+
+/* This adds the above to the kernel's init-call chain. */
+module_init(init_axis_flash);
+
+EXPORT_SYMBOL(axisflash_mtd);
diff --git a/kernel/arch/cris/arch-v32/drivers/cryptocop.c b/kernel/arch/cris/arch-v32/drivers/cryptocop.c
new file mode 100644
index 000000000..877da1908
--- /dev/null
+++ b/kernel/arch/cris/arch-v32/drivers/cryptocop.c
@@ -0,0 +1,3536 @@
+/*
+ * Stream co-processor driver for the ETRAX FS
+ *
+ * Copyright (C) 2003-2007 Axis Communications AB
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/atomic.h>
+
+#include <linux/list.h>
+#include <linux/interrupt.h>
+
+#include <asm/signal.h>
+#include <asm/irq.h>
+
+#include <dma.h>
+#include <hwregs/dma.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/intr_vect_defs.h>
+
+#include <hwregs/strcop.h>
+#include <hwregs/strcop_defs.h>
+#include <cryptocop.h>
+
+#ifdef CONFIG_ETRAXFS
+#define IN_DMA 9
+#define OUT_DMA 8
+#define IN_DMA_INST regi_dma9
+#define OUT_DMA_INST regi_dma8
+#define DMA_IRQ DMA9_INTR_VECT
+#else
+#define IN_DMA 3
+#define OUT_DMA 2
+#define IN_DMA_INST regi_dma3
+#define OUT_DMA_INST regi_dma2
+#define DMA_IRQ DMA3_INTR_VECT
+#endif
+
+#define DESCR_ALLOC_PAD (31)
+
+struct cryptocop_dma_desc {
+	char *free_buf; /* If non-null will be kfreed in free_cdesc() */
+	dma_descr_data *dma_descr;
+
+	unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];
+
+	unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */
+	struct cryptocop_dma_desc *next;
+};
+
+
+struct cryptocop_int_operation{
+	void *alloc_ptr;
+	cryptocop_session_id sid;
+
+	dma_descr_context ctx_out;
+	dma_descr_context ctx_in;
+
+	/* DMA descriptors allocated by driver. */
+	struct cryptocop_dma_desc *cdesc_out;
+	struct cryptocop_dma_desc *cdesc_in;
+
+	/* Strcop config to use. */
+	cryptocop_3des_mode tdes_mode;
+	cryptocop_csum_type csum_mode;
+
+	/* DMA descrs provided by consumer. */
+	dma_descr_data *ddesc_out;
+	dma_descr_data *ddesc_in;
+};
+
+
+struct cryptocop_tfrm_ctx {
+	cryptocop_tfrm_id tid;
+	unsigned int blocklength;
+
+	unsigned int start_ix;
+
+	struct cryptocop_tfrm_cfg *tcfg;
+	struct cryptocop_transform_ctx *tctx;
+
+	unsigned char previous_src;
+	unsigned char current_src;
+
+	/* Values to use in metadata out. */
+	unsigned char hash_conf;
+	unsigned char hash_mode;
+	unsigned char ciph_conf;
+	unsigned char cbcmode;
+	unsigned char decrypt;
+
+	unsigned int requires_padding:1;
+	unsigned int strict_block_length:1;
+	unsigned int active:1;
+	unsigned int done:1;
+	size_t consumed;
+	size_t produced;
+
+	/* Pad (input) descriptors to put in the DMA out list when the transform
+	 * output is put on the DMA in list. */
+	struct cryptocop_dma_desc *pad_descs;
+
+	struct cryptocop_tfrm_ctx *prev_src;
+	struct cryptocop_tfrm_ctx *curr_src;
+
+	/* Mapping to HW. */
+	unsigned char unit_no;
+};
+
+
+struct cryptocop_private{
+	cryptocop_session_id sid;
+	struct cryptocop_private *next;
+};
+
+/* Session list. */
+
+struct cryptocop_transform_ctx{
+	struct cryptocop_transform_init init;
+	unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
+	unsigned int dec_key_set:1;
+
+	struct cryptocop_transform_ctx *next;
+};
+
+
+struct cryptocop_session{
+	cryptocop_session_id sid;
+
+	struct cryptocop_transform_ctx *tfrm_ctx;
+
+	struct cryptocop_session *next;
+};
+
+/* Priority levels for jobs sent to the cryptocop. Checksum operations from
+   the kernel have the highest priority, since TCP/IP stack processing must
+   not be a bottleneck. */
+typedef enum {
+	cryptocop_prio_kernel_csum = 0,
+	cryptocop_prio_kernel = 1,
+	cryptocop_prio_user = 2,
+	cryptocop_prio_no_prios = 3
+} cryptocop_queue_priority;
+
+struct cryptocop_prio_queue{
+	struct list_head jobs;
+	cryptocop_queue_priority prio;
+};
+
+struct cryptocop_prio_job{
+	struct list_head node;
+	cryptocop_queue_priority prio;
+
+	struct cryptocop_operation *oper;
+	struct cryptocop_int_operation *iop;
+};
+
+struct ioctl_job_cb_ctx {
+	unsigned int processed:1;
+};
+
+
+static struct cryptocop_session *cryptocop_sessions = NULL;
+spinlock_t cryptocop_sessions_lock;
+
+/* Next Session ID to assign. */
+static cryptocop_session_id next_sid = 1;
+
+/* Pad for checksum. */
+static const char csum_zero_pad[1] = {0x00};
+
+/* Trash buffer for mem2mem operations. */
+#define MEM2MEM_DISCARD_BUF_LENGTH (512)
+static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];
+
+/* Descriptor pool. */
+/* FIXME Tweak this value. */
+#define CRYPTOCOP_DESCRIPTOR_POOL_SIZE (100)
+static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
+static struct cryptocop_dma_desc *descr_pool_free_list;
+static int descr_pool_no_free;
+static spinlock_t descr_pool_lock;
+
+/* Lock to stop the cryptocop from starting processing of a new operation.
+   The holder of this lock MUST call cryptocop_start_job() after it is
+   unlocked. */
+spinlock_t cryptocop_process_lock;
+
+static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
+static spinlock_t cryptocop_job_queue_lock;
+static struct cryptocop_prio_job *cryptocop_running_job = NULL;
+static spinlock_t running_job_lock;
+
+/* The interrupt handler appends completed jobs to this list. The scheduled
+ * tasklet removes them upon sending the response to the crypto consumer. */
+static struct list_head cryptocop_completed_jobs;
+static spinlock_t cryptocop_completed_jobs_lock;
+
+DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
+
+
+/** Local functions. **/
+
+static int cryptocop_open(struct inode *, struct file *);
+
+static int cryptocop_release(struct inode *, struct file *);
+
+static long cryptocop_ioctl(struct file *file,
+			    unsigned int cmd, unsigned long arg);
+
+static void cryptocop_start_job(void);
+
+static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation);
+static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation);
+
+static int cryptocop_job_queue_init(void);
+static void cryptocop_job_queue_close(void);
+
+static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);
+
+static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);
+
+static int transform_ok(struct cryptocop_transform_init *tinit);
+
+static struct cryptocop_session *get_session(cryptocop_session_id sid);
+
+static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid);
+
+static void delete_internal_operation(struct cryptocop_int_operation *iop);
+
+static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength);
+
+static int init_stream_coprocessor(void);
+
+static void __exit exit_stream_coprocessor(void);
+
+/*#define LDEBUG*/
+#ifdef LDEBUG
+#define DEBUG(s) s
+#define DEBUG_API(s) s
+static void print_cryptocop_operation(struct cryptocop_operation *cop);
+static void print_dma_descriptors(struct cryptocop_int_operation *iop);
+static void print_strcop_crypto_op(struct strcop_crypto_op *cop);
+static void print_lock_status(void);
+static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op);
+#define assert(s) do{if (!(s)) panic(#s);} while(0);
+#else
+#define DEBUG(s)
+#define DEBUG_API(s)
+#define assert(s)
+#endif
+
+
+/* Transform constants. */
+#define DES_BLOCK_LENGTH (8)
+#define AES_BLOCK_LENGTH (16)
+#define MD5_BLOCK_LENGTH (64)
+#define SHA1_BLOCK_LENGTH (64)
+#define CSUM_BLOCK_LENGTH (2)
+#define MD5_STATE_LENGTH (16)
+#define SHA1_STATE_LENGTH (20)
+
+/* The device number. */
+#define CRYPTOCOP_MAJOR (254)
+#define CRYPTOCOP_MINOR (0)
+
+
+
+const struct file_operations cryptocop_fops = {
+	.owner = THIS_MODULE,
+	.open = cryptocop_open,
+	.release = cryptocop_release,
+	.unlocked_ioctl = cryptocop_ioctl,
+	.llseek = noop_llseek,
+};
+
+
+static void free_cdesc(struct cryptocop_dma_desc *cdesc)
+{
+	DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
+	kfree(cdesc->free_buf);
+
+	if (cdesc->from_pool) {
+		unsigned long int flags;
+		spin_lock_irqsave(&descr_pool_lock, flags);
+		cdesc->next = descr_pool_free_list;
+		descr_pool_free_list = cdesc;
+		++descr_pool_no_free;
+		spin_unlock_irqrestore(&descr_pool_lock, flags);
+	} else {
+		kfree(cdesc);
+	}
+}
+
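alloc_cdesc() below carves a 32-byte-aligned dma_descr_data out of the oversized dma_descr_buf member by adding DESCR_ALLOC_PAD (31) and masking with ~0x1F. A standalone sketch of that align-up arithmetic; demo_align is an invented helper for illustration, not driver API.

	#include <stdint.h>

	/* Align an address up inside a buffer that was over-allocated with
	 * (align - 1) spare bytes; alloc_cdesc() does exactly this with
	 * align == 32 and DESCR_ALLOC_PAD == align - 1 == 31. */
	static void *demo_align(void *buf, uintptr_t align)
	{
		return (void *)(((uintptr_t)buf + align - 1) & ~(align - 1));
	}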
+
+static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
+{
+	int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
+	struct cryptocop_dma_desc *cdesc;
+
+	if (use_pool) {
+		unsigned long int flags;
+		spin_lock_irqsave(&descr_pool_lock, flags);
+		if (!descr_pool_free_list) {
+			spin_unlock_irqrestore(&descr_pool_lock, flags);
+			DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
+			return NULL;
+		}
+		cdesc = descr_pool_free_list;
+		descr_pool_free_list = descr_pool_free_list->next;
+		--descr_pool_no_free;
+		spin_unlock_irqrestore(&descr_pool_lock, flags);
+		cdesc->from_pool = 1;
+	} else {
+		cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
+		if (!cdesc) {
+			DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
+			return NULL;
+		}
+		cdesc->from_pool = 0;
+	}
+	cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);
+
+	cdesc->next = NULL;
+
+	cdesc->free_buf = NULL;
+	cdesc->dma_descr->out_eop = 0;
+	cdesc->dma_descr->in_eop = 0;
+	cdesc->dma_descr->intr = 0;
+	cdesc->dma_descr->eol = 0;
+	cdesc->dma_descr->wait = 0;
+	cdesc->dma_descr->buf = NULL;
+	cdesc->dma_descr->after = NULL;
+
+	DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
+	return cdesc;
+}
+
+
+static void setup_descr_chain(struct cryptocop_dma_desc *cd)
+{
+	DEBUG(printk("setup_descr_chain: entering\n"));
+	while (cd) {
+		if (cd->next) {
+			cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr);
+		} else {
+			cd->dma_descr->next = NULL;
+		}
+		cd = cd->next;
+	}
+	DEBUG(printk("setup_descr_chain: exit\n"));
+}
+
+
+/* Create a pad descriptor for the transform.
+ * Return -1 for error, 0 if pad created. */
+static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
+{
+	struct cryptocop_dma_desc *cdesc = NULL;
+	int error = 0;
+	struct strcop_meta_out mo = {
+		.ciphsel = src_none,
+		.hashsel = src_none,
+		.csumsel = src_none
+	};
+	char *pad;
+	size_t plen;
+
+	DEBUG(printk("create_pad_descriptor: start.\n"));
+	/* Set up the pad descriptor. */
+
+	DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
+	cdesc = alloc_cdesc(alloc_flag);
+	if (!cdesc){
+		DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));
+		goto error_cleanup;
+	}
+	switch (tc->unit_no) {
+	case src_md5:
+		error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
+		if (error){
+			DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
+			goto error_cleanup;
+		}
+		cdesc->free_buf = pad;
+		mo.hashsel = src_dma;
+		mo.hashconf = tc->hash_conf;
+		mo.hashmode = tc->hash_mode;
+		break;
+	case src_sha1:
+		error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
+		if (error){
+			DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
+			goto error_cleanup;
+		}
+		cdesc->free_buf = pad;
+		mo.hashsel = src_dma;
+		mo.hashconf = tc->hash_conf;
+		mo.hashmode = tc->hash_mode;
+		break;
+	case src_csum:
+		if (tc->consumed % tc->blocklength){
+			pad = (char*)csum_zero_pad;
+			plen = 1;
+		} else {
+			pad = (char*)cdesc; /* Use any pointer. */
+			plen = 0;
+		}
+		mo.csumsel = src_dma;
+		break;
+	}
+	cdesc->dma_descr->wait = 1;
+	cdesc->dma_descr->out_eop = 1; /* Since this is a pad, output is pushed. EOP is ok here since the padded unit is the only one active. */
+	cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
+	cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;
+
+	cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
+	*pad_desc = cdesc;
+
+	return 0;
+
+ error_cleanup:
+	if (cdesc) free_cdesc(cdesc);
+	return -1;
+}
+
+
+static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
+{
+	struct cryptocop_dma_desc *key_desc = alloc_cdesc(alloc_flag);
+	struct strcop_meta_out mo = {0};
+
+	DEBUG(printk("setup_key_dl_desc\n"));
+
+	if (!key_desc) {
+		DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));
+		return -ENOMEM;
+	}
+
+	/* Download key. */
+	if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
+		/* Precook the AES decrypt key. */
+		if (!tc->tctx->dec_key_set){
+			get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
+			tc->tctx->dec_key_set = 1;
+		}
+		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
+		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
+	} else {
+		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
+		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
+	}
+	/* Setup metadata. */
+	mo.dlkey = 1;
+	switch (tc->tctx->init.keylen) {
+	case 64:
+		mo.decrypt = 0;
+		mo.hashmode = 0;
+		break;
+	case 128:
+		mo.decrypt = 0;
+		mo.hashmode = 1;
+		break;
+	case 192:
+		mo.decrypt = 1;
+		mo.hashmode = 0;
+		break;
+	case 256:
+		mo.decrypt = 1;
+		mo.hashmode = 1;
+		break;
+	default:
+		break;
+	}
+	mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
+	key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
+
+	key_desc->dma_descr->out_eop = 1;
+	key_desc->dma_descr->wait = 1;
+	key_desc->dma_descr->intr = 0;
+
+	*kd = key_desc;
+	return 0;
+}
+
+static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
+{
+	struct cryptocop_dma_desc *iv_desc = alloc_cdesc(alloc_flag);
+	struct strcop_meta_out mo = {0};
+
+	DEBUG(printk("setup_cipher_iv_desc\n"));
+
+	if (!iv_desc) {
+		DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));
+		return -ENOMEM;
+	}
+	/* Download IV. */
+	iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
+	iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;
+
+	/* Setup metadata. */
+	mo.hashsel = mo.csumsel = src_none;
+	mo.ciphsel = src_dma;
+	mo.ciphconf = tc->ciph_conf;
+	mo.cbcmode = tc->cbcmode;
+
+	iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
+
+	iv_desc->dma_descr->out_eop = 0;
+	iv_desc->dma_descr->wait = 1;
+	iv_desc->dma_descr->intr = 0;
+
+	*id = iv_desc;
+	return 0;
+}
+
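The descriptor setup above and below packs a bit-field metadata struct into the 16-bit md word with REG_TYPE_CONV (from hwregs/reg_rdwr.h). A rough illustration of the conversion it performs; demo_meta and its fields are invented stand-ins, and the real strcop_meta_out layout and the actual macro live in the CRIS hardware headers.

	/* demo_meta is a made-up stand-in for struct strcop_meta_out. */
	struct demo_meta {
		unsigned int ciphsel : 2;
		unsigned int hashsel : 2;
		unsigned int csumsel : 2;
	};

	/* Reinterpret the bit-field struct as the raw 16-bit register value,
	 * which is roughly what REG_TYPE_CONV(unsigned short int, struct
	 * strcop_meta_out, mo) accomplishes for the descriptor's md field. */
	static unsigned short demo_meta_to_md(struct demo_meta m)
	{
		union {
			struct demo_meta s;
			unsigned short raw;
		} u = { .s = m };
		return u.raw;
	}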
+/* Map the output length of the transform to operation output starting on the inject index. */
+static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
+{
+	int err = 0;
+	struct cryptocop_dma_desc head = {0};
+	struct cryptocop_dma_desc *outdesc = &head;
+	size_t iov_offset = 0;
+	size_t out_ix = 0;
+	int outiov_ix = 0;
+	struct strcop_meta_in mi = {0};
+
+	size_t out_length = tc->produced;
+	int rem_length;
+	int dlength;
+
+	assert(out_length != 0);
+	if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
+		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
+		return -EINVAL;
+	}
+	/* Traverse the out iovec until the result inject index is reached. */
+	while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
+		out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
+		outiov_ix++;
+	}
+	if (outiov_ix >= operation->tfrm_op.outcount){
+		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
+		return -EINVAL;
+	}
+	iov_offset = tc->tcfg->inject_ix - out_ix;
+	mi.dmasel = tc->unit_no;
+
+	/* Set up the output descriptors. */
+	while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
+		outdesc->next = alloc_cdesc(alloc_flag);
+		if (!outdesc->next) {
+			DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
+			err = -ENOMEM;
+			goto error_cleanup;
+		}
+		outdesc = outdesc->next;
+		rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
+		dlength = (out_length < rem_length) ? out_length : rem_length;
+
+		DEBUG(printk("create_input_descriptors:\n"
+			     "outiov_ix=%d, rem_length=%d, dlength=%d\n"
+			     "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
+			     "outcount=%d, outiov_ix=%d\n",
+			     outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));
+
+		outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
+		outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
+		outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
+
+		out_length -= dlength;
+		iov_offset += dlength;
+		if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
+			iov_offset = 0;
+			++outiov_ix;
+		}
+	}
+	if (out_length > 0){
+		DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length));
+		err = -EINVAL;
+		goto error_cleanup;
+	}
+	/* Set sync in the last descriptor. */
+	mi.sync = 1;
+	outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
+
+	*id = head.next;
+	return 0;
+
+ error_cleanup:
+	while (head.next) {
+		outdesc = head.next->next;
+		free_cdesc(head.next);
+		head.next = outdesc;
+	}
+	return err;
+}
+
+
+static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
+{
+	while (desc_len != 0) {
+		struct cryptocop_dma_desc *cdesc;
+		int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
+		int dlength = (desc_len < rem_length) ? desc_len : rem_length;
+
+		cdesc = alloc_cdesc(alloc_flag);
+		if (!cdesc) {
+			DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));
+			return -ENOMEM;
+		}
+		(*current_out_cdesc)->next = cdesc;
+		(*current_out_cdesc) = cdesc;
+
+		cdesc->free_buf = NULL;
+
+		cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
+		cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;
+
+		assert(desc_len >= dlength);
+		desc_len -= dlength;
+		*iniov_offset += dlength;
+		if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
+			*iniov_offset = 0;
+			++(*iniov_ix);
+			if (*iniov_ix > operation->tfrm_op.incount) {
+				DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));
+				return -EINVAL;
+			}
+		}
+		cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
+	} /* while (desc_len != 0) */
+	/* The last DMA descriptor gets a 'wait' bit to signal an expected change in metadata. */
+	(*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */
+
+	return 0;
+}
+
+
+static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
+{
+	DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
+	if (tc->tcfg) {
+		int failed = 0;
+		struct cryptocop_dma_desc *idescs = NULL;
+		DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));
+		if (tc->pad_descs) {
+			DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
+			while (tc->pad_descs) {
+				DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
+				(*current_out_cdesc)->next = tc->pad_descs;
+				tc->pad_descs = tc->pad_descs->next;
+				(*current_out_cdesc) = (*current_out_cdesc)->next;
+			}
+		}
+
+		/* Set up and append the output descriptors to the DMA in list. */
+		if (tc->unit_no == src_dma){
+			/* mem2mem. Set up DMA in descriptors to discard all input prior to the requested mem2mem data. */
+			struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
+			unsigned int start_ix = tc->start_ix;
+			while (start_ix){
+				unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;
+				(*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
+				if (!(*current_in_cdesc)->next){
+					DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
+					return -ENOMEM;
+				}
+				(*current_in_cdesc) = (*current_in_cdesc)->next;
+				(*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
+				(*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
+				(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
+				start_ix -= desclen;
+			}
+			mi.sync = 1;
+			(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
+		}
+
+		failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
+		if (failed){
+			DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
+			return failed;
+		}
+		DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
+		while (idescs) {
+			DEBUG(printk("append descriptor 0x%p\n", idescs));
+			(*current_in_cdesc)->next = idescs;
+			idescs = idescs->next;
+			(*current_in_cdesc) = (*current_in_cdesc)->next;
+		}
+	}
+	return 0;
+}
+
+
+
+static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag)
+{
+	struct cryptocop_session *sess;
+	struct cryptocop_transform_ctx *tctx;
+
+	struct cryptocop_tfrm_ctx digest_ctx = {
+		.previous_src = src_none,
+		.current_src = src_none,
+		.start_ix = 0,
+		.requires_padding = 1,
+		.strict_block_length = 0,
+		.hash_conf = 0,
+		.hash_mode = 0,
+		.ciph_conf = 0,
+		.cbcmode = 0,
+		.decrypt = 0,
+		.consumed = 0,
+		.produced = 0,
+		.pad_descs = NULL,
+		.active = 0,
+		.done = 0,
+		.prev_src = NULL,
+		.curr_src = NULL,
+		.tcfg = NULL};
+	struct cryptocop_tfrm_ctx cipher_ctx = {
+		.previous_src = src_none,
+		.current_src = src_none,
+		.start_ix = 0,
+		.requires_padding = 0,
+		.strict_block_length = 1,
+		.hash_conf = 0,
+		.hash_mode = 0,
+		.ciph_conf = 0,
+		.cbcmode = 0,
+		.decrypt = 0,
+		.consumed = 0,
+		.produced = 0,
+		.pad_descs = NULL,
+		.active = 0,
+		.done = 0,
+		.prev_src = NULL,
+		.curr_src = NULL,
+		.tcfg = NULL};
+	struct cryptocop_tfrm_ctx csum_ctx = {
+		.previous_src = src_none,
+		.current_src = src_none,
+		.start_ix = 0,
+		.blocklength = 2,
+		.requires_padding = 1,
+		.strict_block_length = 0,
+		.hash_conf = 0,
+		.hash_mode = 0,
+		.ciph_conf = 0,
+		.cbcmode = 0,
+		.decrypt = 0,
+		.consumed = 0,
+		.produced = 0,
+		.pad_descs = NULL,
+		.active = 0,
+		.done = 0,
+		.tcfg = NULL,
+		.prev_src = NULL,
+		.curr_src = NULL,
+		.unit_no = src_csum};
+	struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg;
+
+	unsigned int indata_ix = 0;
+
+	/* iovec accounting. */
+	int iniov_ix = 0;
+	int iniov_offset = 0;
+
+	/* Operation descriptor cfg traversal pointer. */
+	struct cryptocop_desc *odsc;
+
+	int failed = 0;
+	/* List heads for allocated descriptors. */
+	struct cryptocop_dma_desc out_cdesc_head = {0};
+	struct cryptocop_dma_desc in_cdesc_head = {0};
+
+	struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head;
+	struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head;
+
+	struct cryptocop_tfrm_ctx *output_tc = NULL;
+	void *iop_alloc_ptr;
+
+	assert(operation != NULL);
+	assert(int_op != NULL);
+
+	DEBUG(printk("cryptocop_setup_dma_list: start\n"));
+	DEBUG(print_cryptocop_operation(operation));
+
+	sess = get_session(operation->sid);
+	if (!sess) {
+		DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
+		failed = -EINVAL;
+		goto error_cleanup;
+	}
+	iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
+	if (!iop_alloc_ptr) {
+		DEBUG_API(printk("cryptocop_setup_dma_list: kmalloc cryptocop_int_operation\n"));
+		failed = -ENOMEM;
+		goto error_cleanup;
+	}
+	(*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
+	DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation)));
+	(*int_op)->alloc_ptr = iop_alloc_ptr;
+	DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr));
+
+	(*int_op)->sid = operation->sid;
+	(*int_op)->cdesc_out = NULL;
+	(*int_op)->cdesc_in = NULL;
+	(*int_op)->tdes_mode = cryptocop_3des_ede;
+	(*int_op)->csum_mode = cryptocop_csum_le;
+	(*int_op)->ddesc_out = NULL;
+	(*int_op)->ddesc_in = NULL;
+
+	/* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
+	if (!tcfg) {
+		DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
+		failed = -EINVAL;
+		goto error_cleanup;
+	}
+	while (tcfg) {
+		tctx = get_transform_ctx(sess, tcfg->tid);
+		if (!tctx) {
+			DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid));
+			failed = -EINVAL;
+			goto error_cleanup;
+		}
+		if (tcfg->inject_ix > operation->tfrm_op.outlen){
+			DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen));
+			failed = -EINVAL;
+			goto error_cleanup;
+		}
+		switch (tctx->init.alg){
+		case cryptocop_alg_mem2mem:
+			if (cipher_ctx.tcfg != NULL){
+				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
+				failed = -EINVAL;
+				goto error_cleanup;
+			}
+			/* mem2mem is handled as a NULL cipher. */
*/ + cipher_ctx.cbcmode = 0; + cipher_ctx.decrypt = 0; + cipher_ctx.blocklength = 1; + cipher_ctx.ciph_conf = 0; + cipher_ctx.unit_no = src_dma; + cipher_ctx.tcfg = tcfg; + cipher_ctx.tctx = tctx; + break; + case cryptocop_alg_des: + case cryptocop_alg_3des: + case cryptocop_alg_aes: + /* cipher */ + if (cipher_ctx.tcfg != NULL){ + DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n")); + failed = -EINVAL; + goto error_cleanup; + } + cipher_ctx.tcfg = tcfg; + cipher_ctx.tctx = tctx; + if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){ + cipher_ctx.decrypt = 1; + } + switch (tctx->init.cipher_mode) { + case cryptocop_cipher_mode_ecb: + cipher_ctx.cbcmode = 0; + break; + case cryptocop_cipher_mode_cbc: + cipher_ctx.cbcmode = 1; + break; + default: + DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode)); + failed = -EINVAL; + goto error_cleanup; + } + DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode)); + switch (tctx->init.alg){ + case cryptocop_alg_des: + cipher_ctx.ciph_conf = 0; + cipher_ctx.unit_no = src_des; + cipher_ctx.blocklength = DES_BLOCK_LENGTH; + break; + case cryptocop_alg_3des: + cipher_ctx.ciph_conf = 1; + cipher_ctx.unit_no = src_des; + cipher_ctx.blocklength = DES_BLOCK_LENGTH; + break; + case cryptocop_alg_aes: + cipher_ctx.ciph_conf = 2; + cipher_ctx.unit_no = src_aes; + cipher_ctx.blocklength = AES_BLOCK_LENGTH; + break; + default: + panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg); + } + (*int_op)->tdes_mode = tctx->init.tdes_mode; + break; + case cryptocop_alg_md5: + case cryptocop_alg_sha1: + /* digest */ + if (digest_ctx.tcfg != NULL){ + DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n")); + failed = -EINVAL; + goto error_cleanup; + } + digest_ctx.tcfg = tcfg; + digest_ctx.tctx = tctx; + digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */ + switch (tctx->init.alg){ + case cryptocop_alg_md5: + digest_ctx.blocklength = MD5_BLOCK_LENGTH; + digest_ctx.unit_no = src_md5; + digest_ctx.hash_conf = 1; /* 1 => MD-5 */ + break; + case cryptocop_alg_sha1: + digest_ctx.blocklength = SHA1_BLOCK_LENGTH; + digest_ctx.unit_no = src_sha1; + digest_ctx.hash_conf = 0; /* 0 => SHA-1 */ + break; + default: + panic("cryptocop_setup_dma_list: impossible digest algorithm\n"); + } + break; + case cryptocop_alg_csum: + /* digest */ + if (csum_ctx.tcfg != NULL){ + DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n")); + failed = -EINVAL; + goto error_cleanup; + } + (*int_op)->csum_mode = tctx->init.csum_mode; + csum_ctx.tcfg = tcfg; + csum_ctx.tctx = tctx; + break; + default: + /* no algorithm. */ + DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid)); + failed = -EINVAL; + goto error_cleanup; + } + tcfg = tcfg->next; + } + /* Download key if a cipher is used. */ + if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){ + struct cryptocop_dma_desc *key_desc = NULL; + + failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag); + if (failed) { + DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n")); + goto error_cleanup; + } + current_out_cdesc->next = key_desc; + current_out_cdesc = key_desc; + indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf); + + /* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. 
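+ With an explicit IV the IV travels in its own out descriptor and all cipher indata is payload; + without one, the hardware consumes the first cipher block of the indata as IV (compare the + produced-length compensation in the CBC case further down).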
*/ + if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) { + struct cryptocop_dma_desc *iv_desc = NULL; + + DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n")); + + failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag); + if (failed) { + DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n")); + goto error_cleanup; + } + current_out_cdesc->next = iv_desc; + current_out_cdesc = iv_desc; + indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf); + } + } + + /* Process descriptors. */ + odsc = operation->tfrm_op.desc; + while (odsc) { + struct cryptocop_desc_cfg *dcfg = odsc->cfg; + struct strcop_meta_out meta_out = {0}; + size_t desc_len = odsc->length; + int active_count, eop_needed_count; + + output_tc = NULL; + + DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n")); + + while (dcfg) { + struct cryptocop_tfrm_ctx *tc = NULL; + + DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n")); + /* Get the local context for the transform and mark it as the output unit if it produces output. */ + if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){ + tc = &digest_ctx; + } else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){ + tc = &cipher_ctx; + } else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){ + tc = &csum_ctx; + } + if (!tc) { + DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid)); + failed = -EINVAL; + goto error_cleanup; + } + if (tc->done) { + DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid)); + failed = -EINVAL; + goto error_cleanup; + } + if (!tc->active) { + tc->start_ix = indata_ix; + tc->active = 1; + } + + tc->previous_src = tc->current_src; + tc->prev_src = tc->curr_src; + /* Map source unit id to DMA source config. */ + switch (dcfg->src){ + case cryptocop_source_dma: + tc->current_src = src_dma; + break; + case cryptocop_source_des: + tc->current_src = src_des; + break; + case cryptocop_source_3des: + tc->current_src = src_des; + break; + case cryptocop_source_aes: + tc->current_src = src_aes; + break; + case cryptocop_source_md5: + case cryptocop_source_sha1: + case cryptocop_source_csum: + case cryptocop_source_none: + default: + /* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units. + */ + DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src)); + failed = -EINVAL; + goto error_cleanup; + } + if (tc->current_src != src_dma) { + /* Find the unit we are sourcing from. */ + if (digest_ctx.unit_no == tc->current_src){ + tc->curr_src = &digest_ctx; + } else if (cipher_ctx.unit_no == tc->current_src){ + tc->curr_src = &cipher_ctx; + } else if (csum_ctx.unit_no == tc->current_src){ + tc->curr_src = &csum_ctx; + } + if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){ + DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no)); + failed = -EINVAL; + goto error_cleanup; + } + } else { + tc->curr_src = NULL; + } + + /* Detect source switch. 
*/ + DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_src=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src)); + if (tc->active && (tc->current_src != tc->previous_src)) { + /* Only allow source switch when both the old source unit and the new one have + * no pending data to process (i.e. the consumed length must be a multiple of the + * transform blocklength). */ + /* Note: if the src == NULL we are actually sourcing from DMA out. */ + if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) || + ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength))) + { + DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN)); + failed = -EINVAL; + goto error_cleanup; + } + } + /* Detect unit deactivation. */ + if (dcfg->last) { + /* Length check of this is handled below. */ + tc->done = 1; + } + dcfg = dcfg->next; + } /* while (dcfg) */ + DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n")); + + if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){ + DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no)); + failed = -EINVAL; + goto error_cleanup; + } + if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){ + DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no)); + failed = -EINVAL; + goto error_cleanup; + } + if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){ + DEBUG_API(printk("cryptocop_setup_dma_list: csum source from inactive unit %d\n", csum_ctx.curr_src->unit_no)); + failed = -EINVAL; + goto error_cleanup; + } + + /* Update consumed and produced lengths. + + The consumed length accounting here is actually cheating. If a unit sources from DMA (or any + other unit that processes data in blocks of one octet) it is correct, but if it sources from a + block processing unit, i.e. a cipher, it will be temporarily incorrect at times. However, + since the HW only allows changing source to or from a block processing unit when that + unit has processed an exact multiple of its block length, the end result will be correct. + Beware that if the source change restriction changes, this code will need to be (much) reworked. 
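+ As a worked example (figures for illustration only): an AES unit, block length 16, that has + consumed 20 octets has produced 16 * (20 / 16) = 16 octets; the 4 remaining octets sit in the + unit and are not counted as produced until another 12 octets arrive.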
+ */ + DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len)); + + if (csum_ctx.active) { + csum_ctx.consumed += desc_len; + if (csum_ctx.done) { + csum_ctx.produced = 2; + } + DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength)); + } + if (digest_ctx.active) { + digest_ctx.consumed += desc_len; + if (digest_ctx.done) { + if (digest_ctx.unit_no == src_md5) { + digest_ctx.produced = MD5_STATE_LENGTH; + } else { + digest_ctx.produced = SHA1_STATE_LENGTH; + } + } + DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength)); + } + if (cipher_ctx.active) { + /* Ciphers are allowed only to source from DMA out. That is filtered above. */ + assert(cipher_ctx.current_src == src_dma); + cipher_ctx.consumed += desc_len; + cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength); + if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){ + cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC iv. */ + } + DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength)); + } + + /* Setup the DMA out descriptors. */ + /* Configure the metadata. */ + active_count = 0; + eop_needed_count = 0; + if (cipher_ctx.active) { + ++active_count; + if (cipher_ctx.unit_no == src_dma){ + /* mem2mem */ + meta_out.ciphsel = src_none; + } else { + meta_out.ciphsel = cipher_ctx.current_src; + } + meta_out.ciphconf = cipher_ctx.ciph_conf; + meta_out.cbcmode = cipher_ctx.cbcmode; + meta_out.decrypt = cipher_ctx.decrypt; + DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt)); + if (cipher_ctx.done) ++eop_needed_count; + } else { + meta_out.ciphsel = src_none; + } + + if (digest_ctx.active) { + ++active_count; + meta_out.hashsel = digest_ctx.current_src; + meta_out.hashconf = digest_ctx.hash_conf; + meta_out.hashmode = 0; /* Explicit mode is not used here. */ + DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode)); + if (digest_ctx.done) { + assert(digest_ctx.pad_descs == NULL); + failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag); + if (failed) { + DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n")); + goto error_cleanup; + } + } + } else { + meta_out.hashsel = src_none; + } + + if (csum_ctx.active) { + ++active_count; + meta_out.csumsel = csum_ctx.current_src; + if (csum_ctx.done) { + assert(csum_ctx.pad_descs == NULL); + failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag); + if (failed) { + DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n")); + goto error_cleanup; + } + } + } else { + meta_out.csumsel = src_none; + } + DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count)); + /* Setup DMA out descriptors for the indata. 
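+ Note the direction naming: the DMA "out" channel feeds indata to the co-processor and the + DMA "in" channel carries results back, so out descriptors always describe operation input + and in descriptors operation output.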
*/ + failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, &current_out_cdesc, &meta_out, alloc_flag); + if (failed) { + DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed)); + goto error_cleanup; + } + /* Setup out EOP. If there are active units that are not done here they cannot get an EOP + * so we must set up a zero length descriptor to DMA to signal EOP only to the done units. + * If there is a pad descriptor, the padded unit gets its EOP from that descriptor. + */ + assert(active_count >= eop_needed_count); + assert((eop_needed_count == 0) || (eop_needed_count == 1)); + if (eop_needed_count) { + /* This means that the bulk operation (cipher/m2m) is terminated. */ + if (active_count > 1) { + /* Use zero length EOP descriptor. */ + struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag); + struct strcop_meta_out ed_mo = {0}; + if (!ed) { + DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n")); + failed = -ENOMEM; + goto error_cleanup; + } + + assert(cipher_ctx.active && cipher_ctx.done); + + if (cipher_ctx.unit_no == src_dma){ + /* mem2mem */ + ed_mo.ciphsel = src_none; + } else { + ed_mo.ciphsel = cipher_ctx.current_src; + } + ed_mo.ciphconf = cipher_ctx.ciph_conf; + ed_mo.cbcmode = cipher_ctx.cbcmode; + ed_mo.decrypt = cipher_ctx.decrypt; + + ed->free_buf = NULL; + ed->dma_descr->wait = 1; + ed->dma_descr->out_eop = 1; + + ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */ + ed->dma_descr->after = ed->dma_descr->buf; + ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo); + current_out_cdesc->next = ed; + current_out_cdesc = ed; + } else { + /* Set EOP in the current out descriptor since the only active module is + * the one needing the EOP. */ + + current_out_cdesc->dma_descr->out_eop = 1; + } + } + + if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0; + if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0; + if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0; + indata_ix += odsc->length; + odsc = odsc->next; + } /* while (odsc) */ /* Process descriptors. 
*/ + DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n")); + if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){ + DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n")); + failed = -EINVAL; + goto error_cleanup; + } + if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){ + DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n")); + failed = -EINVAL; + goto error_cleanup; + } + if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){ + DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n")); + failed = -EINVAL; + goto error_cleanup; + } + + failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &cipher_ctx, alloc_flag); + if (failed){ + DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed)); + goto error_cleanup; + } + failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &digest_ctx, alloc_flag); + if (failed){ + DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors digest_ctx %d\n", failed)); + goto error_cleanup; + } + failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &csum_ctx, alloc_flag); + if (failed){ + DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors csum_ctx %d\n", failed)); + goto error_cleanup; + } + + DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op)); + (*int_op)->cdesc_out = out_cdesc_head.next; + (*int_op)->cdesc_in = in_cdesc_head.next; + DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in)); + + setup_descr_chain(out_cdesc_head.next); + setup_descr_chain(in_cdesc_head.next); + + /* Last but not least: mark the last DMA in descriptor for an INTR and EOL and the + * last DMA out descriptor for EOL. + */ + current_in_cdesc->dma_descr->intr = 1; + current_in_cdesc->dma_descr->eol = 1; + current_out_cdesc->dma_descr->eol = 1; + + /* Setup DMA contexts. */ + (*int_op)->ctx_out.next = NULL; + (*int_op)->ctx_out.eol = 1; + (*int_op)->ctx_out.intr = 0; + (*int_op)->ctx_out.store_mode = 0; + (*int_op)->ctx_out.en = 0; + (*int_op)->ctx_out.dis = 0; + (*int_op)->ctx_out.md0 = 0; + (*int_op)->ctx_out.md1 = 0; + (*int_op)->ctx_out.md2 = 0; + (*int_op)->ctx_out.md3 = 0; + (*int_op)->ctx_out.md4 = 0; + (*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr); + (*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already physical address. */ + + (*int_op)->ctx_in.next = NULL; + (*int_op)->ctx_in.eol = 1; + (*int_op)->ctx_in.intr = 0; + (*int_op)->ctx_in.store_mode = 0; + (*int_op)->ctx_in.en = 0; + (*int_op)->ctx_in.dis = 0; + (*int_op)->ctx_in.md0 = 0; + (*int_op)->ctx_in.md1 = 0; + (*int_op)->ctx_in.md2 = 0; + (*int_op)->ctx_in.md3 = 0; + (*int_op)->ctx_in.md4 = 0; + + (*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr); + (*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already physical address. */ + + DEBUG(printk("cryptocop_setup_dma_list: done\n")); + return 0; + +error_cleanup: + { + /* Free all allocated resources. 
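+ That is, the pad descriptor chains built above and, via delete_internal_operation(), the + internal operation itself together with any DMA descriptor chains it already references.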
*/ + struct cryptocop_dma_desc *tmp_cdesc; + while (digest_ctx.pad_descs){ + tmp_cdesc = digest_ctx.pad_descs->next; + free_cdesc(digest_ctx.pad_descs); + digest_ctx.pad_descs = tmp_cdesc; + } + while (csum_ctx.pad_descs){ + tmp_cdesc = csum_ctx.pad_descs->next; + free_cdesc(csum_ctx.pad_descs); + csum_ctx.pad_descs = tmp_cdesc; + } + assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */ + + if (*int_op != NULL) delete_internal_operation(*int_op); + } + DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed)); + return failed; +} + + +static void delete_internal_operation(struct cryptocop_int_operation *iop) +{ + void *ptr = iop->alloc_ptr; + struct cryptocop_dma_desc *cd = iop->cdesc_out; + struct cryptocop_dma_desc *next; + + DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr)); + + while (cd) { + next = cd->next; + free_cdesc(cd); + cd = next; + } + cd = iop->cdesc_in; + while (cd) { + next = cd->next; + free_cdesc(cd); + cd = next; + } + kfree(ptr); +} + +#define MD5_MIN_PAD_LENGTH (9) +#define MD5_PAD_LENGTH_FIELD_LENGTH (8) + +static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length) +{ + size_t padlen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH); + unsigned char *p; + int i; + unsigned long long int bit_length = hashed_length << 3; + + if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH; + + p = kzalloc(padlen, alloc_flag); + if (!p) return -ENOMEM; + + *p = 0x80; + + DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length)); + + i = padlen - MD5_PAD_LENGTH_FIELD_LENGTH; + while (bit_length != 0){ + p[i++] = bit_length % 0x100; + bit_length >>= 8; + } + + *pad = (char*)p; + *pad_length = padlen; + + return 0; +} + +#define SHA1_MIN_PAD_LENGTH (9) +#define SHA1_PAD_LENGTH_FIELD_LENGTH (8) + +static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length) +{ + size_t padlen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH); + unsigned char *p; + int i; + unsigned long long int bit_length = hashed_length << 3; + + if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH; + + p = kzalloc(padlen, alloc_flag); + if (!p) return -ENOMEM; + + *p = 0x80; + + DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length)); + + i = padlen - 1; + while (bit_length != 0){ + p[i--] = bit_length % 0x100; + bit_length >>= 8; + } + + *pad = (char*)p; + *pad_length = padlen; + + return 0; +} + + +static int transform_ok(struct cryptocop_transform_init *tinit) +{ + switch (tinit->alg){ + case cryptocop_alg_csum: + switch (tinit->csum_mode){ + case cryptocop_csum_le: + case cryptocop_csum_be: + break; + default: + DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n")); + return -EINVAL; + } + case cryptocop_alg_mem2mem: + case cryptocop_alg_md5: + case cryptocop_alg_sha1: + if (tinit->keylen != 0) { + DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen)); + return -EINVAL; /* This check is a bit strict. 
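+ The MD5, SHA-1 and checksum units take no key, so a non-zero keylength here is treated as + a caller error rather than silently ignored; keyed digests (e.g. HMAC) would presumably be + layered on top of this API.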
*/ + } + break; + case cryptocop_alg_des: + if (tinit->keylen != 64) { + DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen)); + return -EINVAL; + } + break; + case cryptocop_alg_3des: + if (tinit->keylen != 192) { + DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen)); + return -EINVAL; + } + break; + case cryptocop_alg_aes: + if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) { + DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen)); + return -EINVAL; + } + break; + case cryptocop_no_alg: + default: + DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg)); + return -EINVAL; + } + + switch (tinit->alg){ + case cryptocop_alg_des: + case cryptocop_alg_3des: + case cryptocop_alg_aes: + if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL; + default: + break; + } + return 0; +} + + +int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag) +{ + struct cryptocop_session *sess; + struct cryptocop_transform_init *tfrm_in = tinit; + struct cryptocop_transform_init *tmp_in; + int no_tfrms = 0; + int i; + unsigned long int flags; + + init_stream_coprocessor(); /* For safety if we are called early */ + + while (tfrm_in){ + int err; + ++no_tfrms; + if ((err = transform_ok(tfrm_in))) { + DEBUG_API(printk("cryptocop_new_session, bad transform\n")); + return err; + } + tfrm_in = tfrm_in->next; + } + if (0 == no_tfrms) { + DEBUG_API(printk("cryptocop_new_session, no transforms specified\n")); + return -EINVAL; + } + + sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag); + if (!sess){ + DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n")); + return -ENOMEM; + } + + sess->tfrm_ctx = kmalloc(no_tfrms * sizeof(struct cryptocop_transform_ctx), alloc_flag); + if (!sess->tfrm_ctx) { + DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n")); + kfree(sess); + return -ENOMEM; + } + + tfrm_in = tinit; + for (i = 0; i < no_tfrms; i++){ + tmp_in = tfrm_in->next; + while (tmp_in){ + if (tmp_in->tid == tfrm_in->tid) { + DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n")); + kfree(sess->tfrm_ctx); + kfree(sess); + return -EINVAL; + } + tmp_in = tmp_in->next; + } + memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init)); + sess->tfrm_ctx[i].dec_key_set = 0; + sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1; + + tfrm_in = tfrm_in->next; + } + sess->tfrm_ctx[i-1].next = NULL; + + spin_lock_irqsave(&cryptocop_sessions_lock, flags); + sess->sid = next_sid; + next_sid++; + /* TODO If we are really paranoid we should do a duplicate check to handle sid wraparound. + * OTOH 2^64 is a really large number of sessions. */ + if (next_sid == 0) next_sid = 1; + + /* Prepend to session list. 
*/ + sess->next = cryptocop_sessions; + cryptocop_sessions = sess; + spin_unlock_irqrestore(&cryptocop_sessions_lock, flags); + *sid = sess->sid; + return 0; +} + + +int cryptocop_free_session(cryptocop_session_id sid) +{ + struct cryptocop_transform_ctx *tc; + struct cryptocop_session *sess = NULL; + struct cryptocop_session *psess = NULL; + unsigned long int flags; + int i; + LIST_HEAD(remove_list); + struct list_head *node, *tmp; + struct cryptocop_prio_job *pj; + + DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid)); + + spin_lock_irqsave(&cryptocop_sessions_lock, flags); + sess = cryptocop_sessions; + while (sess && sess->sid != sid){ + psess = sess; + sess = sess->next; + } + if (sess){ + if (psess){ + psess->next = sess->next; + } else { + cryptocop_sessions = sess->next; + } + } + spin_unlock_irqrestore(&cryptocop_sessions_lock, flags); + + if (!sess) return -EINVAL; + + /* Remove queued jobs. */ + spin_lock_irqsave(&cryptocop_job_queue_lock, flags); + + for (i = 0; i < cryptocop_prio_no_prios; i++){ + if (!list_empty(&(cryptocop_job_queues[i].jobs))){ + list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) { + pj = list_entry(node, struct cryptocop_prio_job, node); + if (pj->oper->sid == sid) { + list_move_tail(node, &remove_list); + } + } + } + } + spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); + + list_for_each_safe(node, tmp, &remove_list) { + list_del(node); + pj = list_entry(node, struct cryptocop_prio_job, node); + pj->oper->operation_status = -EAGAIN; /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */ + DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop)); + pj->oper->cb(pj->oper, pj->oper->cb_data); + delete_internal_operation(pj->iop); + kfree(pj); + } + + tc = sess->tfrm_ctx; + /* Erase keying data. */ + while (tc){ + DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid)); + memset(tc->init.key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH); + memset(tc->dec_key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH); + tc = tc->next; + } + kfree(sess->tfrm_ctx); + kfree(sess); + + return 0; +} + +static struct cryptocop_session *get_session(cryptocop_session_id sid) +{ + struct cryptocop_session *sess; + unsigned long int flags; + + spin_lock_irqsave(&cryptocop_sessions_lock, flags); + sess = cryptocop_sessions; + while (sess && (sess->sid != sid)){ + sess = sess->next; + } + spin_unlock_irqrestore(&cryptocop_sessions_lock, flags); + + return sess; +} + +static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid) +{ + struct cryptocop_transform_ctx *tc; + + assert(sess != NULL); /* Check before sess is dereferenced. */ + tc = sess->tfrm_ctx; + DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid)); + while (tc && tc->init.tid != tid){ + DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next)); + tc = tc->next; + } + DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc)); + return tc; +} + + + +/* The AES s-transform matrix (s-box). 
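+ The values are the standard Rijndael SubBytes table written in decimal, + e.g. aes_sbox[0x00] == 99 == 0x63 and aes_sbox[0x01] == 124 == 0x7c.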
*/ +static const u8 aes_sbox[256] = { + 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118, + 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192, + 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21, + 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117, + 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, + 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, + 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, + 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210, + 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115, + 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219, + 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121, + 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, + 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, + 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, + 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, + 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22 +}; + +/* AES has a 32 bit round constant for each round of the + * key schedule. round_constant[i] is really Rcon[i+1] in FIPS-197. + */ +static u32 round_constant[11] = { + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, + 0x1B000000, 0x36000000, 0x6C000000 +}; + +/* Apply the s-box to each of the four octets in w. */ +static u32 aes_ks_subword(const u32 w) +{ + u8 bytes[4]; + + *(u32*)(&bytes[0]) = w; + bytes[0] = aes_sbox[bytes[0]]; + bytes[1] = aes_sbox[bytes[1]]; + bytes[2] = aes_sbox[bytes[2]]; + bytes[3] = aes_sbox[bytes[3]]; + return *(u32*)(&bytes[0]); +} + +/* The encrypt (forward) Rijndael key schedule algorithm pseudo code: + * (Note that AES words are 32 bit long) + * + * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){ + * word temp + * i = 0 + * while (i < Nk) { + * w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3]) + * i = i + 1 + * } + * i = Nk + * + * while (i < (Nb * (Nr + 1))) { + * temp = w[i - 1] + * if ((i mod Nk) == 0) { + * temp = SubWord(RotWord(temp)) xor Rcon[i/Nk] + * } + * else if ((Nk > 6) && ((i mod Nk) == 4)) { + * temp = SubWord(temp) + * } + * w[i] = w[i - Nk] xor temp + * i = i + 1 + * } + * RotWord(t) does an 8 bit cyclic shift left on a 32 bit word. + * SubWord(t) applies the AES s-box individually to each octet + * in a 32 bit word. + * + * For AES Nk can have the values 4, 6, and 8 (corresponding to + * values for Nr of 10, 12, and 14). Nb is always 4. + * + * To construct w[i], w[i - 1] and w[i - Nk] must be + * available. Consequently we must keep a state of the last Nk words + * to be able to create the last round keys. + */ +static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength) +{ + u32 temp; + u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */ + u8 w_last_ix; + int i; + u8 nr, nk; + + switch (keylength){ + case 128: + nk = 4; + nr = 10; + break; + case 192: + nk = 6; + nr = 12; + break; + case 256: + nk = 8; + nr = 14; + break; + default: + panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n"); + } + + /* Need to do host byte order correction here since key is byte oriented and the + * kx algorithm is word (u32) oriented. 
*/ + for (i = 0; i < nk; i+=1) { + w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]); + } + + i = (int)nk; + w_last_ix = i - 1; + while (i < (4 * (nr + 2))) { + temp = w_ring[w_last_ix]; + if (!(i % nk)) { + /* RotWord(temp) */ + temp = (temp << 8) | (temp >> 24); + temp = aes_ks_subword(temp); + temp ^= round_constant[i/nk - 1]; + } else if ((nk > 6) && ((i % nk) == 4)) { + temp = aes_ks_subword(temp); + } + w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */ + temp ^= w_ring[w_last_ix]; + w_ring[w_last_ix] = temp; + + /* We need the round keys for round Nr+1 and Nr+2 (round key + * Nr+2 is the round key beyond the last one used when + * encrypting). Rounds are numbered starting from 0, Nr=10 + * implies 11 rounds are used in encryption/decryption. + */ + if (i >= (4 * nr)) { + /* Need to do host byte order correction here, the key + * is byte oriented. */ + *(u32*)dec_key = cpu_to_be32(temp); + dec_key += 4; + } + ++i; + } +} + + +/**** Job/operation management. ****/ + +int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation) +{ + return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation); +} + +int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation) +{ + return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation); +} + +int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation) +{ + return cryptocop_job_queue_insert(cryptocop_prio_user, operation); +} + +static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation) +{ + int ret; + struct cryptocop_prio_job *pj = NULL; + unsigned long int flags; + + DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation)); + + if (!operation || !operation->cb){ + DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation)); + return -EINVAL; + } + + if ((ret = cryptocop_job_setup(&pj, operation)) != 0){ + DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n")); + return ret; + } + assert(pj != NULL); + + spin_lock_irqsave(&cryptocop_job_queue_lock, flags); + list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs); + spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); + + /* Make sure a job is running */ + cryptocop_start_job(); + return 0; +} + +static void cryptocop_do_tasklet(unsigned long unused); +DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0); + +static void cryptocop_do_tasklet(unsigned long unused) +{ + struct list_head *node; + struct cryptocop_prio_job *pj = NULL; + unsigned long flags; + + DEBUG(printk("cryptocop_do_tasklet: entering\n")); + + do { + spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags); + if (!list_empty(&cryptocop_completed_jobs)){ + node = cryptocop_completed_jobs.next; + list_del(node); + pj = list_entry(node, struct cryptocop_prio_job, node); + } else { + pj = NULL; + } + spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags); + if (pj) { + assert(pj->oper != NULL); + + /* Notify consumer of operation completeness. */ + DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); + + pj->oper->operation_status = 0; /* Job is completed. 
*/ + pj->oper->cb(pj->oper, pj->oper->cb_data); + delete_internal_operation(pj->iop); + kfree(pj); + } + } while (pj != NULL); + + DEBUG(printk("cryptocop_do_tasklet: exiting\n")); +} + +static irqreturn_t +dma_done_interrupt(int irq, void *dev_id) +{ + struct cryptocop_prio_job *done_job; + reg_dma_rw_ack_intr ack_intr = { + .data = 1, + }; + + REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr); + + DEBUG(printk("cryptocop DMA done\n")); + + spin_lock(&running_job_lock); + if (cryptocop_running_job == NULL){ + printk("stream co-processor got interrupt when not busy\n"); + spin_unlock(&running_job_lock); + return IRQ_HANDLED; + } + done_job = cryptocop_running_job; + cryptocop_running_job = NULL; + spin_unlock(&running_job_lock); + + /* Start processing a job. */ + if (!spin_trylock(&cryptocop_process_lock)){ + DEBUG(printk("cryptocop irq handler, not starting a job\n")); + } else { + cryptocop_start_job(); + spin_unlock(&cryptocop_process_lock); + } + + done_job->oper->operation_status = 0; /* Job is completed. */ + if (done_job->oper->fast_callback){ + /* This operation wants callback from interrupt. */ + done_job->oper->cb(done_job->oper, done_job->oper->cb_data); + delete_internal_operation(done_job->iop); + kfree(done_job); + } else { + spin_lock(&cryptocop_completed_jobs_lock); + list_add_tail(&(done_job->node), &cryptocop_completed_jobs); + spin_unlock(&cryptocop_completed_jobs_lock); + tasklet_schedule(&cryptocop_tasklet); + } + + DEBUG(printk("cryptocop leave irq handler\n")); + return IRQ_HANDLED; +} + + +/* Setup interrupts and DMA channels. */ +static int init_cryptocop(void) +{ + unsigned long flags; + reg_dma_rw_cfg dma_cfg = {.en = 1}; + reg_dma_rw_intr_mask intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */ + reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 }; + reg_strcop_rw_cfg strcop_cfg = { + .ipend = regk_strcop_little, + .td1 = regk_strcop_e, + .td2 = regk_strcop_d, + .td3 = regk_strcop_e, + .ignore_sync = 0, + .en = 1 + }; + + if (request_irq(DMA_IRQ, dma_done_interrupt, 0, + "stream co-processor DMA", NULL)) + panic("request_irq stream co-processor irq dma9"); + + (void)crisv32_request_dma(OUT_DMA, "strcop", DMA_PANIC_ON_ERROR, + 0, dma_strp); + (void)crisv32_request_dma(IN_DMA, "strcop", DMA_PANIC_ON_ERROR, + 0, dma_strp); + + local_irq_save(flags); + + /* Reset and enable the cryptocop. */ + strcop_cfg.en = 0; + REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg); + strcop_cfg.en = 1; + REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg); + + /* Enable DMAs. */ + REG_WR(dma, IN_DMA_INST, rw_cfg, dma_cfg); /* input DMA */ + REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_cfg); /* output DMA */ + + /* Set up wordsize = 4 for DMAs. */ + DMA_WR_CMD(OUT_DMA_INST, regk_dma_set_w_size4); + DMA_WR_CMD(IN_DMA_INST, regk_dma_set_w_size4); + + /* Enable interrupts. */ + REG_WR(dma, IN_DMA_INST, rw_intr_mask, intr_mask_in); + + /* Clear intr ack. */ + REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr); + + local_irq_restore(flags); + + return 0; +} + +/* Free used cryptocop hw resources (interrupt and DMA channels). */ +static void release_cryptocop(void) +{ + unsigned long flags; + reg_dma_rw_cfg dma_cfg = {.en = 0}; + reg_dma_rw_intr_mask intr_mask_in = {0}; + reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 }; + + local_irq_save(flags); + + /* Clear intr ack. */ + REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr); + + /* Disable DMAs. 
*/ + REG_WR(dma, IN_DMA_INST, rw_cfg, dma_cfg); /* input DMA */ + REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_cfg); /* output DMA */ + + /* Disable interrupts. */ + REG_WR(dma, IN_DMA_INST, rw_intr_mask, intr_mask_in); + + local_irq_restore(flags); + + free_irq(DMA_IRQ, NULL); + + (void)crisv32_free_dma(OUT_DMA); + (void)crisv32_free_dma(IN_DMA); +} + + +/* Init job queue. */ +static int cryptocop_job_queue_init(void) +{ + int i; + + INIT_LIST_HEAD(&cryptocop_completed_jobs); + + for (i = 0; i < cryptocop_prio_no_prios; i++){ + cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i; + INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs); + } + return 0; +} + + +static void cryptocop_job_queue_close(void) +{ + struct list_head *node, *tmp; + struct cryptocop_prio_job *pj = NULL; + unsigned long int process_flags, flags; + int i; + + /* FIXME: This is as yet untested code. */ + + /* Stop strcop from getting an operation to process while we are closing the + module. */ + spin_lock_irqsave(&cryptocop_process_lock, process_flags); + + /* Empty the job queue. */ + for (i = 0; i < cryptocop_prio_no_prios; i++){ + if (!list_empty(&(cryptocop_job_queues[i].jobs))){ + list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) { + pj = list_entry(node, struct cryptocop_prio_job, node); + list_del(node); + + /* Call callback to notify consumer of job removal. */ + DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); + pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */ + pj->oper->cb(pj->oper, pj->oper->cb_data); + + delete_internal_operation(pj->iop); + kfree(pj); + } + } + } + spin_unlock_irqrestore(&cryptocop_process_lock, process_flags); + + /* Remove the running job, if any. */ + spin_lock_irqsave(&running_job_lock, flags); + if (cryptocop_running_job){ + reg_strcop_rw_cfg rw_cfg; + reg_dma_rw_cfg dma_out_cfg, dma_in_cfg; + + /* Stop DMA. */ + dma_out_cfg = REG_RD(dma, OUT_DMA_INST, rw_cfg); + dma_out_cfg.en = regk_dma_no; + REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_out_cfg); + + dma_in_cfg = REG_RD(dma, IN_DMA_INST, rw_cfg); + dma_in_cfg.en = regk_dma_no; + REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg); + + /* Disable the cryptocop. */ + rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg); + rw_cfg.en = 0; + REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); + + pj = cryptocop_running_job; + cryptocop_running_job = NULL; + + /* Call callback to notify consumer of job removal. */ + DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); + pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */ + pj->oper->cb(pj->oper, pj->oper->cb_data); + + delete_internal_operation(pj->iop); + kfree(pj); + } + spin_unlock_irqrestore(&running_job_lock, flags); + + /* Remove completed jobs, if any. */ + spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags); + + list_for_each_safe(node, tmp, &cryptocop_completed_jobs) { + pj = list_entry(node, struct cryptocop_prio_job, node); + list_del(node); + /* Call callback to notify consumer of job removal. */ + DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data)); + pj->oper->operation_status = -EINTR; /* Job is terminated without completion. 
*/ + pj->oper->cb(pj->oper, pj->oper->cb_data); + + delete_internal_operation(pj->iop); + kfree(pj); + } + spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags); +} + + +static void cryptocop_start_job(void) +{ + int i; + struct cryptocop_prio_job *pj; + unsigned long int flags; + unsigned long int running_job_flags; + reg_strcop_rw_cfg rw_cfg = {.en = 1, .ignore_sync = 0}; + + DEBUG(printk("cryptocop_start_job: entering\n")); + + spin_lock_irqsave(&running_job_lock, running_job_flags); + if (cryptocop_running_job != NULL){ + /* Already running. */ + DEBUG(printk("cryptocop_start_job: already running, exit\n")); + spin_unlock_irqrestore(&running_job_lock, running_job_flags); + return; + } + spin_lock_irqsave(&cryptocop_job_queue_lock, flags); + + /* Check the queues in priority order. */ + for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++); + if (i == cryptocop_prio_no_prios) { + spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); + spin_unlock_irqrestore(&running_job_lock, running_job_flags); + DEBUG(printk("cryptocop_start_job: no jobs to run\n")); + return; /* No jobs to run */ + } + DEBUG(printk("starting job for prio %d\n", i)); + + /* TODO: Do not starve lower priority jobs. Let in a lower + * prio job for every N-th processed higher prio job or some + * other scheduling policy. This could reasonably be + * tweakable since the optimal balance would depend on the + * type of load on the system. */ + + /* Pull the DMA lists from the job and start the DMA client. */ + pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node); + list_del(&pj->node); + spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags); + cryptocop_running_job = pj; + + /* Set config register (3DES and CSUM modes). */ + switch (pj->iop->tdes_mode){ + case cryptocop_3des_eee: + rw_cfg.td1 = regk_strcop_e; + rw_cfg.td2 = regk_strcop_e; + rw_cfg.td3 = regk_strcop_e; + break; + case cryptocop_3des_eed: + rw_cfg.td1 = regk_strcop_e; + rw_cfg.td2 = regk_strcop_e; + rw_cfg.td3 = regk_strcop_d; + break; + case cryptocop_3des_ede: + rw_cfg.td1 = regk_strcop_e; + rw_cfg.td2 = regk_strcop_d; + rw_cfg.td3 = regk_strcop_e; + break; + case cryptocop_3des_edd: + rw_cfg.td1 = regk_strcop_e; + rw_cfg.td2 = regk_strcop_d; + rw_cfg.td3 = regk_strcop_d; + break; + case cryptocop_3des_dee: + rw_cfg.td1 = regk_strcop_d; + rw_cfg.td2 = regk_strcop_e; + rw_cfg.td3 = regk_strcop_e; + break; + case cryptocop_3des_ded: + rw_cfg.td1 = regk_strcop_d; + rw_cfg.td2 = regk_strcop_e; + rw_cfg.td3 = regk_strcop_d; + break; + case cryptocop_3des_dde: + rw_cfg.td1 = regk_strcop_d; + rw_cfg.td2 = regk_strcop_d; + rw_cfg.td3 = regk_strcop_e; + break; + case cryptocop_3des_ddd: + rw_cfg.td1 = regk_strcop_d; + rw_cfg.td2 = regk_strcop_d; + rw_cfg.td3 = regk_strcop_d; + break; + default: + DEBUG(printk("cryptocop_start_job: bad 3DES mode\n")); + } + switch (pj->iop->csum_mode){ + case cryptocop_csum_le: + rw_cfg.ipend = regk_strcop_little; + break; + case cryptocop_csum_be: + rw_cfg.ipend = regk_strcop_big; + break; + default: + DEBUG(printk("cryptocop_start_job: bad checksum mode\n")); + } + REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); + + DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n" + "ctx_in: 0x%p, phys: 0x%p\n" + "ctx_out: 0x%p, phys: 0x%p\n", + pj, + &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in), + &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out))); + + /* Start input DMA. 
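+ The in channel is started before the out channel, presumably so that receive + descriptors are armed before the co-processor can begin producing data.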
*/ + flush_dma_context(&pj->iop->ctx_in); + DMA_START_CONTEXT(IN_DMA_INST, virt_to_phys(&pj->iop->ctx_in)); + + /* Start output DMA. */ + DMA_START_CONTEXT(OUT_DMA_INST, virt_to_phys(&pj->iop->ctx_out)); + + spin_unlock_irqrestore(&running_job_lock, running_job_flags); + DEBUG(printk("cryptocop_start_job: exiting\n")); +} + + +static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation) +{ + int err; + int alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL; + void *iop_alloc_ptr = NULL; + + *pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag); + if (!*pj) return -ENOMEM; + + DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation)); + + (*pj)->oper = operation; + DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n", (*pj)->oper->cb, (*pj)->oper->cb_data)); + + if (operation->use_dmalists) { + DEBUG(print_user_dma_lists(&operation->list_op)); + if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){ + DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n")); + kfree(*pj); + return -EINVAL; + } + iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag); + if (!iop_alloc_ptr) { + DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n")); + kfree(*pj); + return -ENOMEM; + } + (*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out)); + DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation))); + (*pj)->iop->alloc_ptr = iop_alloc_ptr; + (*pj)->iop->sid = operation->sid; + (*pj)->iop->cdesc_out = NULL; + (*pj)->iop->cdesc_in = NULL; + (*pj)->iop->tdes_mode = operation->list_op.tdes_mode; + (*pj)->iop->csum_mode = operation->list_op.csum_mode; + (*pj)->iop->ddesc_out = operation->list_op.outlist; + (*pj)->iop->ddesc_in = operation->list_op.inlist; + + /* Setup DMA contexts. 
*/ + (*pj)->iop->ctx_out.next = NULL; + (*pj)->iop->ctx_out.eol = 1; + (*pj)->iop->ctx_out.saved_data = operation->list_op.outlist; + (*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf; + + (*pj)->iop->ctx_in.next = NULL; + (*pj)->iop->ctx_in.eol = 1; + (*pj)->iop->ctx_in.saved_data = operation->list_op.inlist; + (*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf; + } else { + if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) { + DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err)); + kfree(*pj); + return err; + } + } + DEBUG(print_dma_descriptors((*pj)->iop)); + + DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n")); + + return 0; +} + +static int cryptocop_open(struct inode *inode, struct file *filp) +{ + int p = iminor(inode); + + if (p != CRYPTOCOP_MINOR) return -EINVAL; + + filp->private_data = NULL; + return 0; +} + + +static int cryptocop_release(struct inode *inode, struct file *filp) +{ + struct cryptocop_private *dev = filp->private_data; + struct cryptocop_private *dev_next; + + while (dev){ + dev_next = dev->next; + if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) { + (void)cryptocop_free_session(dev->sid); + } + kfree(dev); + dev = dev_next; + } + + return 0; +} + + +static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct cryptocop_private *dev = filp->private_data; + struct cryptocop_private *prev_dev = NULL; + struct strcop_session_op *sess_op = (struct strcop_session_op *)arg; + struct strcop_session_op sop; + int err; + + DEBUG(printk("cryptocop_ioctl_close_session\n")); + + if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op))) + return -EFAULT; + err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op)); + if (err) return -EFAULT; + + while (dev && (dev->sid != sop.ses_id)) { + prev_dev = dev; + dev = dev->next; + } + if (dev){ + if (prev_dev){ + prev_dev->next = dev->next; + } else { + filp->private_data = dev->next; + } + err = cryptocop_free_session(dev->sid); + if (err) return -EFAULT; + } else { + DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id)); + return -EINVAL; + } + return 0; +} + + +static void ioctl_process_job_callback(struct cryptocop_operation *op, void*cb_data) +{ + struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data; + + DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data)); + + jc->processed = 1; + wake_up(&cryptocop_ioc_process_wq); +} + + +#define CRYPTOCOP_IOCTL_CIPHER_TID (1) +#define CRYPTOCOP_IOCTL_DIGEST_TID (2) +#define CRYPTOCOP_IOCTL_CSUM_TID (3) + +static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op) +{ + size_t ch_ix = 0; + + if (crp_op->do_cipher) ch_ix = crp_op->cipher_start; + if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start; + if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start; + + DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix)); + return ch_ix; +} + + +static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix) +{ + size_t ch_ix = INT_MAX; + size_t tmp_ix = 0; + + if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){ + if (crp_op->cipher_start > ix) { + ch_ix = crp_op->cipher_start; + } else { + ch_ix = crp_op->cipher_start + crp_op->cipher_len; + } + } + if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){ + if 
(crp_op->digest_start > ix) { + tmp_ix = crp_op->digest_start; + } else { + tmp_ix = crp_op->digest_start + crp_op->digest_len; + } + if (tmp_ix < ch_ix) ch_ix = tmp_ix; + } + if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){ + if (crp_op->csum_start > ix) { + tmp_ix = crp_op->csum_start; + } else { + tmp_ix = crp_op->csum_start + crp_op->csum_len; + } + if (tmp_ix < ch_ix) ch_ix = tmp_ix; + } + if (ch_ix == INT_MAX) ch_ix = ix; + DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix, ch_ix)); + return ch_ix; +} + + +/* Map map_length bytes from the pages starting on *pageix and *pageoffset to iovecs starting on *iovix. + * Return -1 for ok, 0 for fail. */ +static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length ) +{ + int tmplen; + + assert(iov != NULL); + assert(iovix != NULL); + assert(pages != NULL); + assert(pageix != NULL); + assert(pageoffset != NULL); + + DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset)); + + while (map_length > 0){ + DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset)); + if (*iovix >= iovlen){ + DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen)); + return 0; + } + if (*pageix >= nopages){ + DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages)); + return 0; + } + iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset; + tmplen = PAGE_SIZE - *pageoffset; + if (tmplen < map_length){ + (*pageoffset) = 0; + (*pageix)++; + } else { + tmplen = map_length; + (*pageoffset) += map_length; + } + DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix)); + iov[*iovix].iov_len = tmplen; + map_length -= tmplen; + (*iovix)++; + } + DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix)); + return -1; +} + + + +static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +{ + int i; + struct cryptocop_private *dev = filp->private_data; + struct strcop_crypto_op *crp_oper = (struct strcop_crypto_op *)arg; + struct strcop_crypto_op oper = {0}; + int err = 0; + struct cryptocop_operation *cop = NULL; + + struct ioctl_job_cb_ctx *jc = NULL; + + struct page **inpages = NULL; + struct page **outpages = NULL; + int noinpages = 0; + int nooutpages = 0; + + struct cryptocop_desc descs[5]; /* Max 5 descriptors are needed, there are three transforms that + * can get connected/disconnected on different places in the indata. 
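+ * (Each transform contributes at most a start and an end offset, i.e. at most six + * distinct configuration change points and so at most five data segments between the + * first and the last change point; each segment needs one descriptor with at most one + * cfg per transform, hence the 5 and 5*3 dimensions below.)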
*/ + struct cryptocop_desc_cfg dcfgs[5*3]; + int desc_ix = 0; + int dcfg_ix = 0; + struct cryptocop_tfrm_cfg ciph_tcfg = {0}; + struct cryptocop_tfrm_cfg digest_tcfg = {0}; + struct cryptocop_tfrm_cfg csum_tcfg = {0}; + + unsigned char *digest_result = NULL; + int digest_length = 0; + int cblocklen = 0; + unsigned char csum_result[CSUM_BLOCK_LENGTH]; + struct cryptocop_session *sess; + + int iovlen = 0; + int iovix = 0; + int pageix = 0; + int pageoffset = 0; + + size_t prev_ix = 0; + size_t next_ix; + + int cipher_active, digest_active, csum_active; + int end_digest, end_csum; + int digest_done = 0; + int cipher_done = 0; + int csum_done = 0; + + DEBUG(printk("cryptocop_ioctl_process\n")); + + if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){ + DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n")); + return -EFAULT; + } + if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) { + DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n")); + return -EFAULT; + } + DEBUG(print_strcop_crypto_op(&oper)); + + while (dev && dev->sid != oper.ses_id) dev = dev->next; + if (!dev){ + DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id)); + return -EINVAL; + } + + /* Check buffers. */ + if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){ + DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n")); + return -EINVAL; + } + + if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){ + DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n")); + return -EFAULT; + } + if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){ + DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n")); + return -EFAULT; + } + + cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL); + if (!cop) { + DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n")); + return -ENOMEM; + } + jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL); + if (!jc) { + DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n")); + err = -ENOMEM; + goto error_cleanup; + } + jc->processed = 0; + + cop->cb_data = jc; + cop->cb = ioctl_process_job_callback; + cop->operation_status = 0; + cop->use_dmalists = 0; + cop->in_interrupt = 0; + cop->fast_callback = 0; + cop->tfrm_op.tfrm_cfg = NULL; + cop->tfrm_op.desc = NULL; + cop->tfrm_op.indata = NULL; + cop->tfrm_op.incount = 0; + cop->tfrm_op.inlen = 0; + cop->tfrm_op.outdata = NULL; + cop->tfrm_op.outcount = 0; + cop->tfrm_op.outlen = 0; + + sess = get_session(oper.ses_id); + if (!sess){ + DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n")); + kfree(cop); + kfree(jc); + return -EINVAL; + } + + if (oper.do_cipher) { + unsigned int cipher_outlen = 0; + struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID); + if (!tc) { + DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n")); + err = -EINVAL; + goto error_cleanup; + } + ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID; + ciph_tcfg.inject_ix = 0; + ciph_tcfg.flags = 0; + if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){ + DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n")); + kfree(cop); + kfree(jc); + return -EINVAL; + } + cblocklen = tc->init.alg == cryptocop_alg_aes ? 
AES_BLOCK_LENGTH : DES_BLOCK_LENGTH; + if (oper.cipher_len % cblocklen) { + kfree(cop); + kfree(jc); + DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n")); + return -EINVAL; + } + cipher_outlen = oper.cipher_len; + if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){ + if (oper.cipher_explicit) { + ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV; + memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen); + } else { + cipher_outlen = oper.cipher_len - cblocklen; + } + } else { + if (oper.cipher_explicit){ + kfree(cop); + kfree(jc); + DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n")); + return -EINVAL; + } + } + if (oper.cipher_outlen != cipher_outlen) { + kfree(cop); + kfree(jc); + DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen)); + return -EINVAL; + } + + if (oper.decrypt){ + ciph_tcfg.flags |= CRYPTOCOP_DECRYPT; + } else { + ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT; + } + ciph_tcfg.next = cop->tfrm_op.tfrm_cfg; + cop->tfrm_op.tfrm_cfg = &ciph_tcfg; + } + if (oper.do_digest){ + struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID); + if (!tc) { + DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n")); + err = -EINVAL; + goto error_cleanup; + } + digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20; + digest_result = kmalloc(digest_length, GFP_KERNEL); + if (!digest_result) { + DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n")); + err = -ENOMEM; + goto error_cleanup; + } + DEBUG(memset(digest_result, 0xff, digest_length)); + + digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID; + digest_tcfg.inject_ix = 0; + ciph_tcfg.inject_ix += digest_length; + if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){ + DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n")); + err = -EINVAL; + goto error_cleanup; + } + + digest_tcfg.next = cop->tfrm_op.tfrm_cfg; + cop->tfrm_op.tfrm_cfg = &digest_tcfg; + } + if (oper.do_csum){ + csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID; + csum_tcfg.inject_ix = digest_length; + ciph_tcfg.inject_ix += 2; + + if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){ + DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n")); + kfree(cop); + kfree(jc); + return -EINVAL; + } + + csum_tcfg.next = cop->tfrm_op.tfrm_cfg; + cop->tfrm_op.tfrm_cfg = &csum_tcfg; + } + + prev_ix = first_cfg_change_ix(&oper); + if (prev_ix > oper.inlen) { + DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n")); + nooutpages = noinpages = 0; + err = -EINVAL; + goto error_cleanup; + } + DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen)); + + /* Map user pages for in and out data of the operation. 
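+ The count below is the usual page-span calculation; e.g. with 4 KiB pages (illustration + only), indata at page offset 4090 and inlen - prev_ix == 10 gives + (4090 + 10 - 1 + 4095) >> 12 == 2 pages.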
*/ + noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT; + DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages)); + inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL); + if (!inpages){ + DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n")); + nooutpages = noinpages = 0; + err = -ENOMEM; + goto error_cleanup; + } + if (oper.do_cipher){ + nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT; + DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages)); + outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL); + if (!outpages){ + DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n")); + nooutpages = noinpages = 0; + err = -ENOMEM; + goto error_cleanup; + } + } + + /* Acquire the mm page semaphore. */ + down_read(&current->mm->mmap_sem); + + err = get_user_pages(current, + current->mm, + (unsigned long int)(oper.indata + prev_ix), + noinpages, + 0, /* read access only for in data */ + 0, /* no force */ + inpages, + NULL); + + if (err < 0) { + up_read(&current->mm->mmap_sem); + nooutpages = noinpages = 0; + DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n")); + goto error_cleanup; + } + noinpages = err; + if (oper.do_cipher){ + err = get_user_pages(current, + current->mm, + (unsigned long int)oper.cipher_outdata, + nooutpages, + 1, /* write access for out data */ + 0, /* no force */ + outpages, + NULL); + up_read(&current->mm->mmap_sem); + if (err < 0) { + nooutpages = 0; + DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n")); + goto error_cleanup; + } + nooutpages = err; + } else { + up_read(&current->mm->mmap_sem); + } + + /* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and + * csum output and splits when units are (dis-)connected. */ + cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL); + cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL); + if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) { + DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n")); + err = -ENOMEM; + goto error_cleanup; + } + + cop->tfrm_op.inlen = oper.inlen - prev_ix; + cop->tfrm_op.outlen = 0; + if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen; + if (oper.do_digest) cop->tfrm_op.outlen += digest_length; + if (oper.do_csum) cop->tfrm_op.outlen += 2; + + /* Setup the in iovecs. */ + cop->tfrm_op.incount = noinpages; + if (noinpages > 1){ + size_t tmplen = cop->tfrm_op.inlen; + + cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK); + cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK); + tmplen -= cop->tfrm_op.indata[0].iov_len; + for (i = 1; i<noinpages; i++){ + cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE; + cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]); + tmplen -= PAGE_SIZE; + } + } else { + cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix; + cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK); + } + + iovlen = nooutpages + 6; + pageoffset = oper.do_cipher ?
((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0; + + next_ix = next_cfg_change_ix(&oper, prev_ix); + if (prev_ix == next_ix){ + DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n")); + err = -EINVAL; /* This should be impossible barring bugs. */ + goto error_cleanup; + } + while (prev_ix != next_ix){ + end_digest = end_csum = cipher_active = digest_active = csum_active = 0; + descs[desc_ix].cfg = NULL; + descs[desc_ix].length = next_ix - prev_ix; + + if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) { + dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID; + dcfgs[dcfg_ix].src = cryptocop_source_dma; + cipher_active = 1; + + if (next_ix == (oper.cipher_start + oper.cipher_len)){ + cipher_done = 1; + dcfgs[dcfg_ix].last = 1; + } else { + dcfgs[dcfg_ix].last = 0; + } + dcfgs[dcfg_ix].next = descs[desc_ix].cfg; + descs[desc_ix].cfg = &dcfgs[dcfg_ix]; + ++dcfg_ix; + } + if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) { + digest_active = 1; + dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID; + dcfgs[dcfg_ix].src = cryptocop_source_dma; + if (next_ix == (oper.digest_start + oper.digest_len)){ + assert(!digest_done); + digest_done = 1; + dcfgs[dcfg_ix].last = 1; + } else { + dcfgs[dcfg_ix].last = 0; + } + dcfgs[dcfg_ix].next = descs[desc_ix].cfg; + descs[desc_ix].cfg = &dcfgs[dcfg_ix]; + ++dcfg_ix; + } + if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){ + csum_active = 1; + dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID; + dcfgs[dcfg_ix].src = cryptocop_source_dma; + if (next_ix == (oper.csum_start + oper.csum_len)){ + csum_done = 1; + dcfgs[dcfg_ix].last = 1; + } else { + dcfgs[dcfg_ix].last = 0; + } + dcfgs[dcfg_ix].next = descs[desc_ix].cfg; + descs[desc_ix].cfg = &dcfgs[dcfg_ix]; + ++dcfg_ix; + } + if (!descs[desc_ix].cfg){ + DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix)); + err = -EINVAL; + goto error_cleanup; + } + descs[desc_ix].next = &(descs[desc_ix]) + 1; + ++desc_ix; + prev_ix = next_ix; + next_ix = next_cfg_change_ix(&oper, prev_ix); + } + if (desc_ix > 0){ + descs[desc_ix-1].next = NULL; + } else { + descs[0].next = NULL; + } + if (oper.do_digest) { + DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix)); + /* Add outdata iovec, length == <length of type of digest> */ + cop->tfrm_op.outdata[iovix].iov_base = digest_result; + cop->tfrm_op.outdata[iovix].iov_len = digest_length; + ++iovix; + } + if (oper.do_csum) { + /* Add outdata iovec, length == 2, the length of csum. */ + DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix)); + /* Add outdata iovec, length == <length of type of digest> */ + cop->tfrm_op.outdata[iovix].iov_base = csum_result; + cop->tfrm_op.outdata[iovix].iov_len = 2; + ++iovix; + } + if (oper.do_cipher) { + if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){ + DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n")); + err = -ENOSYS; /* This should be impossible barring bugs. 
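+ * map_pages_to_iovec() presumably fails only when it runs out of
+ * iovec slots, and the six spare output iovecs allocated earlier
+ * are meant to rule that out.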
*/ + goto error_cleanup; + } + } + DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix)); + cop->tfrm_op.outcount = iovix; + assert(iovix <= (nooutpages + 6)); + + cop->sid = oper.ses_id; + cop->tfrm_op.desc = &descs[0]; + + DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data)); + + if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) { + DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err)); + err = -EINVAL; + goto error_cleanup; + } + + DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n")); + + wait_event(cryptocop_ioc_process_wq, (jc->processed != 0)); + DEBUG(printk("cryptocop_ioctl_process: end wait for result\n")); + if (!jc->processed){ + printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n"); + err = -EIO; + goto error_cleanup; + } + + /* Job process done. Cipher output should already be correct in job so no post processing of outdata. */ + DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status)); + if (cop->operation_status == 0){ + if (oper.do_digest){ + DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length)); + err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length); + if (0 != err){ + DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err)); + err = -EFAULT; + goto error_cleanup; + } + } + if (oper.do_csum){ + DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n")); + err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2); + if (0 != err){ + DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err)); + err = -EFAULT; + goto error_cleanup; + } + } + err = 0; + } else { + DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status)); + err = cop->operation_status; + } + + error_cleanup: + /* Release page caches. */ + for (i = 0; i < noinpages; i++){ + put_page(inpages[i]); + } + for (i = 0; i < nooutpages; i++){ + int spdl_err; + /* Mark output pages dirty. 
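+ * The co-processor DMA wrote the results without the CPU noticing,
+ * so the pages must be marked dirty explicitly or the output could
+ * be lost before it reaches backing store; set_page_dirty_lock() is
+ * the variant to use when the page lock is not already held.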
*/ + spdl_err = set_page_dirty_lock(outpages[i]); + DEBUG(if (spdl_err < 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err)); + } + for (i = 0; i < nooutpages; i++){ + put_page(outpages[i]); + } + + kfree(digest_result); + kfree(inpages); + kfree(outpages); + if (cop){ + kfree(cop->tfrm_op.indata); + kfree(cop->tfrm_op.outdata); + kfree(cop); + } + kfree(jc); + + DEBUG(print_lock_status()); + + return err; +} + + +static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +{ + cryptocop_session_id sid; + int err; + struct cryptocop_private *dev; + struct strcop_session_op *sess_op = (struct strcop_session_op *)arg; + struct strcop_session_op sop; + struct cryptocop_transform_init *tis = NULL; + struct cryptocop_transform_init ti_cipher = {0}; + struct cryptocop_transform_init ti_digest = {0}; + struct cryptocop_transform_init ti_csum = {0}; + + if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op))) + return -EFAULT; + err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op)); + if (err) return -EFAULT; + if (sop.cipher != cryptocop_cipher_none) { + if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT; + } + DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n")); + + DEBUG(printk("\tcipher:%d\n" + "\tcipher_mode:%d\n" + "\tdigest:%d\n" + "\tcsum:%d\n", + (int)sop.cipher, + (int)sop.cmode, + (int)sop.digest, + (int)sop.csum)); + + if (sop.cipher != cryptocop_cipher_none){ + /* Init the cipher. */ + switch (sop.cipher){ + case cryptocop_cipher_des: + ti_cipher.alg = cryptocop_alg_des; + break; + case cryptocop_cipher_3des: + ti_cipher.alg = cryptocop_alg_3des; + break; + case cryptocop_cipher_aes: + ti_cipher.alg = cryptocop_alg_aes; + break; + default: + DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher)); + return -EINVAL; + } + DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg)); + if (copy_from_user(ti_cipher.key, sop.key, sop.keylen/8)) + return -EFAULT; + ti_cipher.keylen = sop.keylen; + switch (sop.cmode){ + case cryptocop_cipher_mode_cbc: + case cryptocop_cipher_mode_ecb: + ti_cipher.cipher_mode = sop.cmode; + break; + default: + DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode)); + return -EINVAL; + } + DEBUG(printk("cryptocop_ioctl_create_session: setting cipher mode %d\n", ti_cipher.cipher_mode)); + switch (sop.des3_mode){ + case cryptocop_3des_eee: + case cryptocop_3des_eed: + case cryptocop_3des_ede: + case cryptocop_3des_edd: + case cryptocop_3des_dee: + case cryptocop_3des_ded: + case cryptocop_3des_dde: + case cryptocop_3des_ddd: + ti_cipher.tdes_mode = sop.des3_mode; + break; + default: + DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode)); + return -EINVAL; + } + ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID; + ti_cipher.next = tis; + tis = &ti_cipher; + } /* if (sop.cipher != cryptocop_cipher_none) */ + if (sop.digest != cryptocop_digest_none){ + DEBUG(printk("setting digest transform\n")); + switch (sop.digest){ + case cryptocop_digest_md5: + ti_digest.alg = cryptocop_alg_md5; + break; + case cryptocop_digest_sha1: + ti_digest.alg = cryptocop_alg_sha1; + break; + default: + DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest)); + return -EINVAL; + } + ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID; + ti_digest.next = tis; + tis = &ti_digest; + } /* if (sop.digest != cryptocop_digest_none) */ + if (sop.csum != cryptocop_csum_none){ + DEBUG(printk("setting csum transform\n")); + 
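/* Note: the cryptocop_transform_init records (ti_cipher, ti_digest, ti_csum) live on the stack and are chained into the tis list, so cryptocop_new_session() below must copy whatever it needs before this function returns. */ +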
switch (sop.csum){ + case cryptocop_csum_le: + case cryptocop_csum_be: + ti_csum.csum_mode = sop.csum; + break; + default: + DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum)); + return -EINVAL; + } + ti_csum.alg = cryptocop_alg_csum; + ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID; + ti_csum.next = tis; + tis = &ti_csum; + } /* (sop.csum != cryptocop_csum_none) */ + dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL); + if (!dev){ + DEBUG_API(printk("create session, alloc dev\n")); + return -ENOMEM; + } + + err = cryptocop_new_session(&sid, tis, GFP_KERNEL); + DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);}); + + if (err) { + kfree(dev); + return err; + } + sess_op->ses_id = sid; + dev->sid = sid; + dev->next = filp->private_data; + filp->private_data = dev; + + return 0; +} + +static long cryptocop_ioctl_unlocked(struct inode *inode, + struct file *filp, unsigned int cmd, unsigned long arg) +{ + int err = 0; + if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) { + DEBUG_API(printk("cryptocop_ioctl: wrong type\n")); + return -ENOTTY; + } + if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){ + return -ENOTTY; + } + /* Access check of the argument. Some commands, e.g. create session and process op, + needs additional checks. Those are handled in the command handling functions. */ + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); + if (err) return -EFAULT; + + switch (cmd) { + case CRYPTOCOP_IO_CREATE_SESSION: + return cryptocop_ioctl_create_session(inode, filp, cmd, arg); + case CRYPTOCOP_IO_CLOSE_SESSION: + return cryptocop_ioctl_close_session(inode, filp, cmd, arg); + case CRYPTOCOP_IO_PROCESS_OP: + return cryptocop_ioctl_process(inode, filp, cmd, arg); + default: + DEBUG_API(printk("cryptocop_ioctl: unknown command\n")); + return -ENOTTY; + } + return 0; +} + +static long +cryptocop_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret; + + mutex_lock(&cryptocop_mutex); + ret = cryptocop_ioctl_unlocked(file_inode(filp), filp, cmd, arg); + mutex_unlock(&cryptocop_mutex); + + return ret; +} + + +#ifdef LDEBUG +static void print_dma_descriptors(struct cryptocop_int_operation *iop) +{ + struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out; + struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in; + int i; + + printk("print_dma_descriptors start\n"); + + printk("iop:\n"); + printk("\tsid: 0x%lld\n", iop->sid); + + printk("\tcdesc_out: 0x%p\n", iop->cdesc_out); + printk("\tcdesc_in: 0x%p\n", iop->cdesc_in); + printk("\tddesc_out: 0x%p\n", iop->ddesc_out); + printk("\tddesc_in: 0x%p\n", iop->ddesc_in); + + printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out)); + printk("\tnext: 0x%p\n" + "\tsaved_data: 0x%p\n" + "\tsaved_data_buf: 0x%p\n", + iop->ctx_out.next, + iop->ctx_out.saved_data, + iop->ctx_out.saved_data_buf); + + printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in)); + printk("\tnext: 0x%p\n" + "\tsaved_data: 0x%p\n" + "\tsaved_data_buf: 0x%p\n", + iop->ctx_in.next, + iop->ctx_in.saved_data, + iop->ctx_in.saved_data_buf); + + i = 0; + while (cdesc_out) { + dma_descr_data *td; + printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr); + printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr)); + td = cdesc_out->dma_descr; + printk("\n\tbuf: 0x%p\n" + "\tafter: 0x%p\n" + "\tmd: 
0x%04x\n" + "\tnext: 0x%p\n", + td->buf, + td->after, + td->md, + td->next); + printk("flags:\n" + "\twait:\t%d\n" + "\teol:\t%d\n" + "\touteop:\t%d\n" + "\tineop:\t%d\n" + "\tintr:\t%d\n", + td->wait, + td->eol, + td->out_eop, + td->in_eop, + td->intr); + cdesc_out = cdesc_out->next; + i++; + } + i = 0; + while (cdesc_in) { + dma_descr_data *td; + printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr); + printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr)); + td = cdesc_in->dma_descr; + printk("\n\tbuf: 0x%p\n" + "\tafter: 0x%p\n" + "\tmd: 0x%04x\n" + "\tnext: 0x%p\n", + td->buf, + td->after, + td->md, + td->next); + printk("flags:\n" + "\twait:\t%d\n" + "\teol:\t%d\n" + "\touteop:\t%d\n" + "\tineop:\t%d\n" + "\tintr:\t%d\n", + td->wait, + td->eol, + td->out_eop, + td->in_eop, + td->intr); + cdesc_in = cdesc_in->next; + i++; + } + + printk("print_dma_descriptors end\n"); +} + + +static void print_strcop_crypto_op(struct strcop_crypto_op *cop) +{ + printk("print_strcop_crypto_op, 0x%p\n", cop); + + /* Indata. */ + printk("indata=0x%p\n" + "inlen=%d\n" + "do_cipher=%d\n" + "decrypt=%d\n" + "cipher_explicit=%d\n" + "cipher_start=%d\n" + "cipher_len=%d\n" + "outdata=0x%p\n" + "outlen=%d\n", + cop->indata, + cop->inlen, + cop->do_cipher, + cop->decrypt, + cop->cipher_explicit, + cop->cipher_start, + cop->cipher_len, + cop->cipher_outdata, + cop->cipher_outlen); + + printk("do_digest=%d\n" + "digest_start=%d\n" + "digest_len=%d\n", + cop->do_digest, + cop->digest_start, + cop->digest_len); + + printk("do_csum=%d\n" + "csum_start=%d\n" + "csum_len=%d\n", + cop->do_csum, + cop->csum_start, + cop->csum_len); +} + +static void print_cryptocop_operation(struct cryptocop_operation *cop) +{ + struct cryptocop_desc *d; + struct cryptocop_tfrm_cfg *tc; + struct cryptocop_desc_cfg *dc; + int i; + + printk("print_cryptocop_operation, cop=0x%p\n\n", cop); + printk("sid: %lld\n", cop->sid); + printk("operation_status=%d\n" + "use_dmalists=%d\n" + "in_interrupt=%d\n" + "fast_callback=%d\n", + cop->operation_status, + cop->use_dmalists, + cop->in_interrupt, + cop->fast_callback); + + if (cop->use_dmalists){ + print_user_dma_lists(&cop->list_op); + } else { + printk("cop->tfrm_op\n" + "tfrm_cfg=0x%p\n" + "desc=0x%p\n" + "indata=0x%p\n" + "incount=%d\n" + "inlen=%d\n" + "outdata=0x%p\n" + "outcount=%d\n" + "outlen=%d\n\n", + cop->tfrm_op.tfrm_cfg, + cop->tfrm_op.desc, + cop->tfrm_op.indata, + cop->tfrm_op.incount, + cop->tfrm_op.inlen, + cop->tfrm_op.outdata, + cop->tfrm_op.outcount, + cop->tfrm_op.outlen); + + tc = cop->tfrm_op.tfrm_cfg; + while (tc){ + printk("tfrm_cfg, 0x%p\n" + "tid=%d\n" + "flags=%d\n" + "inject_ix=%d\n" + "next=0x%p\n", + tc, + tc->tid, + tc->flags, + tc->inject_ix, + tc->next); + tc = tc->next; + } + d = cop->tfrm_op.desc; + while (d){ + printk("\n======================desc, 0x%p\n" + "length=%d\n" + "cfg=0x%p\n" + "next=0x%p\n", + d, + d->length, + d->cfg, + d->next); + dc = d->cfg; + while (dc){ + printk("=========desc_cfg, 0x%p\n" + "tid=%d\n" + "src=%d\n" + "last=%d\n" + "next=0x%p\n", + dc, + dc->tid, + dc->src, + dc->last, + dc->next); + dc = dc->next; + } + d = d->next; + } + printk("\n====iniov\n"); + for (i = 0; i < cop->tfrm_op.incount; i++){ + printk("indata[%d]\n" + "base=0x%p\n" + "len=%d\n", + i, + cop->tfrm_op.indata[i].iov_base, + cop->tfrm_op.indata[i].iov_len); + } + printk("\n====outiov\n"); + for (i = 0; i < cop->tfrm_op.outcount; i++){ + printk("outdata[%d]\n" + "base=0x%p\n" + "len=%d\n", + i, + cop->tfrm_op.outdata[i].iov_base, + 
cop->tfrm_op.outdata[i].iov_len); + } + } + printk("------------end print_cryptocop_operation\n"); +} + + +static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op) +{ + dma_descr_data *dd; + int i; + + printk("print_user_dma_lists, dma_op=0x%p\n", dma_op); + + printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf)); + printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf)); + + printk("##############outlist\n"); + dd = phys_to_virt((unsigned long int)dma_op->outlist); + i = 0; + while (dd != NULL) { + printk("#%d phys_to_virt(desc) 0x%p\n", i, dd); + printk("\n\tbuf: 0x%p\n" + "\tafter: 0x%p\n" + "\tmd: 0x%04x\n" + "\tnext: 0x%p\n", + dd->buf, + dd->after, + dd->md, + dd->next); + printk("flags:\n" + "\twait:\t%d\n" + "\teol:\t%d\n" + "\touteop:\t%d\n" + "\tineop:\t%d\n" + "\tintr:\t%d\n", + dd->wait, + dd->eol, + dd->out_eop, + dd->in_eop, + dd->intr); + if (dd->eol) + dd = NULL; + else + dd = phys_to_virt((unsigned long int)dd->next); + ++i; + } + + printk("##############inlist\n"); + dd = phys_to_virt((unsigned long int)dma_op->inlist); + i = 0; + while (dd != NULL) { + printk("#%d phys_to_virt(desc) 0x%p\n", i, dd); + printk("\n\tbuf: 0x%p\n" + "\tafter: 0x%p\n" + "\tmd: 0x%04x\n" + "\tnext: 0x%p\n", + dd->buf, + dd->after, + dd->md, + dd->next); + printk("flags:\n" + "\twait:\t%d\n" + "\teol:\t%d\n" + "\touteop:\t%d\n" + "\tineop:\t%d\n" + "\tintr:\t%d\n", + dd->wait, + dd->eol, + dd->out_eop, + dd->in_eop, + dd->intr); + if (dd->eol) + dd = NULL; + else + dd = phys_to_virt((unsigned long int)dd->next); + ++i; + } +} + + +static void print_lock_status(void) +{ + printk("**********************print_lock_status\n"); + printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock)); + printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock)); + printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock)); + printk("cryptocop_sessions_lock %d\n", spin_is_locked(&cryptocop_sessions_lock)); + printk("running_job_lock %d\n", spin_is_locked(&running_job_lock)); + printk("cryptocop_process_lock %d\n", spin_is_locked(&cryptocop_process_lock)); +} +#endif /* LDEBUG */ + + +static const char cryptocop_name[] = "ETRAX FS stream co-processor"; + +static int init_stream_coprocessor(void) +{ + int err; + int i; + static int initialized = 0; + + if (initialized) + return 0; + + initialized = 1; + + printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n"); + + err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops); + if (err < 0) { + printk(KERN_ERR "stream co-processor: could not get major number.\n"); + return err; + } + + err = init_cryptocop(); + if (err) { + (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name); + return err; + } + err = cryptocop_job_queue_init(); + if (err) { + release_cryptocop(); + (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name); + return err; + } + /* Init the descriptor pool.
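+ * The loop below links the static descr_pool array into a singly
+ * linked free list:
+ * descr_pool_free_list -> [0] -> [1] -> ... -> [POOL_SIZE-1] -> NULL
+ * Every element has from_pool set, presumably so that the release
+ * path returns it to this pool instead of calling kfree() on it.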
*/ + for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) { + descr_pool[i].from_pool = 1; + descr_pool[i].next = &descr_pool[i + 1]; + } + descr_pool[i].from_pool = 1; + descr_pool[i].next = NULL; + descr_pool_free_list = &descr_pool[0]; + descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE; + + spin_lock_init(&cryptocop_completed_jobs_lock); + spin_lock_init(&cryptocop_job_queue_lock); + spin_lock_init(&descr_pool_lock); + spin_lock_init(&cryptocop_sessions_lock); + spin_lock_init(&running_job_lock); + spin_lock_init(&cryptocop_process_lock); + + cryptocop_sessions = NULL; + next_sid = 1; + + cryptocop_running_job = NULL; + + printk("stream co-processor: init done.\n"); + return 0; +} + +static void __exit exit_stream_coprocessor(void) +{ + release_cryptocop(); + cryptocop_job_queue_close(); +} + +module_init(init_stream_coprocessor); +module_exit(exit_stream_coprocessor); + diff --git a/kernel/arch/cris/arch-v32/drivers/i2c.c b/kernel/arch/cris/arch-v32/drivers/i2c.c new file mode 100644 index 000000000..3b2c82ce8 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/i2c.c @@ -0,0 +1,751 @@ +/*!*************************************************************************** +*! +*! FILE NAME : i2c.c +*! +*! DESCRIPTION: implements an interface for IIC/I2C, both directly from other +*! kernel modules (i2c_writereg/readreg) and from userspace using +*! ioctl()'s +*! +*! Nov 30 1998 Torbjorn Eliasson Initial version. +*! Bjorn Wesen Elinux kernel version. +*! Jan 14 2000 Johan Adolfsson Fixed PB shadow register stuff - +*! don't use PB_I2C if DS1302 uses same bits, +*! use PB. +*| June 23 2003 Pieter Grimmerink Added 'i2c_sendnack'. i2c_readreg now +*| generates nack on last received byte, +*| instead of ack. +*| i2c_getack changed data level while clock +*| was high, causing DS75 to see a stop condition +*! +*! --------------------------------------------------------------------------- +*! +*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN +*! 
+*!***************************************************************************/ + +/****************** INCLUDE FILES SECTION ***********************************/ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/mutex.h> + +#include <asm/etraxi2c.h> + +#include <asm/io.h> +#include <asm/delay.h> + +#include "i2c.h" + +/****************** I2C DEFINITION SECTION *************************/ + +#define D(x) + +#define I2C_MAJOR 123 /* LOCAL/EXPERIMENTAL */ +static DEFINE_MUTEX(i2c_mutex); +static const char i2c_name[] = "i2c"; + +#define CLOCK_LOW_TIME 8 +#define CLOCK_HIGH_TIME 8 +#define START_CONDITION_HOLD_TIME 8 +#define STOP_CONDITION_HOLD_TIME 8 +#define ENABLE_OUTPUT 0x01 +#define ENABLE_INPUT 0x00 +#define I2C_CLOCK_HIGH 1 +#define I2C_CLOCK_LOW 0 +#define I2C_DATA_HIGH 1 +#define I2C_DATA_LOW 0 + +#define i2c_enable() +#define i2c_disable() + +/* enable or disable output-enable, to select output or input on the i2c bus */ + +#define i2c_dir_out() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_out) +#define i2c_dir_in() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_in) + +/* control the i2c clock and data signals */ + +#define i2c_clk(x) crisv32_io_set(&cris_i2c_clk, x) +#define i2c_data(x) crisv32_io_set(&cris_i2c_data, x) + +/* read a bit from the i2c interface */ + +#define i2c_getbit() crisv32_io_rd(&cris_i2c_data) + +#define i2c_delay(usecs) udelay(usecs) + +static DEFINE_SPINLOCK(i2c_lock); /* Protect directions etc */ + +/****************** VARIABLE SECTION ************************************/ + +static struct crisv32_iopin cris_i2c_clk; +static struct crisv32_iopin cris_i2c_data; + +/****************** FUNCTION DEFINITION SECTION *************************/ + + +/* generate i2c start condition */ + +void +i2c_start(void) +{ + /* + * SCL=1 SDA=1 + */ + i2c_dir_out(); + i2c_delay(CLOCK_HIGH_TIME/6); + i2c_data(I2C_DATA_HIGH); + i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME); + /* + * SCL=1 SDA=0 + */ + i2c_data(I2C_DATA_LOW); + i2c_delay(START_CONDITION_HOLD_TIME); + /* + * SCL=0 SDA=0 + */ + i2c_clk(I2C_CLOCK_LOW); + i2c_delay(CLOCK_LOW_TIME); +} + +/* generate i2c stop condition */ + +void +i2c_stop(void) +{ + i2c_dir_out(); + + /* + * SCL=0 SDA=0 + */ + i2c_clk(I2C_CLOCK_LOW); + i2c_data(I2C_DATA_LOW); + i2c_delay(CLOCK_LOW_TIME*2); + /* + * SCL=1 SDA=0 + */ + i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME*2); + /* + * SCL=1 SDA=1 + */ + i2c_data(I2C_DATA_HIGH); + i2c_delay(STOP_CONDITION_HOLD_TIME); + + i2c_dir_in(); +} + +/* write a byte to the i2c interface */ + +void +i2c_outbyte(unsigned char x) +{ + int i; + + i2c_dir_out(); + + for (i = 0; i < 8; i++) { + if (x & 0x80) { + i2c_data(I2C_DATA_HIGH); + } else { + i2c_data(I2C_DATA_LOW); + } + + i2c_delay(CLOCK_LOW_TIME/2); + i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME); + i2c_clk(I2C_CLOCK_LOW); + i2c_delay(CLOCK_LOW_TIME/2); + x <<= 1; + } + i2c_data(I2C_DATA_LOW); + i2c_delay(CLOCK_LOW_TIME/2); + + /* + * enable input + */ + i2c_dir_in(); +} + +/* read a byte from the i2c interface */ + +unsigned char +i2c_inbyte(void) +{ + unsigned char aBitByte = 0; + int i; + + /* Switch off I2C to get bit */ + i2c_disable(); + i2c_dir_in(); + i2c_delay(CLOCK_HIGH_TIME/2); + + /* Get bit */ + aBitByte |= i2c_getbit(); + + /* Enable I2C */ + i2c_enable(); + i2c_delay(CLOCK_LOW_TIME/2); + + for (i = 1; i < 8; i++) { + aBitByte <<= 1; + /* Clock pulse */ + 
i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME); + i2c_clk(I2C_CLOCK_LOW); + i2c_delay(CLOCK_LOW_TIME); + + /* Switch off I2C to get bit */ + i2c_disable(); + i2c_dir_in(); + i2c_delay(CLOCK_HIGH_TIME/2); + + /* Get bit */ + aBitByte |= i2c_getbit(); + + /* Enable I2C */ + i2c_enable(); + i2c_delay(CLOCK_LOW_TIME/2); + } + i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME); + + /* + * we leave the clock low; i2c_inbyte() is usually followed + * by sendack/sendnack, which assume the clock to be low + */ + i2c_clk(I2C_CLOCK_LOW); + return aBitByte; +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_getack +*# +*# DESCRIPTION : checks if ack was received from the i2c slave +*# +*#--------------------------------------------------------------------------*/ + +int +i2c_getack(void) +{ + int ack = 1; + /* + * enable output + */ + i2c_dir_out(); + /* + * Release data bus by setting + * data high + */ + i2c_data(I2C_DATA_HIGH); + /* + * enable input + */ + i2c_dir_in(); + i2c_delay(CLOCK_HIGH_TIME/4); + /* + * generate ACK clock pulse + */ + i2c_clk(I2C_CLOCK_HIGH); +#if 0 + /* + * Use PORT PB instead of I2C + * for input. (I2C not working) + */ + i2c_clk(1); + i2c_data(1); + /* + * switch off I2C + */ + i2c_data(1); + i2c_disable(); + i2c_dir_in(); +#endif + + /* + * now wait for ack + */ + i2c_delay(CLOCK_HIGH_TIME/2); + /* + * check for ack + */ + if (i2c_getbit()) + ack = 0; + i2c_delay(CLOCK_HIGH_TIME/2); + if (!ack) { + if (!i2c_getbit()) /* receiver pulled SDA low */ + ack = 1; + i2c_delay(CLOCK_HIGH_TIME/2); + } + + /* + * our clock is high now, make sure data is low + * before we enable our output. If we keep data high + * and enable output, we would generate a stop condition. + */ +#if 0 + i2c_data(I2C_DATA_LOW); + + /* + * end clock pulse + */ + i2c_enable(); + i2c_dir_out(); +#endif + i2c_clk(I2C_CLOCK_LOW); + i2c_delay(CLOCK_HIGH_TIME/4); + /* + * enable output + */ + i2c_dir_out(); + /* + * remove ACK clock pulse + */ + i2c_data(I2C_DATA_HIGH); + i2c_delay(CLOCK_LOW_TIME/2); + return ack; +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_sendack +*# +*# DESCRIPTION : Sends ACK on received data +*# +*#--------------------------------------------------------------------------*/ +void +i2c_sendack(void) +{ + /* + * enable output + */ + i2c_delay(CLOCK_LOW_TIME); + i2c_dir_out(); + /* + * drive data low for the ack pulse + */ + i2c_data(I2C_DATA_LOW); + /* + * generate clock pulse + */ + i2c_delay(CLOCK_HIGH_TIME/6); + i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME); + i2c_clk(I2C_CLOCK_LOW); + i2c_delay(CLOCK_LOW_TIME/6); + /* + * reset data out + */ + i2c_data(I2C_DATA_HIGH); + i2c_delay(CLOCK_LOW_TIME); + + i2c_dir_in(); +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_sendnack +*# +*# DESCRIPTION : Sends NACK on received data +*# +*#--------------------------------------------------------------------------*/ +void +i2c_sendnack(void) +{ + /* + * enable output + */ + i2c_delay(CLOCK_LOW_TIME); + i2c_dir_out(); + /* + * set data high + */ + i2c_data(I2C_DATA_HIGH); + /* + * generate clock pulse + */ + i2c_delay(CLOCK_HIGH_TIME/6); + i2c_clk(I2C_CLOCK_HIGH); + i2c_delay(CLOCK_HIGH_TIME); + i2c_clk(I2C_CLOCK_LOW); + i2c_delay(CLOCK_LOW_TIME); + + i2c_dir_in(); +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_write +*# +*# DESCRIPTION : Writes a value to an I2C
device +*# +*#--------------------------------------------------------------------------*/ +int +i2c_write(unsigned char theSlave, void *data, size_t nbytes) +{ + int error, cntr = 3; + unsigned char bytes_wrote = 0; + unsigned char value; + unsigned long flags; + + spin_lock_irqsave(&i2c_lock, flags); + + do { + error = 0; + + i2c_start(); + /* + * send slave address + */ + i2c_outbyte((theSlave & 0xfe)); + /* + * wait for ack + */ + if (!i2c_getack()) + error = 1; + /* + * send data + */ + for (bytes_wrote = 0; bytes_wrote < nbytes; bytes_wrote++) { + memcpy(&value, data + bytes_wrote, sizeof value); + i2c_outbyte(value); + /* + * now it's time to wait for ack + */ + if (!i2c_getack()) + error |= 4; + } + /* + * end byte stream + */ + i2c_stop(); + + } while (error && cntr--); + + i2c_delay(CLOCK_LOW_TIME); + + spin_unlock_irqrestore(&i2c_lock, flags); + + return -error; +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_read +*# +*# DESCRIPTION : Reads a value from an I2C device +*# +*#--------------------------------------------------------------------------*/ +int +i2c_read(unsigned char theSlave, void *data, size_t nbytes) +{ + unsigned char b = 0; + unsigned char bytes_read = 0; + int error, cntr = 3; + unsigned long flags; + + spin_lock_irqsave(&i2c_lock, flags); + + do { + error = 0; + memset(data, 0, nbytes); + /* + * generate start condition + */ + i2c_start(); + /* + * send slave address + */ + i2c_outbyte((theSlave | 0x01)); + /* + * wait for ack + */ + if (!i2c_getack()) + error = 1; + /* + * fetch data + */ + for (bytes_read = 0; bytes_read < nbytes; bytes_read++) { + b = i2c_inbyte(); + memcpy(data + bytes_read, &b, sizeof b); + + if (bytes_read < (nbytes - 1)) + i2c_sendack(); + } + /* + * last received byte needs to be nacked + * instead of acked + */ + i2c_sendnack(); + /* + * end sequence + */ + i2c_stop(); + } while (error && cntr--); + + spin_unlock_irqrestore(&i2c_lock, flags); + + return -error; +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_writereg +*# +*# DESCRIPTION : Writes a value to a register in an I2C device +*# +*#--------------------------------------------------------------------------*/ +int +i2c_writereg(unsigned char theSlave, unsigned char theReg, + unsigned char theValue) +{ + int error, cntr = 3; + unsigned long flags; + + spin_lock_irqsave(&i2c_lock, flags); + + do { + error = 0; + + i2c_start(); + /* + * send slave address + */ + i2c_outbyte((theSlave & 0xfe)); + /* + * wait for ack + */ + if(!i2c_getack()) + error = 1; + /* + * now select register + */ + i2c_dir_out(); + i2c_outbyte(theReg); + /* + * now it's time to wait for ack + */ + if(!i2c_getack()) + error |= 2; + /* + * send register data + */ + i2c_outbyte(theValue); + /* + * now it's time to wait for ack + */ + if(!i2c_getack()) + error |= 4; + /* + * end byte stream + */ + i2c_stop(); + } while(error && cntr--); + + i2c_delay(CLOCK_LOW_TIME); + + spin_unlock_irqrestore(&i2c_lock, flags); + + return -error; +} + +/*#--------------------------------------------------------------------------- +*# +*# FUNCTION NAME: i2c_readreg +*# +*# DESCRIPTION : Reads a value from a register in an I2C device.
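+*#
+*# Usage sketch (illustrative only; 0x90 and 0x01 are made-up
+*# slave and register numbers):
+*#
+*# unsigned char val = i2c_readreg(0x90, 0x01);
+*#
+*# Note that unlike i2c_read()/i2c_write() there is no error
+*# return; on a NACK the function still returns whatever was
+*# clocked in, so use i2c_read() when a missing device must be
+*# detected.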
+*# +*#--------------------------------------------------------------------------*/ +unsigned char +i2c_readreg(unsigned char theSlave, unsigned char theReg) +{ + unsigned char b = 0; + int error, cntr = 3; + unsigned long flags; + + spin_lock_irqsave(&i2c_lock, flags); + + do { + error = 0; + /* + * generate start condition + */ + i2c_start(); + + /* + * send slave address + */ + i2c_outbyte((theSlave & 0xfe)); + /* + * wait for ack + */ + if(!i2c_getack()) + error = 1; + /* + * now select register + */ + i2c_dir_out(); + i2c_outbyte(theReg); + /* + * now it's time to wait for ack + */ + if(!i2c_getack()) + error |= 2; + /* + * repeat start condition + */ + i2c_delay(CLOCK_LOW_TIME); + i2c_start(); + /* + * send slave address + */ + i2c_outbyte(theSlave | 0x01); + /* + * wait for ack + */ + if(!i2c_getack()) + error |= 4; + /* + * fetch register + */ + b = i2c_inbyte(); + /* + * last received byte needs to be nacked + * instead of acked + */ + i2c_sendnack(); + /* + * end sequence + */ + i2c_stop(); + + } while(error && cntr--); + + spin_unlock_irqrestore(&i2c_lock, flags); + + return b; +} + +static int +i2c_open(struct inode *inode, struct file *filp) +{ + return 0; +} + +static int +i2c_release(struct inode *inode, struct file *filp) +{ + return 0; +} + +/* Main device API. ioctl's to write or read to/from i2c registers. + */ + +static long +i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) { + return -ENOTTY; + } + + switch (_IOC_NR(cmd)) { + case I2C_WRITEREG: + /* write to an i2c slave */ + D(printk("i2cw %d %d %d\n", + I2C_ARGSLAVE(arg), + I2C_ARGREG(arg), + I2C_ARGVALUE(arg))); + + mutex_lock(&i2c_mutex); + ret = i2c_writereg(I2C_ARGSLAVE(arg), + I2C_ARGREG(arg), + I2C_ARGVALUE(arg)); + mutex_unlock(&i2c_mutex); + return ret; + + case I2C_READREG: + { + unsigned char val; + /* read from an i2c slave */ + D(printk("i2cr %d %d ", + I2C_ARGSLAVE(arg), + I2C_ARGREG(arg))); + mutex_lock(&i2c_mutex); + val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg)); + mutex_unlock(&i2c_mutex); + D(printk("= %d\n", val)); + return val; + } + default: + return -EINVAL; + + } + + return 0; +} + +static const struct file_operations i2c_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = i2c_ioctl, + .open = i2c_open, + .release = i2c_release, + .llseek = noop_llseek, +}; + +static int __init i2c_init(void) +{ + static int res; + static int first = 1; + + if (!first) + return res; + + first = 0; + + /* Setup and enable the DATA and CLK pins */ + + res = crisv32_io_get_name(&cris_i2c_data, + CONFIG_ETRAX_V32_I2C_DATA_PORT); + if (res < 0) + return res; + + res = crisv32_io_get_name(&cris_i2c_clk, CONFIG_ETRAX_V32_I2C_CLK_PORT); + crisv32_io_set_dir(&cris_i2c_clk, crisv32_io_dir_out); + + return res; +} + + +static int __init i2c_register(void) +{ + int res; + + res = i2c_init(); + if (res < 0) + return res; + + /* register char device */ + + res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops); + if (res < 0) { + printk(KERN_ERR "i2c: couldn't get a major number.\n"); + return res; + } + + printk(KERN_INFO + "I2C driver v2.2, (c) 1999-2007 Axis Communications AB\n"); + + return 0; +} +/* this makes sure that i2c_init is called during boot */ +module_init(i2c_register); + +/****************** END OF FILE i2c.c ********************************/ diff --git a/kernel/arch/cris/arch-v32/drivers/i2c.h b/kernel/arch/cris/arch-v32/drivers/i2c.h new file mode 100644 index 000000000..d9cc856f8 --- /dev/null +++ 
b/kernel/arch/cris/arch-v32/drivers/i2c.h @@ -0,0 +1,16 @@ + +#include <linux/init.h> + +/* High level I2C actions */ +int i2c_write(unsigned char theSlave, void *data, size_t nbytes); +int i2c_read(unsigned char theSlave, void *data, size_t nbytes); +int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue); +unsigned char i2c_readreg(unsigned char theSlave, unsigned char theReg); + +/* Low level I2C */ +void i2c_start(void); +void i2c_stop(void); +void i2c_outbyte(unsigned char x); +unsigned char i2c_inbyte(void); +int i2c_getack(void); +void i2c_sendack(void); diff --git a/kernel/arch/cris/arch-v32/drivers/iop_fw_load.c b/kernel/arch/cris/arch-v32/drivers/iop_fw_load.c new file mode 100644 index 000000000..2f8ea0f7a --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/iop_fw_load.c @@ -0,0 +1,230 @@ +/* + * Firmware loader for ETRAX FS IO-Processor + * + * Copyright (C) 2004 Axis Communications AB + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/firmware.h> + +#include <hwregs/reg_rdwr.h> +#include <hwregs/reg_map.h> +#include <hwregs/iop/iop_reg_space.h> +#include <hwregs/iop/iop_mpu_macros.h> +#include <hwregs/iop/iop_mpu_defs.h> +#include <hwregs/iop/iop_spu_defs.h> +#include <hwregs/iop/iop_sw_cpu_defs.h> + +#define IOP_TIMEOUT 100 + +#error "This driver is broken with regard to its driver core usage." +#error "Please contact <greg@kroah.com> for details on how to fix it properly." + +static struct device iop_spu_device[2] = { + { .init_name = "iop-spu0", }, + { .init_name = "iop-spu1", }, +}; + +static struct device iop_mpu_device = { + .init_name = "iop-mpu", +}; + +static int wait_mpu_idle(void) +{ + reg_iop_mpu_r_stat mpu_stat; + unsigned int timeout = IOP_TIMEOUT; + + do { + mpu_stat = REG_RD(iop_mpu, regi_iop_mpu, r_stat); + } while (mpu_stat.instr_reg_busy == regk_iop_mpu_yes && --timeout > 0); + if (timeout == 0) { + printk(KERN_ERR "Timeout waiting for MPU to be idle\n"); + return -EBUSY; + } + return 0; +} + +int iop_fw_load_spu(const unsigned char *fw_name, unsigned int spu_inst) +{ + reg_iop_sw_cpu_rw_mc_ctrl mc_ctrl = { + .wr_spu0_mem = regk_iop_sw_cpu_no, + .wr_spu1_mem = regk_iop_sw_cpu_no, + .size = 4, + .cmd = regk_iop_sw_cpu_reg_copy, + .keep_owner = regk_iop_sw_cpu_yes + }; + reg_iop_spu_rw_ctrl spu_ctrl = { + .en = regk_iop_spu_no, + .fsm = regk_iop_spu_no, + }; + reg_iop_sw_cpu_r_mc_stat mc_stat; + const struct firmware *fw_entry; + u32 *data; + unsigned int timeout; + int retval, i; + + if (spu_inst > 1) + return -ENODEV; + + /* get firmware */ + retval = request_firmware(&fw_entry, + fw_name, + &iop_spu_device[spu_inst]); + if (retval != 0) + { + printk(KERN_ERR + "iop_load_spu: Failed to load firmware \"%s\"\n", + fw_name); + return retval; + } + data = (u32 *) fw_entry->data; + + /* acquire ownership of memory controller */ + switch (spu_inst) { + case 0: + mc_ctrl.wr_spu0_mem = regk_iop_sw_cpu_yes; + REG_WR(iop_spu, regi_iop_spu0, rw_ctrl, spu_ctrl); + break; + case 1: + mc_ctrl.wr_spu1_mem = regk_iop_sw_cpu_yes; + REG_WR(iop_spu, regi_iop_spu1, rw_ctrl, spu_ctrl); + break; + } + timeout = IOP_TIMEOUT; + do { + REG_WR(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_ctrl, mc_ctrl); + mc_stat = REG_RD(iop_sw_cpu, regi_iop_sw_cpu, r_mc_stat); + } while (mc_stat.owned_by_cpu == regk_iop_sw_cpu_no && --timeout > 0); + if (timeout == 0) { + printk(KERN_ERR "Timeout waiting to acquire MC\n"); + retval = -EBUSY; + goto out; + } + + /* write to SPU memory */ + for (i = 
0; i < (fw_entry->size/4); i++) { + switch (spu_inst) { + case 0: + REG_WR_INT(iop_spu, regi_iop_spu0, rw_seq_pc, (i*4)); + break; + case 1: + REG_WR_INT(iop_spu, regi_iop_spu1, rw_seq_pc, (i*4)); + break; + } + REG_WR_INT(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_data, *data); + data++; + } + + /* release ownership of memory controller */ + (void) REG_RD(iop_sw_cpu, regi_iop_sw_cpu, rs_mc_data); + + out: + release_firmware(fw_entry); + return retval; +} + +int iop_fw_load_mpu(unsigned char *fw_name) +{ + const unsigned int start_addr = 0; + reg_iop_mpu_rw_ctrl mpu_ctrl; + const struct firmware *fw_entry; + u32 *data; + int retval, i; + + /* get firmware */ + retval = request_firmware(&fw_entry, fw_name, &iop_mpu_device); + if (retval != 0) + { + printk(KERN_ERR + "iop_fw_load_mpu: Failed to load firmware \"%s\"\n", + fw_name); + return retval; + } + data = (u32 *) fw_entry->data; + + /* disable MPU */ + mpu_ctrl.en = regk_iop_mpu_no; + REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl); + /* put start address in R0 */ + REG_WR_VECT(iop_mpu, regi_iop_mpu, rw_r, 0, start_addr); + /* write to memory by executing 'SWX i, 4, R0' for each word */ + if ((retval = wait_mpu_idle()) != 0) + goto out; + REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_SWX_IIR_INSTR(0, 4, 0)); + for (i = 0; i < (fw_entry->size / 4); i++) { + REG_WR_INT(iop_mpu, regi_iop_mpu, rw_immediate, *data); + if ((retval = wait_mpu_idle()) != 0) + goto out; + data++; + } + + out: + release_firmware(fw_entry); + return retval; +} + +int iop_start_mpu(unsigned int start_addr) +{ + reg_iop_mpu_rw_ctrl mpu_ctrl = { .en = regk_iop_mpu_yes }; + int retval; + + /* halt the MPU */ + if ((retval = wait_mpu_idle()) != 0) + goto out; + REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_HALT()); + if ((retval = wait_mpu_idle()) != 0) + goto out; + /* set PC and wait for it to bite */ + if ((retval = wait_mpu_idle()) != 0) + goto out; + REG_WR_INT(iop_mpu, regi_iop_mpu, rw_instr, MPU_BA_I(start_addr)); + if ((retval = wait_mpu_idle()) != 0) + goto out; + /* make sure the MPU starts executing with interrupts disabled */ + REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_DI()); + if ((retval = wait_mpu_idle()) != 0) + goto out; + /* enable MPU */ + REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl); + out: + return retval; +} + +static int __init iop_fw_load_init(void) +{ +#if 0 + /* + * Static struct devices cannot be added directly to sysfs; that would + * bypass the driver model infrastructure. To fix this properly, use + * the platform_bus to register these devices so that the firmware
 + * infrastructure can be used properly.
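+ *
+ * A minimal sketch of that fix (untested, assuming the usual
+ * platform bus API):
+ *
+ * struct platform_device *pd =
+ * platform_device_register_simple("iop-spu", 0, NULL, 0);
+ * if (IS_ERR(pd))
+ * return PTR_ERR(pd);
+ *
+ * after which &pd->dev would be passed to request_firmware()
+ * instead of the static iop_spu_device/iop_mpu_device above.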
+ */ + device_initialize(&iop_spu_device[0]); + kobject_set_name(&iop_spu_device[0].kobj, "iop-spu0"); + kobject_add(&iop_spu_device[0].kobj); + device_initialize(&iop_spu_device[1]); + kobject_set_name(&iop_spu_device[1].kobj, "iop-spu1"); + kobject_add(&iop_spu_device[1].kobj); + device_initialize(&iop_mpu_device); + kobject_set_name(&iop_mpu_device.kobj, "iop-mpu"); + kobject_add(&iop_mpu_device.kobj); +#endif + return 0; +} + +static void __exit iop_fw_load_exit(void) +{ +} + +module_init(iop_fw_load_init); +module_exit(iop_fw_load_exit); + +MODULE_DESCRIPTION("ETRAX FS IO-Processor Firmware Loader"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(iop_fw_load_spu); +EXPORT_SYMBOL(iop_fw_load_mpu); +EXPORT_SYMBOL(iop_start_mpu); diff --git a/kernel/arch/cris/arch-v32/drivers/mach-a3/Makefile b/kernel/arch/cris/arch-v32/drivers/mach-a3/Makefile new file mode 100644 index 000000000..5c6d2a2a0 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/mach-a3/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for Etrax-specific drivers +# + +obj-$(CONFIG_ETRAX_NANDFLASH) += nandflash.o +obj-$(CONFIG_ETRAX_GPIO) += gpio.o diff --git a/kernel/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/kernel/arch/cris/arch-v32/drivers/mach-a3/gpio.c new file mode 100644 index 000000000..74f9fe809 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/mach-a3/gpio.c @@ -0,0 +1,999 @@ +/* + * Artpec-3 general port I/O device + * + * Copyright (c) 2007 Axis Communications AB + * + * Authors: Bjorn Wesen (initial version) + * Ola Knutsson (LED handling) + * Johan Adolfsson (read/set directions, write, port G, + * port to ETRAX FS). + * Ricard Wanderlof (PWM for Artpec-3) + * + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +#include <asm/etraxgpio.h> +#include <hwregs/reg_map.h> +#include <hwregs/reg_rdwr.h> +#include <hwregs/gio_defs.h> +#include <hwregs/intr_vect_defs.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <mach/pinmux.h> + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +#include "../i2c.h" + +#define VIRT_I2C_ADDR 0x40 +#endif + +/* The following gio ports on ARTPEC-3 are available: + * pa 32 bits + * pb 32 bits + * pc 16 bits + * each port has a rw_px_dout, r_px_din and rw_px_oe register.
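+ * A bit set in rw_px_oe makes the corresponding pin an output
+ * driven from rw_px_dout; a cleared bit leaves it an input sampled
+ * through r_px_din. For example (illustrative only), driving pa3
+ * high means setting bit 3 in both rw_pa_oe and rw_pa_dout, which
+ * is what the IO_SETOUTPUT and IO_SETBITS ioctls below end up doing
+ * through the dir_oe[] and data_out[] register tables.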
+ */ + +#define GPIO_MAJOR 120 /* experimental MAJOR number */ + +#define I2C_INTERRUPT_BITS 0x300 /* i2c0_done and i2c1_done bits */ + +#define D(x) + +#if 0 +static int dp_cnt; +#define DP(x) \ + do { \ + dp_cnt++; \ + if (dp_cnt % 1000 == 0) \ + x; \ + } while (0) +#else +#define DP(x) +#endif + +static DEFINE_MUTEX(gpio_mutex); +static char gpio_name[] = "etrax gpio"; + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static int virtual_gpio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#endif +static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static ssize_t gpio_write(struct file *file, const char __user *buf, + size_t count, loff_t *off); +static int gpio_open(struct inode *inode, struct file *filp); +static int gpio_release(struct inode *inode, struct file *filp); +static unsigned int gpio_poll(struct file *filp, + struct poll_table_struct *wait); + +/* private data per open() of this driver */ + +struct gpio_private { + struct gpio_private *next; + /* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */ + unsigned char clk_mask; + unsigned char data_mask; + unsigned char write_msb; + unsigned char pad1; + /* These fields are generic */ + unsigned long highalarm, lowalarm; + wait_queue_head_t alarm_wq; + int minor; +}; + +static void gpio_set_alarm(struct gpio_private *priv); +static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg); +static int gpio_pwm_ioctl(struct gpio_private *priv, unsigned int cmd, + unsigned long arg); + + +/* linked list of alarms to check for */ + +static struct gpio_private *alarmlist; + +static int wanted_interrupts; + +static DEFINE_SPINLOCK(gpio_lock); + +#define NUM_PORTS (GPIO_MINOR_LAST+1) +#define GIO_REG_RD_ADDR(reg) \ + (unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg) +#define GIO_REG_WR_ADDR(reg) \ + (unsigned long *)(regi_gio + REG_WR_ADDR_gio_##reg) +static unsigned long led_dummy; +static unsigned long port_d_dummy; /* Only input on Artpec-3 */ +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static unsigned long port_e_dummy; /* Non existent on Artpec-3 */ +static unsigned long virtual_dummy; +static unsigned long virtual_rw_pv_oe = CONFIG_ETRAX_DEF_GIO_PV_OE; +static unsigned short cached_virtual_gpio_read; +#endif + +static unsigned long *data_out[NUM_PORTS] = { + GIO_REG_WR_ADDR(rw_pa_dout), + GIO_REG_WR_ADDR(rw_pb_dout), + &led_dummy, + GIO_REG_WR_ADDR(rw_pc_dout), + &port_d_dummy, +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + &port_e_dummy, + &virtual_dummy, +#endif +}; + +static unsigned long *data_in[NUM_PORTS] = { + GIO_REG_RD_ADDR(r_pa_din), + GIO_REG_RD_ADDR(r_pb_din), + &led_dummy, + GIO_REG_RD_ADDR(r_pc_din), + GIO_REG_RD_ADDR(r_pd_din), +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + &port_e_dummy, + &virtual_dummy, +#endif +}; + +static unsigned long changeable_dir[NUM_PORTS] = { + CONFIG_ETRAX_PA_CHANGEABLE_DIR, + CONFIG_ETRAX_PB_CHANGEABLE_DIR, + 0, + CONFIG_ETRAX_PC_CHANGEABLE_DIR, + 0, +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + 0, + CONFIG_ETRAX_PV_CHANGEABLE_DIR, +#endif +}; + +static unsigned long changeable_bits[NUM_PORTS] = { + CONFIG_ETRAX_PA_CHANGEABLE_BITS, + CONFIG_ETRAX_PB_CHANGEABLE_BITS, + 0, + CONFIG_ETRAX_PC_CHANGEABLE_BITS, + 0, +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + 0, + CONFIG_ETRAX_PV_CHANGEABLE_BITS, +#endif +}; + +static unsigned long *dir_oe[NUM_PORTS] = { + GIO_REG_WR_ADDR(rw_pa_oe), + GIO_REG_WR_ADDR(rw_pb_oe), + &led_dummy, + GIO_REG_WR_ADDR(rw_pc_oe), + &port_d_dummy, +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + &port_e_dummy, + &virtual_rw_pv_oe, +#endif +}; + +static void gpio_set_alarm(struct gpio_private 
*priv) +{ + int bit; + int intr_cfg; + int mask; + int pins; + unsigned long flags; + + spin_lock_irqsave(&gpio_lock, flags); + intr_cfg = REG_RD_INT(gio, regi_gio, rw_intr_cfg); + pins = REG_RD_INT(gio, regi_gio, rw_intr_pins); + mask = REG_RD_INT(gio, regi_gio, rw_intr_mask) & I2C_INTERRUPT_BITS; + + for (bit = 0; bit < 32; bit++) { + int intr = bit % 8; + int pin = bit / 8; + if (priv->minor < GPIO_MINOR_LEDS) + pin += priv->minor * 4; + else + pin += (priv->minor - 1) * 4; + + if (priv->highalarm & (1<<bit)) { + intr_cfg |= (regk_gio_hi << (intr * 3)); + mask |= 1 << intr; + wanted_interrupts = mask & 0xff; + pins |= pin << (intr * 4); + } else if (priv->lowalarm & (1<<bit)) { + intr_cfg |= (regk_gio_lo << (intr * 3)); + mask |= 1 << intr; + wanted_interrupts = mask & 0xff; + pins |= pin << (intr * 4); + } + } + + REG_WR_INT(gio, regi_gio, rw_intr_cfg, intr_cfg); + REG_WR_INT(gio, regi_gio, rw_intr_pins, pins); + REG_WR_INT(gio, regi_gio, rw_intr_mask, mask); + + spin_unlock_irqrestore(&gpio_lock, flags); +} + +static unsigned int gpio_poll(struct file *file, struct poll_table_struct *wait) +{ + unsigned int mask = 0; + struct gpio_private *priv = file->private_data; + unsigned long data; + unsigned long tmp; + + if (priv->minor >= GPIO_MINOR_PWM0 && + priv->minor <= GPIO_MINOR_LAST_PWM) + return 0; + + poll_wait(file, &priv->alarm_wq, wait); + if (priv->minor <= GPIO_MINOR_D) { + data = readl(data_in[priv->minor]); + REG_WR_INT(gio, regi_gio, rw_ack_intr, wanted_interrupts); + tmp = REG_RD_INT(gio, regi_gio, rw_intr_mask); + tmp &= I2C_INTERRUPT_BITS; + tmp |= wanted_interrupts; + REG_WR_INT(gio, regi_gio, rw_intr_mask, tmp); + } else + return 0; + + if ((data & priv->highalarm) || (~data & priv->lowalarm)) + mask = POLLIN|POLLRDNORM; + + DP(printk(KERN_DEBUG "gpio_poll ready: mask 0x%08X\n", mask)); + return mask; +} + +static irqreturn_t gpio_interrupt(int irq, void *dev_id) +{ + reg_gio_rw_intr_mask intr_mask; + reg_gio_r_masked_intr masked_intr; + reg_gio_rw_ack_intr ack_intr; + unsigned long flags; + unsigned long tmp; + unsigned long tmp2; +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + unsigned char enable_gpiov_ack = 0; +#endif + + /* Find what PA interrupts are active */ + masked_intr = REG_RD(gio, regi_gio, r_masked_intr); + tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr); + + /* Find those that we have enabled */ + spin_lock_irqsave(&gpio_lock, flags); + tmp &= wanted_interrupts; + spin_unlock_irqrestore(&gpio_lock, flags); + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + /* Something changed on virtual GPIO. Interrupt is acked by + * reading the device. + */ + if (tmp & (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN)) { + i2c_read(VIRT_I2C_ADDR, (void *)&cached_virtual_gpio_read, + sizeof(cached_virtual_gpio_read)); + enable_gpiov_ack = 1; + } +#endif + + /* Ack them */ + ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp); + REG_WR(gio, regi_gio, rw_ack_intr, ack_intr); + + /* Disable those interrupts.. */ + intr_mask = REG_RD(gio, regi_gio, rw_intr_mask); + tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask); + tmp2 &= ~tmp; +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + /* Do not disable interrupt on virtual GPIO. Changes on virtual + * pins are only noticed by an interrupt. 
+ */ + if (enable_gpiov_ack) + tmp2 |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); +#endif + intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2); + REG_WR(gio, regi_gio, rw_intr_mask, intr_mask); + + return IRQ_RETVAL(tmp); +} + +static void gpio_write_bit(unsigned long *port, unsigned char data, int bit, + unsigned char clk_mask, unsigned char data_mask) +{ + unsigned long shadow = readl(port) & ~clk_mask; + writel(shadow, port); + if (data & 1 << bit) + shadow |= data_mask; + else + shadow &= ~data_mask; + writel(shadow, port); + /* For FPGA: min 5.0ns (DCC) before CCLK high */ + shadow |= clk_mask; + writel(shadow, port); +} + +static void gpio_write_byte(struct gpio_private *priv, unsigned long *port, + unsigned char data) +{ + int i; + + if (priv->write_msb) + for (i = 7; i >= 0; i--) + gpio_write_bit(port, data, i, priv->clk_mask, + priv->data_mask); + else + for (i = 0; i <= 7; i++) + gpio_write_bit(port, data, i, priv->clk_mask, + priv->data_mask); +} + + +static ssize_t gpio_write(struct file *file, const char __user *buf, + size_t count, loff_t *off) +{ + struct gpio_private *priv = file->private_data; + unsigned long flags; + ssize_t retval = count; + /* Only bits 0-7 may be used for write operations but allow all + devices except leds... */ +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + if (priv->minor == GPIO_MINOR_V) + return -EFAULT; +#endif + if (priv->minor == GPIO_MINOR_LEDS) + return -EFAULT; + + if (priv->minor >= GPIO_MINOR_PWM0 && + priv->minor <= GPIO_MINOR_LAST_PWM) + return -EFAULT; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + /* It must have been configured using the IO_CFG_WRITE_MODE */ + /* Perhaps a better error code? */ + if (priv->clk_mask == 0 || priv->data_mask == 0) + return -EPERM; + + D(printk(KERN_DEBUG "gpio_write: %lu to data 0x%02X clk 0x%02X " + "msb: %i\n", + count, priv->data_mask, priv->clk_mask, priv->write_msb)); + + spin_lock_irqsave(&gpio_lock, flags); + + while (count--) + gpio_write_byte(priv, data_out[priv->minor], *buf++); + + spin_unlock_irqrestore(&gpio_lock, flags); + return retval; +} + +static int gpio_open(struct inode *inode, struct file *filp) +{ + struct gpio_private *priv; + int p = iminor(inode); + + if (p > GPIO_MINOR_LAST_PWM || + (p > GPIO_MINOR_LAST && p < GPIO_MINOR_PWM0)) + return -EINVAL; + + priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL); + + if (!priv) + return -ENOMEM; + + mutex_lock(&gpio_mutex); + memset(priv, 0, sizeof(*priv)); + + priv->minor = p; + filp->private_data = priv; + + /* initialize the io/alarm struct, not for PWM ports though */ + if (p <= GPIO_MINOR_LAST) { + + priv->clk_mask = 0; + priv->data_mask = 0; + priv->highalarm = 0; + priv->lowalarm = 0; + + init_waitqueue_head(&priv->alarm_wq); + + /* link it into our alarmlist */ + spin_lock_irq(&gpio_lock); + priv->next = alarmlist; + alarmlist = priv; + spin_unlock_irq(&gpio_lock); + } + + mutex_unlock(&gpio_mutex); + return 0; +} + +static int gpio_release(struct inode *inode, struct file *filp) +{ + struct gpio_private *p; + struct gpio_private *todel; + /* local copies while updating them: */ + unsigned long a_high, a_low; + + /* prepare to free private structure */ + todel = filp->private_data; + + /* unlink from alarmlist - only for non-PWM ports though */ + if (todel->minor <= GPIO_MINOR_LAST) { + spin_lock_irq(&gpio_lock); + p = alarmlist; + + if (p == todel) + alarmlist = todel->next; + else { + while (p->next != todel) + p = p->next; + p->next = todel->next; + } + + /* Check if there are still any 
alarms set */ + p = alarmlist; + a_high = 0; + a_low = 0; + while (p) { + if (p->minor == GPIO_MINOR_A) { +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + p->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); +#endif + a_high |= p->highalarm; + a_low |= p->lowalarm; + } + + p = p->next; + } + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + /* Variable 'a_low' needs to be set here again + * to ensure that interrupt for virtual GPIO is handled. + */ + a_low |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); +#endif + + spin_unlock_irq(&gpio_lock); + } + kfree(todel); + + return 0; +} + +/* Main device API. ioctl's to read/set/clear bits, as well as to + * set alarms to wait for using a subsequent select(). + */ + +inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg) +{ + /* Set direction 0=unchanged 1=input, + * return mask with 1=input + */ + unsigned long flags; + unsigned long dir_shadow; + + spin_lock_irqsave(&gpio_lock, flags); + + dir_shadow = readl(dir_oe[priv->minor]) & + ~(arg & changeable_dir[priv->minor]); + writel(dir_shadow, dir_oe[priv->minor]); + + spin_unlock_irqrestore(&gpio_lock, flags); + + if (priv->minor == GPIO_MINOR_C) + dir_shadow ^= 0xFFFF; /* Only 16 bits */ +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + else if (priv->minor == GPIO_MINOR_V) + dir_shadow ^= 0xFFFF; /* Only 16 bits */ +#endif + else + dir_shadow ^= 0xFFFFFFFF; /* PA, PB and PD 32 bits */ + + return dir_shadow; + +} /* setget_input */ + +static inline unsigned long setget_output(struct gpio_private *priv, + unsigned long arg) +{ + unsigned long flags; + unsigned long dir_shadow; + + spin_lock_irqsave(&gpio_lock, flags); + + dir_shadow = readl(dir_oe[priv->minor]) | + (arg & changeable_dir[priv->minor]); + writel(dir_shadow, dir_oe[priv->minor]); + + spin_unlock_irqrestore(&gpio_lock, flags); + return dir_shadow; +} /* setget_output */ + +static long gpio_ioctl_unlocked(struct file *file, + unsigned int cmd, unsigned long arg) +{ + unsigned long flags; + unsigned long val; + unsigned long shadow; + struct gpio_private *priv = file->private_data; + + if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE) + return -ENOTTY; + + /* Check for special ioctl handlers first */ + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + if (priv->minor == GPIO_MINOR_V) + return virtual_gpio_ioctl(file, cmd, arg); +#endif + + if (priv->minor == GPIO_MINOR_LEDS) + return gpio_leds_ioctl(cmd, arg); + + if (priv->minor >= GPIO_MINOR_PWM0 && + priv->minor <= GPIO_MINOR_LAST_PWM) + return gpio_pwm_ioctl(priv, cmd, arg); + + switch (_IOC_NR(cmd)) { + case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */ + /* Read the port. */ + return readl(data_in[priv->minor]); + case IO_SETBITS: + spin_lock_irqsave(&gpio_lock, flags); + /* Set changeable bits with a 1 in arg. */ + shadow = readl(data_out[priv->minor]) | + (arg & changeable_bits[priv->minor]); + writel(shadow, data_out[priv->minor]); + spin_unlock_irqrestore(&gpio_lock, flags); + break; + case IO_CLRBITS: + spin_lock_irqsave(&gpio_lock, flags); + /* Clear changeable bits with a 1 in arg. */ + shadow = readl(data_out[priv->minor]) & + ~(arg & changeable_bits[priv->minor]); + writel(shadow, data_out[priv->minor]); + spin_unlock_irqrestore(&gpio_lock, flags); + break; + case IO_HIGHALARM: + /* Set alarm when bits with 1 in arg go high. */ + priv->highalarm |= arg; + gpio_set_alarm(priv); + break; + case IO_LOWALARM: + /* Set alarm when bits with 1 in arg go low. */ + priv->lowalarm |= arg; + gpio_set_alarm(priv); + break; + case IO_CLRALARM: + /* Clear alarm for bits with 1 in arg. 
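+ * For example, arg = 0x05 disarms any high or low alarms on bits
+ * 0 and 2; gpio_set_alarm() below then rebuilds the interrupt
+ * registers from the alarm masks that remain.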
*/ + priv->highalarm &= ~arg; + priv->lowalarm &= ~arg; + gpio_set_alarm(priv); + break; + case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */ + /* Read direction 0=input 1=output */ + return readl(dir_oe[priv->minor]); + + case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */ + /* Set direction 0=unchanged 1=input, + * return mask with 1=input + */ + return setget_input(priv, arg); + + case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */ + /* Set direction 0=unchanged 1=output, + * return mask with 1=output + */ + return setget_output(priv, arg); + + case IO_CFG_WRITE_MODE: + { + int res = -EPERM; + unsigned long dir_shadow, clk_mask, data_mask, write_msb; + + clk_mask = arg & 0xFF; + data_mask = (arg >> 8) & 0xFF; + write_msb = (arg >> 16) & 0x01; + + /* Check if we're allowed to change the bits and + * the direction is correct + */ + spin_lock_irqsave(&gpio_lock, flags); + dir_shadow = readl(dir_oe[priv->minor]); + if ((clk_mask & changeable_bits[priv->minor]) && + (data_mask & changeable_bits[priv->minor]) && + (clk_mask & dir_shadow) && + (data_mask & dir_shadow)) { + priv->clk_mask = clk_mask; + priv->data_mask = data_mask; + priv->write_msb = write_msb; + res = 0; + } + spin_unlock_irqrestore(&gpio_lock, flags); + + return res; + } + case IO_READ_INBITS: + /* *arg is result of reading the input pins */ + val = readl(data_in[priv->minor]); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + return 0; + case IO_READ_OUTBITS: + /* *arg is result of reading the output shadow */ + val = *data_out[priv->minor]; + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + break; + case IO_SETGET_INPUT: + /* bits set in *arg is set to input, + * *arg updated with current input pins. + */ + if (copy_from_user(&val, (void __user *)arg, sizeof(val))) + return -EFAULT; + val = setget_input(priv, val); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + break; + case IO_SETGET_OUTPUT: + /* bits set in *arg is set to output, + * *arg updated with current output pins. + */ + if (copy_from_user(&val, (void __user *)arg, sizeof(val))) + return -EFAULT; + val = setget_output(priv, val); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + break; + default: + return -EINVAL; + } /* switch */ + + return 0; +} + +static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + mutex_lock(&gpio_mutex); + ret = gpio_ioctl_unlocked(file, cmd, arg); + mutex_unlock(&gpio_mutex); + + return ret; +} + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static int virtual_gpio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned long flags; + unsigned short val; + unsigned short shadow; + struct gpio_private *priv = file->private_data; + + switch (_IOC_NR(cmd)) { + case IO_SETBITS: + spin_lock_irqsave(&gpio_lock, flags); + /* Set changeable bits with a 1 in arg. */ + i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + shadow |= ~readl(dir_oe[priv->minor]) | + (arg & changeable_bits[priv->minor]); + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + spin_unlock_irqrestore(&gpio_lock, flags); + break; + case IO_CLRBITS: + spin_lock_irqsave(&gpio_lock, flags); + /* Clear changeable bits with a 1 in arg. 
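+ * For illustration (masks assumed): with changeable_bits = 0xff
+ * and arg = 0x02, the OR-mask is ~dir_oe & ~0x02, i.e. every
+ * input bit except bit 1 is re-asserted to logic 1 before the
+ * 16-bit shadow is written back over I2C.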
*/ + i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + shadow |= ~readl(dir_oe[priv->minor]) & + ~(arg & changeable_bits[priv->minor]); + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + spin_unlock_irqrestore(&gpio_lock, flags); + break; + case IO_HIGHALARM: + /* Set alarm when bits with 1 in arg go high. */ + priv->highalarm |= arg; + break; + case IO_LOWALARM: + /* Set alarm when bits with 1 in arg go low. */ + priv->lowalarm |= arg; + break; + case IO_CLRALARM: + /* Clear alarm for bits with 1 in arg. */ + priv->highalarm &= ~arg; + priv->lowalarm &= ~arg; + break; + case IO_CFG_WRITE_MODE: + { + unsigned long dir_shadow; + dir_shadow = readl(dir_oe[priv->minor]); + + priv->clk_mask = arg & 0xFF; + priv->data_mask = (arg >> 8) & 0xFF; + priv->write_msb = (arg >> 16) & 0x01; + /* Check if we're allowed to change the bits and + * the direction is correct + */ + if (!((priv->clk_mask & changeable_bits[priv->minor]) && + (priv->data_mask & changeable_bits[priv->minor]) && + (priv->clk_mask & dir_shadow) && + (priv->data_mask & dir_shadow))) { + priv->clk_mask = 0; + priv->data_mask = 0; + return -EPERM; + } + break; + } + case IO_READ_INBITS: + /* *arg is result of reading the input pins */ + val = cached_virtual_gpio_read & ~readl(dir_oe[priv->minor]); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + return 0; + + case IO_READ_OUTBITS: + /* *arg is result of reading the output shadow */ + i2c_read(VIRT_I2C_ADDR, (void *)&val, sizeof(val)); + val &= readl(dir_oe[priv->minor]); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + break; + case IO_SETGET_INPUT: + { + /* bits set in *arg is set to input, + * *arg updated with current input pins. + */ + unsigned short input_mask = ~readl(dir_oe[priv->minor]); + if (copy_from_user(&val, (void __user *)arg, sizeof(val))) + return -EFAULT; + val = setget_input(priv, val); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + if ((input_mask & val) != input_mask) { + /* Input pins changed. All ports desired as input + * should be set to logic 1. + */ + unsigned short change = input_mask ^ val; + i2c_read(VIRT_I2C_ADDR, (void *)&shadow, + sizeof(shadow)); + shadow &= ~change; + shadow |= val; + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, + sizeof(shadow)); + } + break; + } + case IO_SETGET_OUTPUT: + /* bits set in *arg is set to output, + * *arg updated with current output pins. 
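+ * A user-space caller passes the mask by pointer; a sketch with
+ * an assumed open file descriptor (the virtual port uses 16-bit
+ * masks, hence unsigned short):
+ *
+ *   unsigned short mask = 0x3;
+ *   ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETGET_OUTPUT), &mask);
+ *
+ * after which mask holds the current 1=output direction bits.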
+ */ + if (copy_from_user(&val, (void __user *)arg, sizeof(val))) + return -EFAULT; + val = setget_output(priv, val); + if (copy_to_user((void __user *)arg, &val, sizeof(val))) + return -EFAULT; + break; + default: + return -EINVAL; + } /* switch */ + return 0; +} +#endif /* CONFIG_ETRAX_VIRTUAL_GPIO */ + +static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg) +{ + unsigned char green; + unsigned char red; + + switch (_IOC_NR(cmd)) { + case IO_LEDACTIVE_SET: + green = ((unsigned char) arg) & 1; + red = (((unsigned char) arg) >> 1) & 1; + CRIS_LED_ACTIVE_SET_G(green); + CRIS_LED_ACTIVE_SET_R(red); + break; + + default: + return -EINVAL; + } /* switch */ + + return 0; +} + +static int gpio_pwm_set_mode(unsigned long arg, int pwm_port) +{ + int pinmux_pwm = pinmux_pwm0 + pwm_port; + int mode; + reg_gio_rw_pwm0_ctrl rw_pwm_ctrl = { + .ccd_val = 0, + .ccd_override = regk_gio_no, + .mode = regk_gio_no + }; + int allocstatus; + + if (get_user(mode, &((struct io_pwm_set_mode *) arg)->mode)) + return -EFAULT; + rw_pwm_ctrl.mode = mode; + if (mode != PWM_OFF) + allocstatus = crisv32_pinmux_alloc_fixed(pinmux_pwm); + else + allocstatus = crisv32_pinmux_dealloc_fixed(pinmux_pwm); + if (allocstatus) + return allocstatus; + REG_WRITE(reg_gio_rw_pwm0_ctrl, REG_ADDR(gio, regi_gio, rw_pwm0_ctrl) + + 12 * pwm_port, rw_pwm_ctrl); + return 0; +} + +static int gpio_pwm_set_period(unsigned long arg, int pwm_port) +{ + struct io_pwm_set_period periods; + reg_gio_rw_pwm0_var rw_pwm_widths; + + if (copy_from_user(&periods, (void __user *)arg, sizeof(periods))) + return -EFAULT; + if (periods.lo > 8191 || periods.hi > 8191) + return -EINVAL; + rw_pwm_widths.lo = periods.lo; + rw_pwm_widths.hi = periods.hi; + REG_WRITE(reg_gio_rw_pwm0_var, REG_ADDR(gio, regi_gio, rw_pwm0_var) + + 12 * pwm_port, rw_pwm_widths); + return 0; +} + +static int gpio_pwm_set_duty(unsigned long arg, int pwm_port) +{ + unsigned int duty; + reg_gio_rw_pwm0_data rw_pwm_duty; + + if (get_user(duty, &((struct io_pwm_set_duty *) arg)->duty)) + return -EFAULT; + if (duty > 255) + return -EINVAL; + rw_pwm_duty.data = duty; + REG_WRITE(reg_gio_rw_pwm0_data, REG_ADDR(gio, regi_gio, rw_pwm0_data) + + 12 * pwm_port, rw_pwm_duty); + return 0; +} + +static int gpio_pwm_ioctl(struct gpio_private *priv, unsigned int cmd, + unsigned long arg) +{ + int pwm_port = priv->minor - GPIO_MINOR_PWM0; + + switch (_IOC_NR(cmd)) { + case IO_PWM_SET_MODE: + return gpio_pwm_set_mode(arg, pwm_port); + case IO_PWM_SET_PERIOD: + return gpio_pwm_set_period(arg, pwm_port); + case IO_PWM_SET_DUTY: + return gpio_pwm_set_duty(arg, pwm_port); + default: + return -EINVAL; + } + return 0; +} + +static const struct file_operations gpio_fops = { + .owner = THIS_MODULE, + .poll = gpio_poll, + .unlocked_ioctl = gpio_ioctl, + .write = gpio_write, + .open = gpio_open, + .release = gpio_release, + .llseek = noop_llseek, +}; + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static void __init virtual_gpio_init(void) +{ + reg_gio_rw_intr_cfg intr_cfg; + reg_gio_rw_intr_mask intr_mask; + unsigned short shadow; + + shadow = ~virtual_rw_pv_oe; /* Input ports should be set to logic 1 */ + shadow |= CONFIG_ETRAX_DEF_GIO_PV_OUT; + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + + /* Set interrupt mask and on what state the interrupt shall trigger. + * For virtual gpio the interrupt shall trigger on logic '0'. 
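+ * E.g. with CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN = 3, the
+ * switch below ends up setting intr_cfg.pa3 = regk_gio_lo and
+ * intr_mask.pa3 = regk_gio_yes.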
+ */ + intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg); + intr_mask = REG_RD(gio, regi_gio, rw_intr_mask); + + switch (CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN) { + case 0: + intr_cfg.pa0 = regk_gio_lo; + intr_mask.pa0 = regk_gio_yes; + break; + case 1: + intr_cfg.pa1 = regk_gio_lo; + intr_mask.pa1 = regk_gio_yes; + break; + case 2: + intr_cfg.pa2 = regk_gio_lo; + intr_mask.pa2 = regk_gio_yes; + break; + case 3: + intr_cfg.pa3 = regk_gio_lo; + intr_mask.pa3 = regk_gio_yes; + break; + case 4: + intr_cfg.pa4 = regk_gio_lo; + intr_mask.pa4 = regk_gio_yes; + break; + case 5: + intr_cfg.pa5 = regk_gio_lo; + intr_mask.pa5 = regk_gio_yes; + break; + case 6: + intr_cfg.pa6 = regk_gio_lo; + intr_mask.pa6 = regk_gio_yes; + break; + case 7: + intr_cfg.pa7 = regk_gio_lo; + intr_mask.pa7 = regk_gio_yes; + break; + } + + REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg); + REG_WR(gio, regi_gio, rw_intr_mask, intr_mask); +} +#endif + +/* main driver initialization routine, called from mem.c */ + +static int __init gpio_init(void) +{ + int res; + + printk(KERN_INFO "ETRAX FS GPIO driver v2.7, (c) 2003-2008 " + "Axis Communications AB\n"); + + /* do the formalities */ + + res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops); + if (res < 0) { + printk(KERN_ERR "gpio: couldn't get a major number.\n"); + return res; + } + + /* Clear all leds */ + CRIS_LED_NETWORK_GRP0_SET(0); + CRIS_LED_NETWORK_GRP1_SET(0); + CRIS_LED_ACTIVE_SET(0); + CRIS_LED_DISK_READ(0); + CRIS_LED_DISK_WRITE(0); + + int res2 = request_irq(GIO_INTR_VECT, gpio_interrupt, + IRQF_SHARED, "gpio", &alarmlist); + if (res2) { + printk(KERN_ERR "err: irq for gpio\n"); + return res2; + } + + /* No IRQs by default. */ + REG_WR_INT(gio, regi_gio, rw_intr_pins, 0); + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + virtual_gpio_init(); +#endif + + return res; +} + +/* this makes sure that gpio_init is called during kernel boot */ + +module_init(gpio_init); diff --git a/kernel/arch/cris/arch-v32/drivers/mach-a3/nandflash.c b/kernel/arch/cris/arch-v32/drivers/mach-a3/nandflash.c new file mode 100644 index 000000000..7fb52128d --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/mach-a3/nandflash.c @@ -0,0 +1,180 @@ +/* + * arch/cris/arch-v32/drivers/nandflash.c + * + * Copyright (c) 2007 + * + * Derived from drivers/mtd/nand/spia.c + * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> +#include <arch/memmap.h> +#include <hwregs/reg_map.h> +#include <hwregs/reg_rdwr.h> +#include <hwregs/pio_defs.h> +#include <pinmux.h> +#include <asm/io.h> + +#define MANUAL_ALE_CLE_CONTROL 1 + +#define regf_ALE a0 +#define regf_CLE a1 +#define regf_NCE ce0_n + +#define CLE_BIT 10 +#define ALE_BIT 11 +#define CE_BIT 12 + +struct mtd_info_wrapper { + struct mtd_info info; + struct nand_chip chip; +}; + +/* Bitmask for control pins */ +#define PIN_BITMASK ((1 << CE_BIT) | (1 << CLE_BIT) | (1 << ALE_BIT)) + +static struct mtd_info *crisv32_mtd; +/* + * hardware specific access to control-lines + */ +static void crisv32_hwcontrol(struct mtd_info *mtd, int cmd, + unsigned int ctrl) +{ + unsigned long flags; + reg_pio_rw_dout dout; + struct nand_chip *this = mtd->priv; + + local_irq_save(flags); + + /* control bits change */ + if (ctrl & NAND_CTRL_CHANGE) { + dout = REG_RD(pio, regi_pio, rw_dout); + dout.regf_NCE = (ctrl & NAND_NCE) ? 0 : 1; + +#if !MANUAL_ALE_CLE_CONTROL + if (ctrl & NAND_ALE) { + /* A0 = ALE high */ + this->IO_ADDR_W = (void __iomem *)REG_ADDR(pio, + regi_pio, rw_io_access1); + } else if (ctrl & NAND_CLE) { + /* A1 = CLE high */ + this->IO_ADDR_W = (void __iomem *)REG_ADDR(pio, + regi_pio, rw_io_access2); + } else { + /* A1 = CLE and A0 = ALE low */ + this->IO_ADDR_W = (void __iomem *)REG_ADDR(pio, + regi_pio, rw_io_access0); + } +#else + + dout.regf_CLE = (ctrl & NAND_CLE) ? 1 : 0; + dout.regf_ALE = (ctrl & NAND_ALE) ? 1 : 0; +#endif + REG_WR(pio, regi_pio, rw_dout, dout); + } + + /* command to chip */ + if (cmd != NAND_CMD_NONE) + writeb(cmd, this->IO_ADDR_W); + + local_irq_restore(flags); +} + +/* +* read device ready pin +*/ +static int crisv32_device_ready(struct mtd_info *mtd) +{ + reg_pio_r_din din = REG_RD(pio, regi_pio, r_din); + return din.rdy; +} + +/* + * Main initialization routine + */ +struct mtd_info *__init crisv32_nand_flash_probe(void) +{ + void __iomem *read_cs; + void __iomem *write_cs; + + struct mtd_info_wrapper *wrapper; + struct nand_chip *this; + int err = 0; + + reg_pio_rw_man_ctrl man_ctrl = { + .regf_NCE = regk_pio_yes, +#if MANUAL_ALE_CLE_CONTROL + .regf_ALE = regk_pio_yes, + .regf_CLE = regk_pio_yes +#endif + }; + reg_pio_rw_oe oe = { + .regf_NCE = regk_pio_yes, +#if MANUAL_ALE_CLE_CONTROL + .regf_ALE = regk_pio_yes, + .regf_CLE = regk_pio_yes +#endif + }; + reg_pio_rw_dout dout = { .regf_NCE = 1 }; + + /* Allocate pio pins to pio */ + crisv32_pinmux_alloc_fixed(pinmux_pio); + /* Set up CE, ALE, CLE (ce0_n, a0, a1) for manual control and output */ + REG_WR(pio, regi_pio, rw_man_ctrl, man_ctrl); + REG_WR(pio, regi_pio, rw_dout, dout); + REG_WR(pio, regi_pio, rw_oe, oe); + + /* Allocate memory for MTD device structure and private data */ + wrapper = kzalloc(sizeof(struct mtd_info_wrapper), GFP_KERNEL); + if (!wrapper) { + printk(KERN_ERR "Unable to allocate CRISv32 NAND MTD " + "device structure.\n"); + err = -ENOMEM; + return NULL; + } + + read_cs = write_cs = (void __iomem *)REG_ADDR(pio, regi_pio, + rw_io_access0); + + /* Get pointer to private data */ + this = &wrapper->chip; + crisv32_mtd = &wrapper->info; + + /* Link the private data with the MTD structure */ + crisv32_mtd->priv = this; + + /* Set address of NAND IO lines */ + this->IO_ADDR_R = read_cs; + this->IO_ADDR_W = write_cs; + this->cmd_ctrl = crisv32_hwcontrol; + this->dev_ready = crisv32_device_ready; + 
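/* A rough sketch of what cmd_ctrl sees during a command cycle:
+ * nand_command() first calls it with the command byte and
+ * ctrl = NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE, whereupon
+ * crisv32_hwcontrol() above pulls ce0_n low, raises a1 (CLE)
+ * and then writeb()s the byte to IO_ADDR_W.
+ */
+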
/* 20 us command delay time */ + this->chip_delay = 20; + this->ecc.mode = NAND_ECC_SOFT; + + /* Enable the following for a flash based bad block table */ + /* this->bbt_options = NAND_BBT_USE_FLASH; */ + + /* Scan to find existence of the device */ + if (nand_scan(crisv32_mtd, 1)) { + err = -ENXIO; + goto out_mtd; + } + + return crisv32_mtd; + +out_mtd: + kfree(wrapper); + return NULL; +} + diff --git a/kernel/arch/cris/arch-v32/drivers/mach-fs/Makefile b/kernel/arch/cris/arch-v32/drivers/mach-fs/Makefile new file mode 100644 index 000000000..5c6d2a2a0 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/mach-fs/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for Etrax-specific drivers +# + +obj-$(CONFIG_ETRAX_NANDFLASH) += nandflash.o +obj-$(CONFIG_ETRAX_GPIO) += gpio.o diff --git a/kernel/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/kernel/arch/cris/arch-v32/drivers/mach-fs/gpio.c new file mode 100644 index 000000000..009f4ee1b --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/mach-fs/gpio.c @@ -0,0 +1,979 @@ +/* + * ETRAX CRISv32 general port I/O device + * + * Copyright (c) 1999-2006 Axis Communications AB + * + * Authors: Bjorn Wesen (initial version) + * Ola Knutsson (LED handling) + * Johan Adolfsson (read/set directions, write, port G, + * port to ETRAX FS. + * + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +#include <asm/etraxgpio.h> +#include <hwregs/reg_map.h> +#include <hwregs/reg_rdwr.h> +#include <hwregs/gio_defs.h> +#include <hwregs/intr_vect_defs.h> +#include <asm/io.h> +#include <asm/irq.h> + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +#include "../i2c.h" + +#define VIRT_I2C_ADDR 0x40 +#endif + +/* The following gio ports on ETRAX FS is available: + * pa 8 bits, supports interrupts off, hi, low, set, posedge, negedge anyedge + * pb 18 bits + * pc 18 bits + * pd 18 bits + * pe 18 bits + * each port has a rw_px_dout, r_px_din and rw_px_oe register. 
+ */ + +#define GPIO_MAJOR 120 /* experimental MAJOR number */ + +#define D(x) + +#if 0 +static int dp_cnt; +#define DP(x) \ + do { \ + dp_cnt++; \ + if (dp_cnt % 1000 == 0) \ + x; \ + } while (0) +#else +#define DP(x) +#endif + +static DEFINE_MUTEX(gpio_mutex); +static char gpio_name[] = "etrax gpio"; + +#if 0 +static wait_queue_head_t *gpio_wq; +#endif + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static int virtual_gpio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#endif +static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static ssize_t gpio_write(struct file *file, const char *buf, size_t count, + loff_t *off); +static int gpio_open(struct inode *inode, struct file *filp); +static int gpio_release(struct inode *inode, struct file *filp); +static unsigned int gpio_poll(struct file *filp, + struct poll_table_struct *wait); + +/* private data per open() of this driver */ + +struct gpio_private { + struct gpio_private *next; + /* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */ + unsigned char clk_mask; + unsigned char data_mask; + unsigned char write_msb; + unsigned char pad1; + /* These fields are generic */ + unsigned long highalarm, lowalarm; + wait_queue_head_t alarm_wq; + int minor; +}; + +/* linked list of alarms to check for */ + +static struct gpio_private *alarmlist; + +static int gpio_some_alarms; /* Set if someone uses alarm */ +static unsigned long gpio_pa_high_alarms; +static unsigned long gpio_pa_low_alarms; + +static DEFINE_SPINLOCK(alarm_lock); + +#define NUM_PORTS (GPIO_MINOR_LAST+1) +#define GIO_REG_RD_ADDR(reg) \ + (volatile unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg) +#define GIO_REG_WR_ADDR(reg) \ + (volatile unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg) +unsigned long led_dummy; +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static unsigned long virtual_dummy; +static unsigned long virtual_rw_pv_oe = CONFIG_ETRAX_DEF_GIO_PV_OE; +static unsigned short cached_virtual_gpio_read; +#endif + +static volatile unsigned long *data_out[NUM_PORTS] = { + GIO_REG_WR_ADDR(rw_pa_dout), + GIO_REG_WR_ADDR(rw_pb_dout), + &led_dummy, + GIO_REG_WR_ADDR(rw_pc_dout), + GIO_REG_WR_ADDR(rw_pd_dout), + GIO_REG_WR_ADDR(rw_pe_dout), +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + &virtual_dummy, +#endif +}; + +static volatile unsigned long *data_in[NUM_PORTS] = { + GIO_REG_RD_ADDR(r_pa_din), + GIO_REG_RD_ADDR(r_pb_din), + &led_dummy, + GIO_REG_RD_ADDR(r_pc_din), + GIO_REG_RD_ADDR(r_pd_din), + GIO_REG_RD_ADDR(r_pe_din), +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + &virtual_dummy, +#endif +}; + +static unsigned long changeable_dir[NUM_PORTS] = { + CONFIG_ETRAX_PA_CHANGEABLE_DIR, + CONFIG_ETRAX_PB_CHANGEABLE_DIR, + 0, + CONFIG_ETRAX_PC_CHANGEABLE_DIR, + CONFIG_ETRAX_PD_CHANGEABLE_DIR, + CONFIG_ETRAX_PE_CHANGEABLE_DIR, +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + CONFIG_ETRAX_PV_CHANGEABLE_DIR, +#endif +}; + +static unsigned long changeable_bits[NUM_PORTS] = { + CONFIG_ETRAX_PA_CHANGEABLE_BITS, + CONFIG_ETRAX_PB_CHANGEABLE_BITS, + 0, + CONFIG_ETRAX_PC_CHANGEABLE_BITS, + CONFIG_ETRAX_PD_CHANGEABLE_BITS, + CONFIG_ETRAX_PE_CHANGEABLE_BITS, +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + CONFIG_ETRAX_PV_CHANGEABLE_BITS, +#endif +}; + +static volatile unsigned long *dir_oe[NUM_PORTS] = { + GIO_REG_WR_ADDR(rw_pa_oe), + GIO_REG_WR_ADDR(rw_pb_oe), + &led_dummy, + GIO_REG_WR_ADDR(rw_pc_oe), + GIO_REG_WR_ADDR(rw_pd_oe), + GIO_REG_WR_ADDR(rw_pe_oe), +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + &virtual_rw_pv_oe, +#endif +}; + + + +static unsigned int gpio_poll(struct file *file, struct poll_table_struct *wait) +{ 
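+ /* Alarms are armed with IO_HIGHALARM/IO_LOWALARM and then waited
+ * for with select()/poll(). A sketch of a user-space consumer
+ * (device node name and masks assumed for illustration):
+ *
+ *   int fd = open("/dev/gpioa", O_RDWR);
+ *   fd_set rfds;
+ *   ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_HIGHALARM), 1 << 2);
+ *   FD_ZERO(&rfds); FD_SET(fd, &rfds);
+ *   select(fd + 1, &rfds, NULL, NULL, NULL);
+ */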
+ unsigned int mask = 0; + struct gpio_private *priv = file->private_data; + unsigned long data; + poll_wait(file, &priv->alarm_wq, wait); + if (priv->minor == GPIO_MINOR_A) { + reg_gio_rw_intr_cfg intr_cfg; + unsigned long tmp; + unsigned long flags; + + local_irq_save(flags); + data = REG_TYPE_CONV(unsigned long, reg_gio_r_pa_din, + REG_RD(gio, regi_gio, r_pa_din)); + /* PA has support for interrupt + * lets activate high for those low and with highalarm set + */ + intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg); + + tmp = ~data & priv->highalarm & 0xFF; + if (tmp & (1 << 0)) + intr_cfg.pa0 = regk_gio_hi; + if (tmp & (1 << 1)) + intr_cfg.pa1 = regk_gio_hi; + if (tmp & (1 << 2)) + intr_cfg.pa2 = regk_gio_hi; + if (tmp & (1 << 3)) + intr_cfg.pa3 = regk_gio_hi; + if (tmp & (1 << 4)) + intr_cfg.pa4 = regk_gio_hi; + if (tmp & (1 << 5)) + intr_cfg.pa5 = regk_gio_hi; + if (tmp & (1 << 6)) + intr_cfg.pa6 = regk_gio_hi; + if (tmp & (1 << 7)) + intr_cfg.pa7 = regk_gio_hi; + /* + * lets activate low for those high and with lowalarm set + */ + tmp = data & priv->lowalarm & 0xFF; + if (tmp & (1 << 0)) + intr_cfg.pa0 = regk_gio_lo; + if (tmp & (1 << 1)) + intr_cfg.pa1 = regk_gio_lo; + if (tmp & (1 << 2)) + intr_cfg.pa2 = regk_gio_lo; + if (tmp & (1 << 3)) + intr_cfg.pa3 = regk_gio_lo; + if (tmp & (1 << 4)) + intr_cfg.pa4 = regk_gio_lo; + if (tmp & (1 << 5)) + intr_cfg.pa5 = regk_gio_lo; + if (tmp & (1 << 6)) + intr_cfg.pa6 = regk_gio_lo; + if (tmp & (1 << 7)) + intr_cfg.pa7 = regk_gio_lo; + + REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg); + local_irq_restore(flags); + } else if (priv->minor <= GPIO_MINOR_E) + data = *data_in[priv->minor]; + else + return 0; + + if ((data & priv->highalarm) || (~data & priv->lowalarm)) + mask = POLLIN|POLLRDNORM; + + DP(printk(KERN_DEBUG "gpio_poll ready: mask 0x%08X\n", mask)); + return mask; +} + +int etrax_gpio_wake_up_check(void) +{ + struct gpio_private *priv; + unsigned long data = 0; + unsigned long flags; + int ret = 0; + spin_lock_irqsave(&alarm_lock, flags); + priv = alarmlist; + while (priv) { +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + if (priv->minor == GPIO_MINOR_V) + data = (unsigned long)cached_virtual_gpio_read; + else { + data = *data_in[priv->minor]; + if (priv->minor == GPIO_MINOR_A) + priv->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); + } +#else + data = *data_in[priv->minor]; +#endif + if ((data & priv->highalarm) || + (~data & priv->lowalarm)) { + DP(printk(KERN_DEBUG + "etrax_gpio_wake_up_check %i\n", priv->minor)); + wake_up_interruptible(&priv->alarm_wq); + ret = 1; + } + priv = priv->next; + } + spin_unlock_irqrestore(&alarm_lock, flags); + return ret; +} + +static irqreturn_t +gpio_poll_timer_interrupt(int irq, void *dev_id) +{ + if (gpio_some_alarms) + return IRQ_RETVAL(etrax_gpio_wake_up_check()); + return IRQ_NONE; +} + +static irqreturn_t +gpio_pa_interrupt(int irq, void *dev_id) +{ + reg_gio_rw_intr_mask intr_mask; + reg_gio_r_masked_intr masked_intr; + reg_gio_rw_ack_intr ack_intr; + unsigned long tmp; + unsigned long tmp2; +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + unsigned char enable_gpiov_ack = 0; +#endif + + /* Find what PA interrupts are active */ + masked_intr = REG_RD(gio, regi_gio, r_masked_intr); + tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr); + + /* Find those that we have enabled */ + spin_lock(&alarm_lock); + tmp &= (gpio_pa_high_alarms | gpio_pa_low_alarms); + spin_unlock(&alarm_lock); + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + /* Something changed on virtual GPIO. 
Interrupt is acked by + * reading the device. + */ + if (tmp & (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN)) { + i2c_read(VIRT_I2C_ADDR, (void *)&cached_virtual_gpio_read, + sizeof(cached_virtual_gpio_read)); + enable_gpiov_ack = 1; + } +#endif + + /* Ack them */ + ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp); + REG_WR(gio, regi_gio, rw_ack_intr, ack_intr); + + /* Disable those interrupts.. */ + intr_mask = REG_RD(gio, regi_gio, rw_intr_mask); + tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask); + tmp2 &= ~tmp; +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + /* Do not disable interrupt on virtual GPIO. Changes on virtual + * pins are only noticed by an interrupt. + */ + if (enable_gpiov_ack) + tmp2 |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); +#endif + intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2); + REG_WR(gio, regi_gio, rw_intr_mask, intr_mask); + + if (gpio_some_alarms) + return IRQ_RETVAL(etrax_gpio_wake_up_check()); + return IRQ_NONE; +} + + +static ssize_t gpio_write(struct file *file, const char *buf, size_t count, + loff_t *off) +{ + struct gpio_private *priv = file->private_data; + unsigned char data, clk_mask, data_mask, write_msb; + unsigned long flags; + unsigned long shadow; + volatile unsigned long *port; + ssize_t retval = count; + /* Only bits 0-7 may be used for write operations but allow all + devices except leds... */ +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + if (priv->minor == GPIO_MINOR_V) + return -EFAULT; +#endif + if (priv->minor == GPIO_MINOR_LEDS) + return -EFAULT; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + clk_mask = priv->clk_mask; + data_mask = priv->data_mask; + /* It must have been configured using the IO_CFG_WRITE_MODE */ + /* Perhaps a better error code? 
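+ * The IO_CFG_WRITE_MODE argument packs both masks and the bit
+ * order as clk_mask | (data_mask << 8) | (write_msb << 16);
+ * e.g. 0x00010201 means clock on bit 0, data on bit 1, MSB first.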
*/ + if (clk_mask == 0 || data_mask == 0) + return -EPERM; + write_msb = priv->write_msb; + D(printk(KERN_DEBUG "gpio_write: %lu to data 0x%02X clk 0x%02X " + "msb: %i\n", count, data_mask, clk_mask, write_msb)); + port = data_out[priv->minor]; + + while (count--) { + int i; + data = *buf++; + if (priv->write_msb) { + for (i = 7; i >= 0; i--) { + local_irq_save(flags); + shadow = *port; + *port = shadow &= ~clk_mask; + if (data & 1<<i) + *port = shadow |= data_mask; + else + *port = shadow &= ~data_mask; + /* For FPGA: min 5.0ns (DCC) before CCLK high */ + *port = shadow |= clk_mask; + local_irq_restore(flags); + } + } else { + for (i = 0; i <= 7; i++) { + local_irq_save(flags); + shadow = *port; + *port = shadow &= ~clk_mask; + if (data & 1<<i) + *port = shadow |= data_mask; + else + *port = shadow &= ~data_mask; + /* For FPGA: min 5.0ns (DCC) before CCLK high */ + *port = shadow |= clk_mask; + local_irq_restore(flags); + } + } + } + return retval; +} + + + +static int +gpio_open(struct inode *inode, struct file *filp) +{ + struct gpio_private *priv; + int p = iminor(inode); + + if (p > GPIO_MINOR_LAST) + return -EINVAL; + + priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mutex_lock(&gpio_mutex); + memset(priv, 0, sizeof(*priv)); + + priv->minor = p; + + /* initialize the io/alarm struct */ + + priv->clk_mask = 0; + priv->data_mask = 0; + priv->highalarm = 0; + priv->lowalarm = 0; + init_waitqueue_head(&priv->alarm_wq); + + filp->private_data = (void *)priv; + + /* link it into our alarmlist */ + spin_lock_irq(&alarm_lock); + priv->next = alarmlist; + alarmlist = priv; + spin_unlock_irq(&alarm_lock); + + mutex_unlock(&gpio_mutex); + return 0; +} + +static int +gpio_release(struct inode *inode, struct file *filp) +{ + struct gpio_private *p; + struct gpio_private *todel; + /* local copies while updating them: */ + unsigned long a_high, a_low; + unsigned long some_alarms; + + /* unlink from alarmlist and free the private structure */ + + spin_lock_irq(&alarm_lock); + p = alarmlist; + todel = filp->private_data; + + if (p == todel) { + alarmlist = todel->next; + } else { + while (p->next != todel) + p = p->next; + p->next = todel->next; + } + + kfree(todel); + /* Check if there are still any alarms set */ + p = alarmlist; + some_alarms = 0; + a_high = 0; + a_low = 0; + while (p) { + if (p->minor == GPIO_MINOR_A) { +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + p->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); +#endif + a_high |= p->highalarm; + a_low |= p->lowalarm; + } + + if (p->highalarm | p->lowalarm) + some_alarms = 1; + p = p->next; + } + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + /* Variables 'some_alarms' and 'a_low' needs to be set here again + * to ensure that interrupt for virtual GPIO is handled. + */ + some_alarms = 1; + a_low |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); +#endif + + gpio_some_alarms = some_alarms; + gpio_pa_high_alarms = a_high; + gpio_pa_low_alarms = a_low; + spin_unlock_irq(&alarm_lock); + + return 0; +} + +/* Main device API. ioctl's to read/set/clear bits, as well as to + * set alarms to wait for using a subsequent select(). 
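+ * A typical sequence from user space (mask values hypothetical):
+ * configure directions with IO_SETGET_OUTPUT, then drive pins,
+ * e.g. ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 1 << 4)
+ * to raise bit 4 of the port's output shadow.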
+ */ + +inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg) +{ + /* Set direction 0=unchanged 1=input, + * return mask with 1=input + */ + unsigned long flags; + unsigned long dir_shadow; + + local_irq_save(flags); + dir_shadow = *dir_oe[priv->minor]; + dir_shadow &= ~(arg & changeable_dir[priv->minor]); + *dir_oe[priv->minor] = dir_shadow; + local_irq_restore(flags); + + if (priv->minor == GPIO_MINOR_A) + dir_shadow ^= 0xFF; /* Only 8 bits */ +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + else if (priv->minor == GPIO_MINOR_V) + dir_shadow ^= 0xFFFF; /* Only 16 bits */ +#endif + else + dir_shadow ^= 0x3FFFF; /* Only 18 bits */ + return dir_shadow; + +} /* setget_input */ + +inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg) +{ + unsigned long flags; + unsigned long dir_shadow; + + local_irq_save(flags); + dir_shadow = *dir_oe[priv->minor]; + dir_shadow |= (arg & changeable_dir[priv->minor]); + *dir_oe[priv->minor] = dir_shadow; + local_irq_restore(flags); + return dir_shadow; +} /* setget_output */ + +static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg); + +static int +gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned long flags; + unsigned long val; + unsigned long shadow; + struct gpio_private *priv = file->private_data; + if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE) + return -EINVAL; + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + if (priv->minor == GPIO_MINOR_V) + return virtual_gpio_ioctl(file, cmd, arg); +#endif + + switch (_IOC_NR(cmd)) { + case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */ + /* Read the port. */ + return *data_in[priv->minor]; + break; + case IO_SETBITS: + local_irq_save(flags); + /* Set changeable bits with a 1 in arg. */ + shadow = *data_out[priv->minor]; + shadow |= (arg & changeable_bits[priv->minor]); + *data_out[priv->minor] = shadow; + local_irq_restore(flags); + break; + case IO_CLRBITS: + local_irq_save(flags); + /* Clear changeable bits with a 1 in arg. */ + shadow = *data_out[priv->minor]; + shadow &= ~(arg & changeable_bits[priv->minor]); + *data_out[priv->minor] = shadow; + local_irq_restore(flags); + break; + case IO_HIGHALARM: + /* Set alarm when bits with 1 in arg go high. */ + priv->highalarm |= arg; + spin_lock_irqsave(&alarm_lock, flags); + gpio_some_alarms = 1; + if (priv->minor == GPIO_MINOR_A) + gpio_pa_high_alarms |= arg; + spin_unlock_irqrestore(&alarm_lock, flags); + break; + case IO_LOWALARM: + /* Set alarm when bits with 1 in arg go low. */ + priv->lowalarm |= arg; + spin_lock_irqsave(&alarm_lock, flags); + gpio_some_alarms = 1; + if (priv->minor == GPIO_MINOR_A) + gpio_pa_low_alarms |= arg; + spin_unlock_irqrestore(&alarm_lock, flags); + break; + case IO_CLRALARM: + /* Clear alarm for bits with 1 in arg. */ + priv->highalarm &= ~arg; + priv->lowalarm &= ~arg; + spin_lock_irqsave(&alarm_lock, flags); + if (priv->minor == GPIO_MINOR_A) { + if (gpio_pa_high_alarms & arg || + gpio_pa_low_alarms & arg) + /* Must update the gpio_pa_*alarms masks */ + ; + } + spin_unlock_irqrestore(&alarm_lock, flags); + break; + case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */ + /* Read direction 0=input 1=output */ + return *dir_oe[priv->minor]; + case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */ + /* Set direction 0=unchanged 1=input, + * return mask with 1=input + */ + return setget_input(priv, arg); + break; + case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! 
*/ + /* Set direction 0=unchanged 1=output, + * return mask with 1=output + */ + return setget_output(priv, arg); + + case IO_CFG_WRITE_MODE: + { + unsigned long dir_shadow; + dir_shadow = *dir_oe[priv->minor]; + + priv->clk_mask = arg & 0xFF; + priv->data_mask = (arg >> 8) & 0xFF; + priv->write_msb = (arg >> 16) & 0x01; + /* Check if we're allowed to change the bits and + * the direction is correct + */ + if (!((priv->clk_mask & changeable_bits[priv->minor]) && + (priv->data_mask & changeable_bits[priv->minor]) && + (priv->clk_mask & dir_shadow) && + (priv->data_mask & dir_shadow))) { + priv->clk_mask = 0; + priv->data_mask = 0; + return -EPERM; + } + break; + } + case IO_READ_INBITS: + /* *arg is result of reading the input pins */ + val = *data_in[priv->minor]; + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + return 0; + break; + case IO_READ_OUTBITS: + /* *arg is result of reading the output shadow */ + val = *data_out[priv->minor]; + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + break; + case IO_SETGET_INPUT: + /* bits set in *arg is set to input, + * *arg updated with current input pins. + */ + if (copy_from_user(&val, (unsigned long *)arg, sizeof(val))) + return -EFAULT; + val = setget_input(priv, val); + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + break; + case IO_SETGET_OUTPUT: + /* bits set in *arg is set to output, + * *arg updated with current output pins. + */ + if (copy_from_user(&val, (unsigned long *)arg, sizeof(val))) + return -EFAULT; + val = setget_output(priv, val); + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + break; + default: + if (priv->minor == GPIO_MINOR_LEDS) + return gpio_leds_ioctl(cmd, arg); + else + return -EINVAL; + } /* switch */ + + return 0; +} + +static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + mutex_lock(&gpio_mutex); + ret = gpio_ioctl_unlocked(file, cmd, arg); + mutex_unlock(&gpio_mutex); + + return ret; +} + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static int +virtual_gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned long flags; + unsigned short val; + unsigned short shadow; + struct gpio_private *priv = file->private_data; + + switch (_IOC_NR(cmd)) { + case IO_SETBITS: + local_irq_save(flags); + /* Set changeable bits with a 1 in arg. */ + i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + shadow |= ~*dir_oe[priv->minor]; + shadow |= (arg & changeable_bits[priv->minor]); + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + local_irq_restore(flags); + break; + case IO_CLRBITS: + local_irq_save(flags); + /* Clear changeable bits with a 1 in arg. */ + i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + shadow |= ~*dir_oe[priv->minor]; + shadow &= ~(arg & changeable_bits[priv->minor]); + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + local_irq_restore(flags); + break; + case IO_HIGHALARM: + /* Set alarm when bits with 1 in arg go high. */ + priv->highalarm |= arg; + spin_lock(&alarm_lock); + gpio_some_alarms = 1; + spin_unlock(&alarm_lock); + break; + case IO_LOWALARM: + /* Set alarm when bits with 1 in arg go low. */ + priv->lowalarm |= arg; + spin_lock(&alarm_lock); + gpio_some_alarms = 1; + spin_unlock(&alarm_lock); + break; + case IO_CLRALARM: + /* Clear alarm for bits with 1 in arg. 
*/ + priv->highalarm &= ~arg; + priv->lowalarm &= ~arg; + spin_lock(&alarm_lock); + spin_unlock(&alarm_lock); + break; + case IO_CFG_WRITE_MODE: + { + unsigned long dir_shadow; + dir_shadow = *dir_oe[priv->minor]; + + priv->clk_mask = arg & 0xFF; + priv->data_mask = (arg >> 8) & 0xFF; + priv->write_msb = (arg >> 16) & 0x01; + /* Check if we're allowed to change the bits and + * the direction is correct + */ + if (!((priv->clk_mask & changeable_bits[priv->minor]) && + (priv->data_mask & changeable_bits[priv->minor]) && + (priv->clk_mask & dir_shadow) && + (priv->data_mask & dir_shadow))) { + priv->clk_mask = 0; + priv->data_mask = 0; + return -EPERM; + } + break; + } + case IO_READ_INBITS: + /* *arg is result of reading the input pins */ + val = cached_virtual_gpio_read; + val &= ~*dir_oe[priv->minor]; + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + return 0; + break; + case IO_READ_OUTBITS: + /* *arg is result of reading the output shadow */ + i2c_read(VIRT_I2C_ADDR, (void *)&val, sizeof(val)); + val &= *dir_oe[priv->minor]; + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + break; + case IO_SETGET_INPUT: + { + /* bits set in *arg is set to input, + * *arg updated with current input pins. + */ + unsigned short input_mask = ~*dir_oe[priv->minor]; + if (copy_from_user(&val, (unsigned long *)arg, sizeof(val))) + return -EFAULT; + val = setget_input(priv, val); + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + if ((input_mask & val) != input_mask) { + /* Input pins changed. All ports desired as input + * should be set to logic 1. + */ + unsigned short change = input_mask ^ val; + i2c_read(VIRT_I2C_ADDR, (void *)&shadow, + sizeof(shadow)); + shadow &= ~change; + shadow |= val; + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, + sizeof(shadow)); + } + break; + } + case IO_SETGET_OUTPUT: + /* bits set in *arg is set to output, + * *arg updated with current output pins. + */ + if (copy_from_user(&val, (unsigned long *)arg, sizeof(val))) + return -EFAULT; + val = setget_output(priv, val); + if (copy_to_user((unsigned long *)arg, &val, sizeof(val))) + return -EFAULT; + break; + default: + return -EINVAL; + } /* switch */ + return 0; +} +#endif /* CONFIG_ETRAX_VIRTUAL_GPIO */ + +static int +gpio_leds_ioctl(unsigned int cmd, unsigned long arg) +{ + unsigned char green; + unsigned char red; + + switch (_IOC_NR(cmd)) { + case IO_LEDACTIVE_SET: + green = ((unsigned char) arg) & 1; + red = (((unsigned char) arg) >> 1) & 1; + CRIS_LED_ACTIVE_SET_G(green); + CRIS_LED_ACTIVE_SET_R(red); + break; + + default: + return -EINVAL; + } /* switch */ + + return 0; +} + +static const struct file_operations gpio_fops = { + .owner = THIS_MODULE, + .poll = gpio_poll, + .unlocked_ioctl = gpio_ioctl, + .write = gpio_write, + .open = gpio_open, + .release = gpio_release, + .llseek = noop_llseek, +}; + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO +static void +virtual_gpio_init(void) +{ + reg_gio_rw_intr_cfg intr_cfg; + reg_gio_rw_intr_mask intr_mask; + unsigned short shadow; + + shadow = ~virtual_rw_pv_oe; /* Input ports should be set to logic 1 */ + shadow |= CONFIG_ETRAX_DEF_GIO_PV_OUT; + i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); + + /* Set interrupt mask and on what state the interrupt shall trigger. + * For virtual gpio the interrupt shall trigger on logic '0'. 
+ */ + intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg); + intr_mask = REG_RD(gio, regi_gio, rw_intr_mask); + + switch (CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN) { + case 0: + intr_cfg.pa0 = regk_gio_lo; + intr_mask.pa0 = regk_gio_yes; + break; + case 1: + intr_cfg.pa1 = regk_gio_lo; + intr_mask.pa1 = regk_gio_yes; + break; + case 2: + intr_cfg.pa2 = regk_gio_lo; + intr_mask.pa2 = regk_gio_yes; + break; + case 3: + intr_cfg.pa3 = regk_gio_lo; + intr_mask.pa3 = regk_gio_yes; + break; + case 4: + intr_cfg.pa4 = regk_gio_lo; + intr_mask.pa4 = regk_gio_yes; + break; + case 5: + intr_cfg.pa5 = regk_gio_lo; + intr_mask.pa5 = regk_gio_yes; + break; + case 6: + intr_cfg.pa6 = regk_gio_lo; + intr_mask.pa6 = regk_gio_yes; + break; + case 7: + intr_cfg.pa7 = regk_gio_lo; + intr_mask.pa7 = regk_gio_yes; + break; + } + + REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg); + REG_WR(gio, regi_gio, rw_intr_mask, intr_mask); + + gpio_pa_low_alarms |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN); + gpio_some_alarms = 1; +} +#endif + +/* main driver initialization routine, called from mem.c */ + +static __init int +gpio_init(void) +{ + int res; + + /* do the formalities */ + + res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops); + if (res < 0) { + printk(KERN_ERR "gpio: couldn't get a major number.\n"); + return res; + } + + /* Clear all leds */ + CRIS_LED_NETWORK_GRP0_SET(0); + CRIS_LED_NETWORK_GRP1_SET(0); + CRIS_LED_ACTIVE_SET(0); + CRIS_LED_DISK_READ(0); + CRIS_LED_DISK_WRITE(0); + + printk(KERN_INFO "ETRAX FS GPIO driver v2.5, (c) 2003-2007 " + "Axis Communications AB\n"); + /* We call etrax_gpio_wake_up_check() from timer interrupt */ + if (request_irq(TIMER0_INTR_VECT, gpio_poll_timer_interrupt, + IRQF_SHARED, "gpio poll", &alarmlist)) + printk(KERN_ERR "timer0 irq for gpio\n"); + + if (request_irq(GIO_INTR_VECT, gpio_pa_interrupt, + IRQF_SHARED, "gpio PA", &alarmlist)) + printk(KERN_ERR "PA irq for gpio\n"); + +#ifdef CONFIG_ETRAX_VIRTUAL_GPIO + virtual_gpio_init(); +#endif + + return res; +} + +/* this makes sure that gpio_init is called during kernel boot */ + +module_init(gpio_init); diff --git a/kernel/arch/cris/arch-v32/drivers/mach-fs/nandflash.c b/kernel/arch/cris/arch-v32/drivers/mach-fs/nandflash.c new file mode 100644 index 000000000..e03238454 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/mach-fs/nandflash.c @@ -0,0 +1,174 @@ +/* + * arch/cris/arch-v32/drivers/nandflash.c + * + * Copyright (c) 2004 + * + * Derived from drivers/mtd/nand/spia.c + * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> +#include <arch/memmap.h> +#include <hwregs/reg_map.h> +#include <hwregs/reg_rdwr.h> +#include <hwregs/gio_defs.h> +#include <hwregs/bif_core_defs.h> +#include <asm/io.h> + +#define CE_BIT 4 +#define CLE_BIT 5 +#define ALE_BIT 6 +#define BY_BIT 7 + +struct mtd_info_wrapper { + struct mtd_info info; + struct nand_chip chip; +}; + +/* Bitmask for control pins */ +#define PIN_BITMASK ((1 << CE_BIT) | (1 << CLE_BIT) | (1 << ALE_BIT)) + +/* Bitmask for mtd nand control bits */ +#define CTRL_BITMASK (NAND_NCE | NAND_CLE | NAND_ALE) + + +static struct mtd_info *crisv32_mtd; +/* + * hardware specific access to control-lines + */ +static void crisv32_hwcontrol(struct mtd_info *mtd, int cmd, + unsigned int ctrl) +{ + unsigned long flags; + reg_gio_rw_pa_dout dout; + struct nand_chip *this = mtd->priv; + + local_irq_save(flags); + + /* control bits change */ + if (ctrl & NAND_CTRL_CHANGE) { + dout = REG_RD(gio, regi_gio, rw_pa_dout); + dout.data &= ~PIN_BITMASK; + +#if (CE_BIT == 4 && NAND_NCE == 1 && \ + CLE_BIT == 5 && NAND_CLE == 2 && \ + ALE_BIT == 6 && NAND_ALE == 4) + /* Pins in same order as control bits, but shifted. + * Optimize for this case; works for 2.6.18 */ + dout.data |= ((ctrl & CTRL_BITMASK) ^ NAND_NCE) << CE_BIT; +#else + /* the slow way */ + if (!(ctrl & NAND_NCE)) + dout.data |= (1 << CE_BIT); + if (ctrl & NAND_CLE) + dout.data |= (1 << CLE_BIT); + if (ctrl & NAND_ALE) + dout.data |= (1 << ALE_BIT); +#endif + REG_WR(gio, regi_gio, rw_pa_dout, dout); + } + + /* command to chip */ + if (cmd != NAND_CMD_NONE) + writeb(cmd, this->IO_ADDR_W); + + local_irq_restore(flags); +} + +/* +* read device ready pin +*/ +static int crisv32_device_ready(struct mtd_info *mtd) +{ + reg_gio_r_pa_din din = REG_RD(gio, regi_gio, r_pa_din); + return ((din.data & (1 << BY_BIT)) >> BY_BIT); +} + +/* + * Main initialization routine + */ +struct mtd_info *__init crisv32_nand_flash_probe(void) +{ + void __iomem *read_cs; + void __iomem *write_cs; + + reg_bif_core_rw_grp3_cfg bif_cfg = REG_RD(bif_core, regi_bif_core, + rw_grp3_cfg); + reg_gio_rw_pa_oe pa_oe = REG_RD(gio, regi_gio, rw_pa_oe); + struct mtd_info_wrapper *wrapper; + struct nand_chip *this; + int err = 0; + + /* Allocate memory for MTD device structure and private data */ + wrapper = kzalloc(sizeof(struct mtd_info_wrapper), GFP_KERNEL); + if (!wrapper) { + printk(KERN_ERR "Unable to allocate CRISv32 NAND MTD " + "device structure.\n"); + err = -ENOMEM; + return NULL; + } + + read_cs = ioremap(MEM_CSP0_START | MEM_NON_CACHEABLE, 8192); + write_cs = ioremap(MEM_CSP1_START | MEM_NON_CACHEABLE, 8192); + + if (!read_cs || !write_cs) { + printk(KERN_ERR "CRISv32 NAND ioremap failed\n"); + err = -EIO; + goto out_mtd; + } + + /* Get pointer to private data */ + this = &wrapper->chip; + crisv32_mtd = &wrapper->info; + + pa_oe.oe |= 1 << CE_BIT; + pa_oe.oe |= 1 << ALE_BIT; + pa_oe.oe |= 1 << CLE_BIT; + pa_oe.oe &= ~(1 << BY_BIT); + REG_WR(gio, regi_gio, rw_pa_oe, pa_oe); + + bif_cfg.gated_csp0 = regk_bif_core_rd; + bif_cfg.gated_csp1 = regk_bif_core_wr; + REG_WR(bif_core, regi_bif_core, rw_grp3_cfg, bif_cfg); + + /* Link the private data with the MTD structure */ + crisv32_mtd->priv = this; + + /* Set address of NAND IO lines */ + this->IO_ADDR_R = read_cs; + this->IO_ADDR_W = write_cs; + this->cmd_ctrl = crisv32_hwcontrol; + this->dev_ready = crisv32_device_ready; + /* 20 us 
command delay time */ + this->chip_delay = 20; + this->ecc.mode = NAND_ECC_SOFT; + + /* Enable the following for a flash based bad block table */ + /* this->bbt_options = NAND_BBT_USE_FLASH; */ + + /* Scan to find existence of the device */ + if (nand_scan(crisv32_mtd, 1)) { + err = -ENXIO; + goto out_ior; + } + + return crisv32_mtd; + +out_ior: + iounmap((void *)read_cs); + iounmap((void *)write_cs); +out_mtd: + kfree(wrapper); + return NULL; +} + diff --git a/kernel/arch/cris/arch-v32/drivers/pci/Makefile b/kernel/arch/cris/arch-v32/drivers/pci/Makefile new file mode 100644 index 000000000..bff7482f2 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/pci/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for Etrax cardbus driver +# + +obj-$(CONFIG_ETRAX_CARDBUS) += bios.o dma.o diff --git a/kernel/arch/cris/arch-v32/drivers/pci/bios.c b/kernel/arch/cris/arch-v32/drivers/pci/bios.c new file mode 100644 index 000000000..64a5fb937 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/pci/bios.c @@ -0,0 +1,99 @@ +#include <linux/pci.h> +#include <linux/kernel.h> +#include <arch/hwregs/intr_vect.h> + +void pcibios_fixup_bus(struct pci_bus *b) +{ +} + +void pcibios_set_master(struct pci_dev *dev) +{ + u8 lat; + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); +} + +int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + unsigned long prot; + + /* Leave vm_pgoff as-is, the PCI space address is the physical + * address on this platform. + */ + prot = pgprot_val(vma->vm_page_prot); + vma->vm_page_prot = __pgprot(prot); + + /* Write-combine setting is ignored, it is changed via the mtrr + * interfaces on this platform. 
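+ * (That wording is inherited from the i386 code this file was
+ * derived from; CRIS has no MTRRs, so write_combine simply has
+ * no effect here.)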
+ */ + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +resource_size_t +pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + resource_size_t start = res->start; + + if ((res->flags & IORESOURCE_IO) && (start & 0x300)) + start = (start + 0x3ff) & ~0x3ff; + + return start; +} + +int pcibios_enable_resources(struct pci_dev *dev, int mask) +{ + u16 cmd, old_cmd; + int idx; + struct resource *r; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + old_cmd = cmd; + for(idx=0; idx<6; idx++) { + /* Only set up the requested stuff */ + if (!(mask & (1<<idx))) + continue; + + r = &dev->resource[idx]; + if (!r->start && r->end) { + printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); + return -EINVAL; + } + if (r->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (r->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + if (dev->resource[PCI_ROM_RESOURCE].start) + cmd |= PCI_COMMAND_MEMORY; + if (cmd != old_cmd) { + printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +int pcibios_enable_irq(struct pci_dev *dev) +{ + dev->irq = EXT_INTR_VECT; + return 0; +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + int err; + + if ((err = pcibios_enable_resources(dev, mask)) < 0) + return err; + + if (!dev->msi_enabled) + pcibios_enable_irq(dev); + return 0; +} diff --git a/kernel/arch/cris/arch-v32/drivers/pci/dma.c b/kernel/arch/cris/arch-v32/drivers/pci/dma.c new file mode 100644 index 000000000..ee55578d9 --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/pci/dma.c @@ -0,0 +1,50 @@ +/* + * Dynamic DMA mapping support. + * + * On cris there is no hardware dynamic DMA address translation, + * so consistent alloc/free are merely page allocation/freeing. + * The rest of the dynamic DMA mapping interface is implemented + * in asm/pci.h. + * + * Borrowed from i386. + */ + +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/gfp.h> +#include <asm/io.h> + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + void *ret; + int order = get_order(size); + /* ignore region specifiers */ + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); + + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) + return ret; + + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) + gfp |= GFP_DMA; + + ret = (void *)__get_free_pages(gfp, order); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret); + } + return ret; +} + +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + int order = get_order(size); + + if (!dma_release_from_coherent(dev, order, vaddr)) + free_pages((unsigned long)vaddr, order); +} + diff --git a/kernel/arch/cris/arch-v32/drivers/sync_serial.c b/kernel/arch/cris/arch-v32/drivers/sync_serial.c new file mode 100644 index 000000000..4dda9bd6b --- /dev/null +++ b/kernel/arch/cris/arch-v32/drivers/sync_serial.c @@ -0,0 +1,1709 @@ +/* + * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3. 
+ * + * Copyright (c) 2005, 2008 Axis Communications AB + * Author: Mikael Starvik + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/major.h> +#include <linux/sched.h> +#include <linux/mutex.h> +#include <linux/interrupt.h> +#include <linux/poll.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/wait.h> + +#include <asm/io.h> +#include <mach/dma.h> +#include <pinmux.h> +#include <hwregs/reg_rdwr.h> +#include <hwregs/sser_defs.h> +#include <hwregs/timer_defs.h> +#include <hwregs/dma_defs.h> +#include <hwregs/dma.h> +#include <hwregs/intr_vect_defs.h> +#include <hwregs/intr_vect.h> +#include <hwregs/reg_map.h> +#include <asm/sync_serial.h> + + +/* The receiver is a bit tricky because of the continuous stream of data.*/ +/* */ +/* Three DMA descriptors are linked together. Each DMA descriptor is */ +/* responsible for port->bufchunk of a common buffer. */ +/* */ +/* +---------------------------------------------+ */ +/* | +----------+ +----------+ +----------+ | */ +/* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */ +/* +----------+ +----------+ +----------+ */ +/* | | | */ +/* v v v */ +/* +-------------------------------------+ */ +/* | BUFFER | */ +/* +-------------------------------------+ */ +/* |<- data_avail ->| */ +/* readp writep */ +/* */ +/* If the application keeps up the pace readp will be right after writep.*/ +/* If the application can't keep the pace we have to throw away data. */ +/* The idea is that readp should be ready with the data pointed out by */ +/* Descr[i] when the DMA has filled in Descr[i+1]. */ +/* Otherwise we will discard */ +/* the rest of the data pointed out by Descr1 and set readp to the start */ +/* of Descr2 */ + +/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ +/* words can be handled */ +#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE +#define NBR_IN_DESCR (8*6) +#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR) + +#define NBR_OUT_DESCR 8 +#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR) + +#define DEFAULT_FRAME_RATE 0 +#define DEFAULT_WORD_RATE 7 + +/* To be removed when we move to pure udev. */ +#define SYNC_SERIAL_MAJOR 125 + +/* NOTE: Enabling some debug will likely cause overrun or underrun, + * especially if manual mode is used. + */ +#define DEBUG(x) +#define DEBUGREAD(x) +#define DEBUGWRITE(x) +#define DEBUGPOLL(x) +#define DEBUGRXINT(x) +#define DEBUGTXINT(x) +#define DEBUGTRDMA(x) +#define DEBUGOUTBUF(x) + +enum syncser_irq_setup { + no_irq_setup = 0, + dma_irq_setup = 1, + manual_irq_setup = 2, +}; + +struct sync_port { + unsigned long regi_sser; + unsigned long regi_dmain; + unsigned long regi_dmaout; + + /* Interrupt vectors. */ + unsigned long dma_in_intr_vect; /* Used for DMA in. */ + unsigned long dma_out_intr_vect; /* Used for DMA out. */ + unsigned long syncser_intr_vect; /* Used when no DMA. */ + + /* DMA number for in and out. */ + unsigned int dma_in_nbr; + unsigned int dma_out_nbr; + + /* DMA owner. 
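+ * (dma_sser0/dma_sser1 on ETRAX FS, dma_sser on ARTPEC-3; see
+ * the REQ_DMA_SYNCSER* defines below.)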
*/ + enum dma_owner req_dma; + + char started; /* 1 if port has been started */ + char port_nbr; /* Port 0 or 1 */ + char busy; /* 1 if port is busy */ + + char enabled; /* 1 if port is enabled */ + char use_dma; /* 1 if port uses dma */ + char tr_running; + + enum syncser_irq_setup init_irqs; + int output; + int input; + + /* Next byte to be read by application */ + unsigned char *readp; + /* Next byte to be written by etrax */ + unsigned char *writep; + + unsigned int in_buffer_size; + unsigned int in_buffer_len; + unsigned int inbufchunk; + /* Data buffers for in and output. */ + unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32); + unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32); + unsigned char flip[IN_BUFFER_SIZE] __aligned(32); + struct timespec timestamp[NBR_IN_DESCR]; + struct dma_descr_data *next_rx_desc; + struct dma_descr_data *prev_rx_desc; + + struct timeval last_timestamp; + int read_ts_idx; + int write_ts_idx; + + /* Pointer to the first available descriptor in the ring, + * unless active_tr_descr == catch_tr_descr and a dma + * transfer is active */ + struct dma_descr_data *active_tr_descr; + + /* Pointer to the first allocated descriptor in the ring */ + struct dma_descr_data *catch_tr_descr; + + /* Pointer to the descriptor with the current end-of-list */ + struct dma_descr_data *prev_tr_descr; + int full; + + /* Pointer to the first byte being read by DMA + * or current position in out_buffer if not using DMA. */ + unsigned char *out_rd_ptr; + + /* Number of bytes currently locked for being read by DMA */ + int out_buf_count; + + dma_descr_context in_context __aligned(32); + dma_descr_context out_context __aligned(32); + dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16); + dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16); + + wait_queue_head_t out_wait_q; + wait_queue_head_t in_wait_q; + + spinlock_t lock; +}; + +static DEFINE_MUTEX(sync_serial_mutex); +static int etrax_sync_serial_init(void); +static void initialize_port(int portnbr); +static inline int sync_data_avail(struct sync_port *port); + +static int sync_serial_open(struct inode *, struct file *); +static int sync_serial_release(struct inode *, struct file *); +static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); + +static long sync_serial_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +static int sync_serial_ioctl_unlocked(struct file *file, + unsigned int cmd, unsigned long arg); +static ssize_t sync_serial_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); +static ssize_t sync_serial_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); + +#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ + defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ + (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ + defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))) +#define SYNC_SER_DMA +#else +#define SYNC_SER_MANUAL +#endif + +#ifdef SYNC_SER_DMA +static void start_dma_out(struct sync_port *port, const char *data, int count); +static void start_dma_in(struct sync_port *port); +static irqreturn_t tr_interrupt(int irq, void *dev_id); +static irqreturn_t rx_interrupt(int irq, void *dev_id); +#endif +#ifdef SYNC_SER_MANUAL +static void send_word(struct sync_port *port); +static irqreturn_t manual_interrupt(int irq, void *dev_id); +#endif + +#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed +#define artpec_request_dma crisv32_request_dma +#define artpec_free_dma crisv32_free_dma + +#ifdef CONFIG_ETRAXFS +/* 
ETRAX FS */ +#define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR +#define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR +#define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR +#define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR +#define PINMUX_SSER0 pinmux_sser0 +#define PINMUX_SSER1 pinmux_sser1 +#define SYNCSER_INST0 regi_sser0 +#define SYNCSER_INST1 regi_sser1 +#define SYNCSER_INTR_VECT0 SSER0_INTR_VECT +#define SYNCSER_INTR_VECT1 SSER1_INTR_VECT +#define OUT_DMA_INST0 regi_dma4 +#define IN_DMA_INST0 regi_dma5 +#define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT +#define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT +#define DMA_IN_INTR_VECT0 DMA5_INTR_VECT +#define DMA_IN_INTR_VECT1 DMA6_INTR_VECT +#define REQ_DMA_SYNCSER0 dma_sser0 +#define REQ_DMA_SYNCSER1 dma_sser1 +#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) +#define PORT1_DMA 1 +#else +#define PORT1_DMA 0 +#endif +#elif defined(CONFIG_CRIS_MACH_ARTPEC3) +/* ARTPEC-3 */ +#define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR +#define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR +#define PINMUX_SSER0 pinmux_sser +#define SYNCSER_INST0 regi_sser +#define SYNCSER_INTR_VECT0 SSER_INTR_VECT +#define OUT_DMA_INST0 regi_dma6 +#define IN_DMA_INST0 regi_dma7 +#define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT +#define DMA_IN_INTR_VECT0 DMA7_INTR_VECT +#define REQ_DMA_SYNCSER0 dma_sser +#define REQ_DMA_SYNCSER1 dma_sser +#endif + +#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) +#define PORT0_DMA 1 +#else +#define PORT0_DMA 0 +#endif + +/* The ports */ +static struct sync_port ports[] = { + { + .regi_sser = SYNCSER_INST0, + .regi_dmaout = OUT_DMA_INST0, + .regi_dmain = IN_DMA_INST0, + .use_dma = PORT0_DMA, + .dma_in_intr_vect = DMA_IN_INTR_VECT0, + .dma_out_intr_vect = DMA_OUT_INTR_VECT0, + .dma_in_nbr = DMA_IN_NBR0, + .dma_out_nbr = DMA_OUT_NBR0, + .req_dma = REQ_DMA_SYNCSER0, + .syncser_intr_vect = SYNCSER_INTR_VECT0, + }, +#ifdef CONFIG_ETRAXFS + { + .regi_sser = SYNCSER_INST1, + .regi_dmaout = regi_dma6, + .regi_dmain = regi_dma7, + .use_dma = PORT1_DMA, + .dma_in_intr_vect = DMA_IN_INTR_VECT1, + .dma_out_intr_vect = DMA_OUT_INTR_VECT1, + .dma_in_nbr = DMA_IN_NBR1, + .dma_out_nbr = DMA_OUT_NBR1, + .req_dma = REQ_DMA_SYNCSER1, + .syncser_intr_vect = SYNCSER_INTR_VECT1, + }, +#endif +}; + +#define NBR_PORTS ARRAY_SIZE(ports) + +static const struct file_operations syncser_fops = { + .owner = THIS_MODULE, + .write = sync_serial_write, + .read = sync_serial_read, + .poll = sync_serial_poll, + .unlocked_ioctl = sync_serial_ioctl, + .open = sync_serial_open, + .release = sync_serial_release, + .llseek = noop_llseek, +}; + +static dev_t syncser_first; +static int minor_count = NBR_PORTS; +#define SYNCSER_NAME "syncser" +static struct cdev *syncser_cdev; +static struct class *syncser_class; + +static void sync_serial_start_port(struct sync_port *port) +{ + reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); + reg_sser_rw_tr_cfg tr_cfg = + REG_RD(sser, port->regi_sser, rw_tr_cfg); + reg_sser_rw_rec_cfg rec_cfg = + REG_RD(sser, port->regi_sser, rw_rec_cfg); + cfg.en = regk_sser_yes; + tr_cfg.tr_en = regk_sser_yes; + rec_cfg.rec_en = regk_sser_yes; + REG_WR(sser, port->regi_sser, rw_cfg, cfg); + REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); + REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); + port->started = 1; +} + +static void __init initialize_port(int portnbr) +{ + struct sync_port *port = &ports[portnbr]; + reg_sser_rw_cfg cfg = { 0 }; + reg_sser_rw_frm_cfg frm_cfg = { 0 }; + reg_sser_rw_tr_cfg tr_cfg = { 0 }; + reg_sser_rw_rec_cfg rec_cfg = { 0 }; + + DEBUG(pr_info("Init sync serial port %d\n", portnbr)); + + 
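+	/* Software state first: buffer pointers, timestamp indices, wait
+	 * queues and the lock.  The sser hardware registers are
+	 * programmed further down. */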
+	port->port_nbr = portnbr;
+	port->init_irqs = no_irq_setup;
+
+	port->out_rd_ptr = port->out_buffer;
+	port->out_buf_count = 0;
+
+	port->output = 1;
+	port->input = 0;
+
+	port->readp = port->flip;
+	port->writep = port->flip;
+	port->in_buffer_size = IN_BUFFER_SIZE;
+	port->in_buffer_len = 0;
+	port->inbufchunk = IN_DESCR_SIZE;
+
+	port->read_ts_idx = 0;
+	port->write_ts_idx = 0;
+
+	init_waitqueue_head(&port->out_wait_q);
+	init_waitqueue_head(&port->in_wait_q);
+
+	spin_lock_init(&port->lock);
+
+	cfg.out_clk_src = regk_sser_intern_clk;
+	cfg.out_clk_pol = regk_sser_pos;
+	cfg.clk_od_mode = regk_sser_no;
+	cfg.clk_dir = regk_sser_out;
+	cfg.gate_clk = regk_sser_no;
+	cfg.base_freq = regk_sser_f29_493;
+	cfg.clk_div = 256;
+	REG_WR(sser, port->regi_sser, rw_cfg, cfg);
+
+	frm_cfg.wordrate = DEFAULT_WORD_RATE;
+	frm_cfg.type = regk_sser_edge;
+	frm_cfg.frame_pin_dir = regk_sser_out;
+	frm_cfg.frame_pin_use = regk_sser_frm;
+	frm_cfg.status_pin_dir = regk_sser_in;
+	frm_cfg.status_pin_use = regk_sser_hold;
+	frm_cfg.out_on = regk_sser_tr;
+	frm_cfg.tr_delay = 1;
+	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
+
+	tr_cfg.urun_stop = regk_sser_no;
+	tr_cfg.sample_size = 7;
+	tr_cfg.sh_dir = regk_sser_msbfirst;
+	tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
+#if 0
+	tr_cfg.rate_ctrl = regk_sser_bulk;
+	tr_cfg.data_pin_use = regk_sser_dout;
+#else
+	tr_cfg.rate_ctrl = regk_sser_iso;
+	tr_cfg.data_pin_use = regk_sser_dout;
+#endif
+	tr_cfg.bulk_wspace = 1;
+	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+
+	rec_cfg.sample_size = 7;
+	rec_cfg.sh_dir = regk_sser_msbfirst;
+	rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
+	rec_cfg.fifo_thr = regk_sser_inf;
+	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
+
+#ifdef SYNC_SER_DMA
+	{
+		int i;
+		/* Setup the descriptor ring for dma out/transmit. */
+		for (i = 0; i < NBR_OUT_DESCR; i++) {
+			dma_descr_data *descr = &port->out_descr[i];
+			descr->wait = 0;
+			descr->intr = 1;
+			descr->eol = 0;
+			descr->out_eop = 0;
+			/* descr points at out_descr[i]; link it to the
+			 * next element of the array. */
+			descr->next = (dma_descr_data *)
+				virt_to_phys(&port->out_descr[i + 1]);
+		}
+	}
+
+	/* Create a ring from the list. */
+	port->out_descr[NBR_OUT_DESCR-1].next =
+		(dma_descr_data *)virt_to_phys(&port->out_descr[0]);
+
+	/* Setup context for traversing the ring. */
+	port->active_tr_descr = &port->out_descr[0];
+	port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
+	port->catch_tr_descr = &port->out_descr[0];
+#endif
+}
+
+static inline int sync_data_avail(struct sync_port *port)
+{
+	return port->in_buffer_len;
+}
+
+static int sync_serial_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	int dev = iminor(inode);
+	struct sync_port *port;
+#ifdef SYNC_SER_DMA
+	reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
+	reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
+#endif
+
+	DEBUG(pr_debug("Open sync serial port %d\n", dev));
+
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
+		return -ENODEV;
+	}
+	port = &ports[dev];
+	/* Allow opening this device twice (assuming one reader and one
+	 * writer). */
+	if (port->busy == 2) {
+		DEBUG(pr_info("syncser%d is busy\n", dev));
+		return -EBUSY;
+	}
+
+	mutex_lock(&sync_serial_mutex);
+
+	/* Clear any stale data left in the flip buffer. */
+	port->readp = port->writep = port->flip;
+	port->in_buffer_len = 0;
+	port->read_ts_idx = 0;
+	port->write_ts_idx = 0;
+
+	if (port->init_irqs != no_irq_setup) {
+		/* Init only on first call.
*/ + port->busy++; + mutex_unlock(&sync_serial_mutex); + return 0; + } + if (port->use_dma) { +#ifdef SYNC_SER_DMA + const char *tmp; + DEBUG(pr_info("Using DMA for syncser%d\n", dev)); + + tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx"; + if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0, + tmp, port)) { + pr_err("Can't alloc syncser%d TX IRQ", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + if (artpec_request_dma(port->dma_out_nbr, tmp, + DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) { + free_irq(port->dma_out_intr_vect, port); + pr_err("Can't alloc syncser%d TX DMA", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx"; + if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0, + tmp, port)) { + artpec_free_dma(port->dma_out_nbr); + free_irq(port->dma_out_intr_vect, port); + pr_err("Can't alloc syncser%d RX IRQ", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + if (artpec_request_dma(port->dma_in_nbr, tmp, + DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) { + artpec_free_dma(port->dma_out_nbr); + free_irq(port->dma_out_intr_vect, port); + free_irq(port->dma_in_intr_vect, port); + pr_err("Can't alloc syncser%d RX DMA", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + /* Enable DMAs */ + REG_WR(dma, port->regi_dmain, rw_cfg, cfg); + REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); + /* Enable DMA IRQs */ + REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); + REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); + /* Set up wordsize = 1 for DMAs. */ + DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1); + DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1); + + start_dma_in(port); + port->init_irqs = dma_irq_setup; +#endif + } else { /* !port->use_dma */ +#ifdef SYNC_SER_MANUAL + const char *tmp = dev == 0 ? "syncser0 manual irq" : + "syncser1 manual irq"; + if (request_irq(port->syncser_intr_vect, manual_interrupt, + 0, tmp, port)) { + pr_err("Can't alloc syncser%d manual irq", + dev); + ret = -EBUSY; + goto unlock_and_exit; + } + port->init_irqs = manual_irq_setup; +#else + panic("sync_serial: Manual mode not supported\n"); +#endif /* SYNC_SER_MANUAL */ + } + port->busy++; + ret = 0; + +unlock_and_exit: + mutex_unlock(&sync_serial_mutex); + return ret; +} + +static int sync_serial_release(struct inode *inode, struct file *file) +{ + int dev = iminor(inode); + struct sync_port *port; + + if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { + DEBUG(pr_info("Invalid minor %d\n", dev)); + return -ENODEV; + } + port = &ports[dev]; + if (port->busy) + port->busy--; + if (!port->busy) + /* XXX */; + return 0; +} + +static unsigned int sync_serial_poll(struct file *file, poll_table *wait) +{ + int dev = iminor(file_inode(file)); + unsigned int mask = 0; + struct sync_port *port; + DEBUGPOLL( + static unsigned int prev_mask; + ); + + port = &ports[dev]; + + if (!port->started) + sync_serial_start_port(port); + + poll_wait(file, &port->out_wait_q, wait); + poll_wait(file, &port->in_wait_q, wait); + + /* No active transfer, descriptors are available */ + if (port->output && !port->tr_running) + mask |= POLLOUT | POLLWRNORM; + + /* Descriptor and buffer space available. 
 */
+	if (port->output &&
+	    port->active_tr_descr != port->catch_tr_descr &&
+	    port->out_buf_count < OUT_BUFFER_SIZE)
+		mask |= POLLOUT | POLLWRNORM;
+
+	/* At least an inbufchunk of data */
+	if (port->input && sync_data_avail(port) >= port->inbufchunk)
+		mask |= POLLIN | POLLRDNORM;
+
+	DEBUGPOLL(
+		if (mask != prev_mask)
+			pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
+				mask,
+				mask & POLLOUT ? "POLLOUT" : "",
+				mask & POLLIN ? "POLLIN" : "");
+		prev_mask = mask;
+	);
+	return mask;
+}
+
+static ssize_t __sync_serial_read(struct file *file,
+				  char __user *buf,
+				  size_t count,
+				  loff_t *ppos,
+				  struct timespec *ts)
+{
+	unsigned long flags;
+	int dev = MINOR(file_inode(file)->i_rdev);
+	int avail;
+	struct sync_port *port;
+	unsigned char *start;
+	unsigned char *end;
+
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
+		return -ENODEV;
+	}
+	port = &ports[dev];
+
+	if (!port->started)
+		sync_serial_start_port(port);
+
+	/* Calculate the number of available bytes.
+	 * Save the pointers locally so that they are not modified by
+	 * interrupts while we use them. */
+	spin_lock_irqsave(&port->lock, flags);
+	start = port->readp;
+	end = port->writep;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	while ((start == end) && !port->in_buffer_len) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		wait_event_interruptible(port->in_wait_q,
+					 !(start == end && !port->full));
+
+		if (signal_pending(current))
+			return -EINTR;
+
+		spin_lock_irqsave(&port->lock, flags);
+		start = port->readp;
+		end = port->writep;
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
+			  dev, count,
+			  start - port->flip, end - port->flip,
+			  port->in_buffer_size));
+
+	/* Lazy read: never return wrapped data. */
+	if (end > start)
+		avail = end - start;
+	else
+		avail = port->flip + port->in_buffer_size - start;
+
+	count = count > avail ? avail : count;
+	if (copy_to_user(buf, start, count))
+		return -EFAULT;
+
+	/* If a timestamp was requested, find the timestamp of the first
+	 * returned byte and copy it.
+	 * N.B.: Applications that request timestamps MUST read data in
+	 * chunks that are multiples of IN_DESCR_SIZE.
+	 * Otherwise the timestamps will not be aligned to the data read.
+	 */
+	if (ts != NULL) {
+		int idx = port->read_ts_idx;
+		memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
+		port->read_ts_idx += count / IN_DESCR_SIZE;
+		if (port->read_ts_idx >= NBR_IN_DESCR)
+			port->read_ts_idx = 0;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->readp += count;
+	/* Check for wrap */
+	if (port->readp >= port->flip + port->in_buffer_size)
+		port->readp = port->flip;
+	port->in_buffer_len -= count;
+	port->full = 0;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	DEBUGREAD(pr_info("r %d\n", count));
+
+	return count;
+}
+
+static ssize_t sync_serial_input(struct file *file, unsigned long arg)
+{
+	struct ssp_request req;
+	int count;
+	int ret;
+
+	/* Copy the request structure from user-mode. */
+	ret = copy_from_user(&req, (struct ssp_request __user *)arg,
+			     sizeof(struct ssp_request));
+
+	if (ret) {
+		DEBUG(pr_info("sync_serial_input copy from user failed\n"));
+		return -EFAULT;
+	}
+
+	/* To get the timestamps aligned, make sure that 'len'
+	 * is a multiple of IN_DESCR_SIZE.
+	 */
+	if ((req.len % IN_DESCR_SIZE) != 0) {
+		DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
+			      req.len, IN_DESCR_SIZE));
+		return -EFAULT;
+	}
+
+	/* Do the actual read. */
+	/* Note that req.buf is actually a pointer to user space.
*/ + count = __sync_serial_read(file, req.buf, req.len, + NULL, &req.ts); + + if (count < 0) { + DEBUG(pr_info("sync_serial_input read failed\n")); + return count; + } + + /* Copy the request back to user-mode. */ + ret = copy_to_user((struct ssp_request __user *)arg, &req, + sizeof(struct ssp_request)); + + if (ret) { + DEBUG(pr_info("syncser input copy2user failed\n")); + return -EFAULT; + } + + /* Return the number of bytes read. */ + return count; +} + + +static int sync_serial_ioctl_unlocked(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int return_val = 0; + int dma_w_size = regk_dma_set_w_size1; + int dev = iminor(file_inode(file)); + struct sync_port *port; + reg_sser_rw_tr_cfg tr_cfg; + reg_sser_rw_rec_cfg rec_cfg; + reg_sser_rw_frm_cfg frm_cfg; + reg_sser_rw_cfg gen_cfg; + reg_sser_rw_intr_mask intr_mask; + + if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { + DEBUG(pr_info("Invalid minor %d\n", dev)); + return -1; + } + + if (cmd == SSP_INPUT) + return sync_serial_input(file, arg); + + port = &ports[dev]; + spin_lock_irq(&port->lock); + + tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); + rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); + frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg); + gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); + intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); + + switch (cmd) { + case SSP_SPEED: + if (GET_SPEED(arg) == CODEC) { + unsigned int freq; + + gen_cfg.base_freq = regk_sser_f32; + + /* Clock divider will internally be + * gen_cfg.clk_div + 1. + */ + + freq = GET_FREQ(arg); + switch (freq) { + case FREQ_32kHz: + case FREQ_64kHz: + case FREQ_128kHz: + case FREQ_256kHz: + gen_cfg.clk_div = 125 * + (1 << (freq - FREQ_256kHz)) - 1; + break; + case FREQ_512kHz: + gen_cfg.clk_div = 62; + break; + case FREQ_1MHz: + case FREQ_2MHz: + case FREQ_4MHz: + gen_cfg.clk_div = 8 * (1 << freq) - 1; + break; + } + } else if (GET_SPEED(arg) == CODEC_f32768) { + gen_cfg.base_freq = regk_sser_f32_768; + switch (GET_FREQ(arg)) { + case FREQ_4096kHz: + gen_cfg.clk_div = 7; + break; + default: + spin_unlock_irq(&port->lock); + return -EINVAL; + } + } else { + gen_cfg.base_freq = regk_sser_f29_493; + switch (GET_SPEED(arg)) { + case SSP150: + gen_cfg.clk_div = 29493000 / (150 * 8) - 1; + break; + case SSP300: + gen_cfg.clk_div = 29493000 / (300 * 8) - 1; + break; + case SSP600: + gen_cfg.clk_div = 29493000 / (600 * 8) - 1; + break; + case SSP1200: + gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; + break; + case SSP2400: + gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; + break; + case SSP4800: + gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; + break; + case SSP9600: + gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; + break; + case SSP19200: + gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; + break; + case SSP28800: + gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; + break; + case SSP57600: + gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; + break; + case SSP115200: + gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; + break; + case SSP230400: + gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; + break; + case SSP460800: + gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; + break; + case SSP921600: + gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; + break; + case SSP3125000: + gen_cfg.base_freq = regk_sser_f100; + gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; + break; + + } + } + frm_cfg.wordrate = GET_WORD_RATE(arg); + + break; + case SSP_MODE: + switch (arg) { + case MASTER_OUTPUT: + port->output = 1; + port->input = 0; + frm_cfg.out_on = 
regk_sser_tr; + frm_cfg.frame_pin_dir = regk_sser_out; + gen_cfg.clk_dir = regk_sser_out; + break; + case SLAVE_OUTPUT: + port->output = 1; + port->input = 0; + frm_cfg.frame_pin_dir = regk_sser_in; + gen_cfg.clk_dir = regk_sser_in; + break; + case MASTER_INPUT: + port->output = 0; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_out; + frm_cfg.out_on = regk_sser_intern_tb; + gen_cfg.clk_dir = regk_sser_out; + break; + case SLAVE_INPUT: + port->output = 0; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_in; + gen_cfg.clk_dir = regk_sser_in; + break; + case MASTER_BIDIR: + port->output = 1; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_out; + frm_cfg.out_on = regk_sser_intern_tb; + gen_cfg.clk_dir = regk_sser_out; + break; + case SLAVE_BIDIR: + port->output = 1; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_in; + gen_cfg.clk_dir = regk_sser_in; + break; + default: + spin_unlock_irq(&port->lock); + return -EINVAL; + } + if (!port->use_dma || arg == MASTER_OUTPUT || + arg == SLAVE_OUTPUT) + intr_mask.rdav = regk_sser_yes; + break; + case SSP_FRAME_SYNC: + if (arg & NORMAL_SYNC) { + frm_cfg.rec_delay = 1; + frm_cfg.tr_delay = 1; + } else if (arg & EARLY_SYNC) + frm_cfg.rec_delay = frm_cfg.tr_delay = 0; + else if (arg & LATE_SYNC) { + frm_cfg.tr_delay = 2; + frm_cfg.rec_delay = 2; + } else if (arg & SECOND_WORD_SYNC) { + frm_cfg.rec_delay = 7; + frm_cfg.tr_delay = 1; + } + + tr_cfg.bulk_wspace = frm_cfg.tr_delay; + frm_cfg.early_wend = regk_sser_yes; + if (arg & BIT_SYNC) + frm_cfg.type = regk_sser_edge; + else if (arg & WORD_SYNC) + frm_cfg.type = regk_sser_level; + else if (arg & EXTENDED_SYNC) + frm_cfg.early_wend = regk_sser_no; + + if (arg & SYNC_ON) + frm_cfg.frame_pin_use = regk_sser_frm; + else if (arg & SYNC_OFF) + frm_cfg.frame_pin_use = regk_sser_gio0; + + dma_w_size = regk_dma_set_w_size2; + if (arg & WORD_SIZE_8) { + rec_cfg.sample_size = tr_cfg.sample_size = 7; + dma_w_size = regk_dma_set_w_size1; + } else if (arg & WORD_SIZE_12) + rec_cfg.sample_size = tr_cfg.sample_size = 11; + else if (arg & WORD_SIZE_16) + rec_cfg.sample_size = tr_cfg.sample_size = 15; + else if (arg & WORD_SIZE_24) + rec_cfg.sample_size = tr_cfg.sample_size = 23; + else if (arg & WORD_SIZE_32) + rec_cfg.sample_size = tr_cfg.sample_size = 31; + + if (arg & BIT_ORDER_MSB) + rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; + else if (arg & BIT_ORDER_LSB) + rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst; + + if (arg & FLOW_CONTROL_ENABLE) { + frm_cfg.status_pin_use = regk_sser_frm; + rec_cfg.fifo_thr = regk_sser_thr16; + } else if (arg & FLOW_CONTROL_DISABLE) { + frm_cfg.status_pin_use = regk_sser_gio0; + rec_cfg.fifo_thr = regk_sser_inf; + } + + if (arg & CLOCK_NOT_GATED) + gen_cfg.gate_clk = regk_sser_no; + else if (arg & CLOCK_GATED) + gen_cfg.gate_clk = regk_sser_yes; + + break; + case SSP_IPOLARITY: + /* NOTE!! 
negedge is considered NORMAL */
+		if (arg & CLOCK_NORMAL)
+			rec_cfg.clk_pol = regk_sser_neg;
+		else if (arg & CLOCK_INVERT)
+			rec_cfg.clk_pol = regk_sser_pos;
+
+		if (arg & FRAME_NORMAL)
+			frm_cfg.level = regk_sser_pos_hi;
+		else if (arg & FRAME_INVERT)
+			frm_cfg.level = regk_sser_neg_lo;
+
+		if (arg & STATUS_NORMAL)
+			gen_cfg.hold_pol = regk_sser_pos;
+		else if (arg & STATUS_INVERT)
+			gen_cfg.hold_pol = regk_sser_neg;
+		break;
+	case SSP_OPOLARITY:
+		if (arg & CLOCK_NORMAL)
+			gen_cfg.out_clk_pol = regk_sser_pos;
+		else if (arg & CLOCK_INVERT)
+			gen_cfg.out_clk_pol = regk_sser_neg;
+
+		if (arg & FRAME_NORMAL)
+			frm_cfg.level = regk_sser_pos_hi;
+		else if (arg & FRAME_INVERT)
+			frm_cfg.level = regk_sser_neg_lo;
+
+		if (arg & STATUS_NORMAL)
+			gen_cfg.hold_pol = regk_sser_pos;
+		else if (arg & STATUS_INVERT)
+			gen_cfg.hold_pol = regk_sser_neg;
+		break;
+	case SSP_SPI:
+		rec_cfg.fifo_thr = regk_sser_inf;
+		rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
+		rec_cfg.sample_size = tr_cfg.sample_size = 7;
+		frm_cfg.frame_pin_use = regk_sser_frm;
+		frm_cfg.type = regk_sser_level;
+		frm_cfg.tr_delay = 1;
+		frm_cfg.level = regk_sser_neg_lo;
+		if (arg & SPI_SLAVE) {
+			rec_cfg.clk_pol = regk_sser_neg;
+			gen_cfg.clk_dir = regk_sser_in;
+			port->input = 1;
+			port->output = 0;
+		} else {
+			gen_cfg.out_clk_pol = regk_sser_pos;
+			port->input = 0;
+			port->output = 1;
+			gen_cfg.clk_dir = regk_sser_out;
+		}
+		break;
+	case SSP_INBUFCHUNK:
+		break;
+	default:
+		return_val = -1;
+	}
+
+	if (port->started) {
+		rec_cfg.rec_en = port->input;
+		gen_cfg.en = (port->output | port->input);
+	}
+
+	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
+	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
+	REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
+	REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
+
+	if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
+			WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
+		int en = gen_cfg.en;
+		gen_cfg.en = 0;
+		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
+		/* ##### Should DMA be stopped before we change the DMA
+		 * word size? */
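+		/* The port is disabled across the word-size change so that
+		 * no transfer is in flight while the DMA word size is
+		 * reprogrammed; it is re-enabled just below. */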
+		DMA_WR_CMD(port->regi_dmain, dma_w_size);
+		DMA_WR_CMD(port->regi_dmaout, dma_w_size);
+		gen_cfg.en = en;
+		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
+	}
+
+	spin_unlock_irq(&port->lock);
+	return return_val;
+}
+
+static long sync_serial_ioctl(struct file *file,
+			      unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	mutex_lock(&sync_serial_mutex);
+	ret = sync_serial_ioctl_unlocked(file, cmd, arg);
+	mutex_unlock(&sync_serial_mutex);
+
+	return ret;
+}
+
+/* NOTE: sync_serial_write does not support concurrency */
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	int dev = iminor(file_inode(file));
+	DECLARE_WAITQUEUE(wait, current);
+	struct sync_port *port;
+	int trunc_count;
+	unsigned long flags;
+	int bytes_free;
+	int out_buf_count;
+
+	unsigned char *rd_ptr;		/* First allocated byte in the buffer */
+	unsigned char *wr_ptr;		/* First free byte in the buffer */
+	unsigned char *buf_stop_ptr;	/* Last byte + 1 */
+
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
+		return -ENODEV;
+	}
+	port = &ports[dev];
+
+	/* |<-               OUT_BUFFER_SIZE                   ->|
+	 *           |<-  out_buf_count   ->|
+	 *           |<- trunc_count ->| ...->|
+	 *  ______________________________________________________
+	 * |  free   |        data          |        free         |
+	 * |_________|______________________|_____________________|
+	 *            ^ rd_ptr               ^ wr_ptr
+	 */
+	DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
+			   port->port_nbr, count, port->active_tr_descr,
+			   port->catch_tr_descr));
+
+	/* Read variables that may be updated by interrupts */
+	spin_lock_irqsave(&port->lock, flags);
+	rd_ptr = port->out_rd_ptr;
+	out_buf_count = port->out_buf_count;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* Check if resources are available */
+	if (port->tr_running &&
+	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
+	     out_buf_count >= OUT_BUFFER_SIZE)) {
+		DEBUGWRITE(pr_info("sser%d full\n", dev));
+		return -EAGAIN;
+	}
+
+	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
+
+	/* Determine the pointer to the first free byte before copying. */
+	wr_ptr = rd_ptr + out_buf_count;
+	if (wr_ptr >= buf_stop_ptr)
+		wr_ptr -= OUT_BUFFER_SIZE;
+
+	/* If we wrap the ring buffer, let the user-space program handle
+	 * it by truncating the data. This could be made more elegant;
+	 * as it is, small buffer fragments may occur.
+	 */
+	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
+	if (wr_ptr + bytes_free > buf_stop_ptr)
+		bytes_free = buf_stop_ptr - wr_ptr;
+	trunc_count = (count < bytes_free) ?
count : bytes_free; + + if (copy_from_user(wr_ptr, buf, trunc_count)) + return -EFAULT; + + DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n", + out_buf_count, trunc_count, + port->out_buf_count, port->out_buffer, + wr_ptr, buf_stop_ptr)); + + /* Make sure transmitter/receiver is running */ + if (!port->started) { + reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); + reg_sser_rw_rec_cfg rec_cfg = + REG_RD(sser, port->regi_sser, rw_rec_cfg); + cfg.en = regk_sser_yes; + rec_cfg.rec_en = port->input; + REG_WR(sser, port->regi_sser, rw_cfg, cfg); + REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); + port->started = 1; + } + + /* Setup wait if blocking */ + if (!(file->f_flags & O_NONBLOCK)) { + add_wait_queue(&port->out_wait_q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + } + + spin_lock_irqsave(&port->lock, flags); + port->out_buf_count += trunc_count; + if (port->use_dma) { +#ifdef SYNC_SER_DMA + start_dma_out(port, wr_ptr, trunc_count); +#endif + } else if (!port->tr_running) { +#ifdef SYNC_SER_MANUAL + reg_sser_rw_intr_mask intr_mask; + intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); + /* Start sender by writing data */ + send_word(port); + /* and enable transmitter ready IRQ */ + intr_mask.trdy = 1; + REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); +#endif + } + spin_unlock_irqrestore(&port->lock, flags); + + /* Exit if non blocking */ + if (file->f_flags & O_NONBLOCK) { + DEBUGWRITE(pr_info("w d%d c %u %08x\n", + port->port_nbr, trunc_count, + REG_RD_INT(dma, port->regi_dmaout, r_intr))); + return trunc_count; + } + + schedule(); + remove_wait_queue(&port->out_wait_q, &wait); + + if (signal_pending(current)) + return -EINTR; + + DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count)); + return trunc_count; +} + +static ssize_t sync_serial_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return __sync_serial_read(file, buf, count, ppos, NULL); +} + +#ifdef SYNC_SER_MANUAL +static void send_word(struct sync_port *port) +{ + reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); + reg_sser_rw_tr_data tr_data = {0}; + + switch (tr_cfg.sample_size) { + case 8: + port->out_buf_count--; + tr_data.data = *port->out_rd_ptr++; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) + port->out_rd_ptr = port->out_buffer; + break; + case 12: + { + int data = (*port->out_rd_ptr++) << 8; + data |= *port->out_rd_ptr++; + port->out_buf_count -= 2; + tr_data.data = data; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) + port->out_rd_ptr = port->out_buffer; + break; + } + case 16: + port->out_buf_count -= 2; + tr_data.data = *(unsigned short *)port->out_rd_ptr; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + port->out_rd_ptr += 2; + if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) + port->out_rd_ptr = port->out_buffer; + break; + case 24: + port->out_buf_count -= 3; + tr_data.data = *(unsigned short *)port->out_rd_ptr; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + port->out_rd_ptr += 2; + tr_data.data = *port->out_rd_ptr++; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) + port->out_rd_ptr = port->out_buffer; + break; + case 32: + port->out_buf_count -= 4; + tr_data.data = *(unsigned short *)port->out_rd_ptr; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + port->out_rd_ptr += 2; + 
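+		/* Second 16-bit half of the 32-bit word. */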
+		tr_data.data = *(unsigned short *)port->out_rd_ptr;
+		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
+		port->out_rd_ptr += 2;
+		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
+			port->out_rd_ptr = port->out_buffer;
+		break;
+	}
+}
+#endif
+
+#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count)
+{
+	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
+	port->active_tr_descr->after = port->active_tr_descr->buf + count;
+	port->active_tr_descr->intr = 1;
+
+	port->active_tr_descr->eol = 1;
+	port->prev_tr_descr->eol = 0;
+
+	DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
+			   port->prev_tr_descr, port->active_tr_descr));
+	port->prev_tr_descr = port->active_tr_descr;
+	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
+
+	if (!port->tr_running) {
+		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
+						   rw_tr_cfg);
+
+		port->out_context.next = NULL;
+		port->out_context.saved_data =
+			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
+		port->out_context.saved_data_buf = port->prev_tr_descr->buf;
+
+		DMA_START_CONTEXT(port->regi_dmaout,
+				  virt_to_phys((char *)&port->out_context));
+
+		tr_cfg.tr_en = regk_sser_yes;
+		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+		DEBUGTRDMA(pr_info("dma s\n"););
+	} else {
+		DMA_CONTINUE_DATA(port->regi_dmaout);
+		DEBUGTRDMA(pr_info("dma c\n"););
+	}
+
+	port->tr_running = 1;
+}
+
+static void start_dma_in(struct sync_port *port)
+{
+	int i;
+	char *buf;
+	unsigned long flags;
+	spin_lock_irqsave(&port->lock, flags);
+	port->writep = port->flip;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	buf = (char *)virt_to_phys(port->in_buffer);
+	for (i = 0; i < NBR_IN_DESCR; i++) {
+		port->in_descr[i].buf = buf;
+		port->in_descr[i].after = buf + port->inbufchunk;
+		port->in_descr[i].intr = 1;
+		port->in_descr[i].next =
+			(dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
+		buf += port->inbufchunk;
+	}
+	/* Link the last descriptor to the first */
+	port->in_descr[i-1].next =
+		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
+	port->in_descr[i-1].eol = regk_sser_yes;
+	port->next_rx_desc = &port->in_descr[0];
+	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
+	port->in_context.saved_data =
+		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
+	port->in_context.saved_data_buf = port->in_descr[0].buf;
+	DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
+}
+
+static irqreturn_t tr_interrupt(int irq, void *dev_id)
+{
+	reg_dma_r_masked_intr masked;
+	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
+	reg_dma_rw_stat stat;
+	int i;
+	int found = 0;
+	int stop_sser = 0;
+
+	for (i = 0; i < NBR_PORTS; i++) {
+		struct sync_port *port = &ports[i];
+		if (!port->enabled || !port->use_dma)
+			continue;
+
+		/* IRQ active for the port? */
+		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
+		if (!masked.data)
+			continue;
+
+		found = 1;
+
+		/* Check if we should stop the DMA transfer */
+		stat = REG_RD(dma, port->regi_dmaout, rw_stat);
+		if (stat.list_state == regk_dma_data_at_eol)
+			stop_sser = 1;
+
+		/* Clear IRQ */
+		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
+
+		if (!stop_sser) {
+			/* The DMA has completed a descriptor and EOL was
+			 * not encountered, so step the relevant descriptor
+			 * and data pointers forward. */
+			int sent;
+			sent = port->catch_tr_descr->after -
+				port->catch_tr_descr->buf;
+			DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
+					   "in descr %p (ac: %p)\n",
+					   port->out_buf_count, sent,
+					   port->out_buf_count - sent,
+					   port->catch_tr_descr,
+					   port->active_tr_descr););
+			port->out_buf_count -= sent;
+			port->catch_tr_descr =
+				phys_to_virt((int) port->catch_tr_descr->next);
+			port->out_rd_ptr =
+				phys_to_virt((int) port->catch_tr_descr->buf);
+		} else {
+			reg_sser_rw_tr_cfg tr_cfg;
+			int j, sent;
+			/* EOL handler.
+			 * Note that if an EOL was encountered during the irq
+			 * locked section of sync_ser_write the DMA will be
+			 * restarted and the eol flag will be cleared.
+			 * The remaining descriptors will be traversed by
+			 * the descriptor interrupts as usual.
+			 */
+			j = 0;
+			while (!port->catch_tr_descr->eol) {
+				sent = port->catch_tr_descr->after -
+					port->catch_tr_descr->buf;
+				DEBUGOUTBUF(pr_info(
+					"traversing descr %p -%d (%d)\n",
+					port->catch_tr_descr,
+					sent,
+					port->out_buf_count));
+				port->out_buf_count -= sent;
+				port->catch_tr_descr = phys_to_virt(
+					(int)port->catch_tr_descr->next);
+				j++;
+				if (j >= NBR_OUT_DESCR) {
+					/* TODO: Reset and recover */
+					panic("sync_serial: missing eol");
+				}
+			}
+			sent = port->catch_tr_descr->after -
+				port->catch_tr_descr->buf;
+			DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
+					    port->catch_tr_descr,
+					    sent,
+					    port->out_buf_count));
+
+			port->out_buf_count -= sent;
+
+			/* Update the read pointer to the first free byte; we
+			 * may already be writing data there. */
+			port->out_rd_ptr =
+				phys_to_virt((int) port->catch_tr_descr->after);
+			if (port->out_rd_ptr > port->out_buffer +
+					OUT_BUFFER_SIZE)
+				port->out_rd_ptr = port->out_buffer;
+
+			tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
+			DEBUGTXINT(pr_info(
+				"tr_int DMA stop %d, set catch @ %p\n",
+				port->out_buf_count,
+				port->active_tr_descr));
+			if (port->out_buf_count != 0)
+				pr_err("sync_ser: buf not empty after eol\n");
+			port->catch_tr_descr = port->active_tr_descr;
+			port->tr_running = 0;
+			tr_cfg.tr_en = regk_sser_no;
+			REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+		}
+		/* Wake up the waiting process */
+		wake_up_interruptible(&port->out_wait_q);
+	}
+	return IRQ_RETVAL(found);
+} /* tr_interrupt */
+
+
+static inline void handle_rx_packet(struct sync_port *port)
+{
+	int idx;
+	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
+	unsigned long flags;
+
+	DEBUGRXINT(pr_info("!"));
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* If we overrun, the user experience is crap regardless of whether
+	 * we drop new or old data. It's much easier to get it right when
+	 * dropping new data, so let's do that.
+	 */
+	if ((port->writep + port->inbufchunk <=
+	     port->flip + port->in_buffer_size) &&
+	    (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
+		memcpy(port->writep,
+		       phys_to_virt((unsigned)port->next_rx_desc->buf),
+		       port->inbufchunk);
+		port->writep += port->inbufchunk;
+		if (port->writep >= port->flip + port->in_buffer_size)
+			port->writep = port->flip;
+
+		/* Timestamp the new data chunk.
*/ + if (port->write_ts_idx == NBR_IN_DESCR) + port->write_ts_idx = 0; + idx = port->write_ts_idx++; + do_posix_clock_monotonic_gettime(&port->timestamp[idx]); + port->in_buffer_len += port->inbufchunk; + } + spin_unlock_irqrestore(&port->lock, flags); + + port->next_rx_desc->eol = 1; + port->prev_rx_desc->eol = 0; + /* Cache bug workaround */ + flush_dma_descr(port->prev_rx_desc, 0); + port->prev_rx_desc = port->next_rx_desc; + port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); + /* Cache bug workaround */ + flush_dma_descr(port->prev_rx_desc, 1); + /* wake up the waiting process */ + wake_up_interruptible(&port->in_wait_q); + DMA_CONTINUE(port->regi_dmain); + REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); + +} + +static irqreturn_t rx_interrupt(int irq, void *dev_id) +{ + reg_dma_r_masked_intr masked; + + int i; + int found = 0; + + DEBUG(pr_info("rx_interrupt\n")); + + for (i = 0; i < NBR_PORTS; i++) { + struct sync_port *port = &ports[i]; + + if (!port->enabled || !port->use_dma) + continue; + + masked = REG_RD(dma, port->regi_dmain, r_masked_intr); + + if (!masked.data) + continue; + + /* Descriptor interrupt */ + found = 1; + while (REG_RD(dma, port->regi_dmain, rw_data) != + virt_to_phys(port->next_rx_desc)) + handle_rx_packet(port); + } + return IRQ_RETVAL(found); +} /* rx_interrupt */ +#endif /* SYNC_SER_DMA */ + +#ifdef SYNC_SER_MANUAL +static irqreturn_t manual_interrupt(int irq, void *dev_id) +{ + unsigned long flags; + int i; + int found = 0; + reg_sser_r_masked_intr masked; + + for (i = 0; i < NBR_PORTS; i++) { + struct sync_port *port = &ports[i]; + + if (!port->enabled || port->use_dma) + continue; + + masked = REG_RD(sser, port->regi_sser, r_masked_intr); + /* Data received? */ + if (masked.rdav) { + reg_sser_rw_rec_cfg rec_cfg = + REG_RD(sser, port->regi_sser, rw_rec_cfg); + reg_sser_r_rec_data data = REG_RD(sser, + port->regi_sser, r_rec_data); + found = 1; + /* Read data */ + spin_lock_irqsave(&port->lock, flags); + switch (rec_cfg.sample_size) { + case 8: + *port->writep++ = data.data & 0xff; + break; + case 12: + *port->writep = (data.data & 0x0ff0) >> 4; + *(port->writep + 1) = data.data & 0x0f; + port->writep += 2; + break; + case 16: + *(unsigned short *)port->writep = data.data; + port->writep += 2; + break; + case 24: + *(unsigned int *)port->writep = data.data; + port->writep += 3; + break; + case 32: + *(unsigned int *)port->writep = data.data; + port->writep += 4; + break; + } + + /* Wrap? */ + if (port->writep >= port->flip + port->in_buffer_size) + port->writep = port->flip; + if (port->writep == port->readp) { + /* Receive buf overrun, discard oldest data */ + port->readp++; + /* Wrap? */ + if (port->readp >= port->flip + + port->in_buffer_size) + port->readp = port->flip; + } + spin_unlock_irqrestore(&port->lock, flags); + if (sync_data_avail(port) >= port->inbufchunk) + /* Wake up application */ + wake_up_interruptible(&port->in_wait_q); + } + + /* Transmitter ready? 
 */
+		if (masked.trdy) {
+			found = 1;
+			/* More data to send */
+			if (port->out_buf_count > 0)
+				send_word(port);
+			else {
+				/* Transmission finished */
+				reg_sser_rw_intr_mask intr_mask;
+				intr_mask = REG_RD(sser, port->regi_sser,
+						   rw_intr_mask);
+				intr_mask.trdy = 0;
+				REG_WR(sser, port->regi_sser,
+				       rw_intr_mask, intr_mask);
+				/* Wake up the application */
+				wake_up_interruptible(&port->out_wait_q);
+			}
+		}
+	}
+	return IRQ_RETVAL(found);
+}
+#endif
+
+static int __init etrax_sync_serial_init(void)
+{
+#if 1
+	/* This code will be removed when we move to udev for all devices. */
+	syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
+	if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
+		pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
+		return -1;
+	}
+#else
+	/* Allocate dynamic major number. */
+	if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
+		pr_err("Failed to allocate character device region\n");
+		return -1;
+	}
+#endif
+	syncser_cdev = cdev_alloc();
+	if (!syncser_cdev) {
+		pr_err("Failed to allocate cdev for syncser\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		return -1;
+	}
+	cdev_init(syncser_cdev, &syncser_fops);
+
+	/* Create a sysfs class for syncser */
+	syncser_class = class_create(THIS_MODULE, "syncser_class");
+
+	/* Initialize Ports */
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
+	if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
+		pr_warn("Unable to alloc pins for synchronous serial port 0\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		return -EIO;
+	}
+	initialize_port(0);
+	ports[0].enabled = 1;
+	/* Register with sysfs so udev can pick it up. */
+	device_create(syncser_class, NULL, syncser_first, NULL,
+		      "%s%d", SYNCSER_NAME, 0);
+#endif
+
+#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
+	if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
+		pr_warn("Unable to alloc pins for synchronous serial port 1\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		class_destroy(syncser_class);
+		return -EIO;
+	}
+	initialize_port(1);
+	ports[1].enabled = 1;
+	/* Register with sysfs so udev can pick it up.  Port 1 gets minor
+	 * number 1 and the name syncser1. */
+	device_create(syncser_class, NULL,
+		      MKDEV(MAJOR(syncser_first), 1), NULL,
+		      "%s%d", SYNCSER_NAME, 1);
+#endif
+
+	/* Add it to the system */
+	if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
+		pr_err("Failed to add syncser as char device\n");
+		device_destroy(syncser_class, syncser_first);
+		class_destroy(syncser_class);
+		cdev_del(syncser_cdev);
+		unregister_chrdev_region(syncser_first, minor_count);
+		return -1;
+	}
+
+	pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
+		SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
+
+	return 0;
+}
+
+static void __exit etrax_sync_serial_exit(void)
+{
+	int i;
+	device_destroy(syncser_class, syncser_first);
+	class_destroy(syncser_class);
+
+	if (syncser_cdev) {
+		cdev_del(syncser_cdev);
+		unregister_chrdev_region(syncser_first, minor_count);
+	}
+	for (i = 0; i < NBR_PORTS; i++) {
+		struct sync_port *port = &ports[i];
+		if (port->init_irqs == dma_irq_setup) {
+			/* Free dma irqs and dma channels. */
+#ifdef SYNC_SER_DMA
+			artpec_free_dma(port->dma_in_nbr);
+			artpec_free_dma(port->dma_out_nbr);
+			free_irq(port->dma_out_intr_vect, port);
+			free_irq(port->dma_in_intr_vect, port);
+#endif
+		} else if (port->init_irqs == manual_irq_setup) {
+			/* Free the manual irq. */
+			free_irq(port->syncser_intr_vect, port);
+		}
+	}
+
+	pr_info("ARTPEC synchronous serial port unregistered\n");
+}
+
+module_init(etrax_sync_serial_init);
+module_exit(etrax_sync_serial_exit);
+
+MODULE_LICENSE("GPL");
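For readers wiring this driver up from user space, the ioctl interface above is easiest to see in a short example. The sketch below is not part of the patch; it assumes the SSP_* commands and flag values exported by <asm/sync_serial.h> and the /dev/syncser0 node created by device_create() above, and error handling is abbreviated. A timestamped read would instead fill in a struct ssp_request (with len a multiple of IN_DESCR_SIZE) and issue ioctl(fd, SSP_INPUT, &req).

/*
 * Hypothetical user-space test program -- a sketch only, not part of
 * the driver.  Device name and ioctl flags follow the code above.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/sync_serial.h>

int main(void)
{
	static const char msg[] = "hello";
	int fd = open("/dev/syncser0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Drive clock and frame pin: transmit as master. */
	if (ioctl(fd, SSP_MODE, MASTER_OUTPUT) < 0)
		perror("SSP_MODE");

	/* Word-level frame sync, 8-bit words, MSB first. */
	if (ioctl(fd, SSP_FRAME_SYNC,
		  NORMAL_SYNC | WORD_SYNC | WORD_SIZE_8 | BIT_ORDER_MSB) < 0)
		perror("SSP_FRAME_SYNC");

	/* The driver starts the port on first write if needed. */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");

	close(fd);
	return 0;
}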