From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 tree as the OPNFV kvm base. It comes from

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git

branch linux-4.1.y-rt, and the base commit is:

commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior
Date:   Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior

Importing a snapshot like this loses all of the upstream git history,
which is not ideal; we should move to a dedicated OPNFV project
repository in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
 kernel/drivers/char/Kconfig | 615 +++
 kernel/drivers/char/Makefile | 64 +
 kernel/drivers/char/agp/Kconfig | 162 +
 kernel/drivers/char/agp/Makefile | 22 +
 kernel/drivers/char/agp/agp.h | 290 ++
 kernel/drivers/char/agp/ali-agp.c | 422 ++
 kernel/drivers/char/agp/alpha-agp.c | 222 +
 kernel/drivers/char/agp/amd-k7-agp.c | 566 +++
 kernel/drivers/char/agp/amd64-agp.c | 818 ++++
 kernel/drivers/char/agp/ati-agp.c | 584 +++
 kernel/drivers/char/agp/backend.c | 366 ++
 kernel/drivers/char/agp/compat_ioctl.c | 287 ++
 kernel/drivers/char/agp/compat_ioctl.h | 106 +
 kernel/drivers/char/agp/efficeon-agp.c | 478 +++
 kernel/drivers/char/agp/frontend.c | 1068 +++++
 kernel/drivers/char/agp/generic.c | 1424 +++++++
 kernel/drivers/char/agp/hp-agp.c | 553 +++
 kernel/drivers/char/agp/i460-agp.c | 659 +++
 kernel/drivers/char/agp/intel-agp.c | 924 +++++
 kernel/drivers/char/agp/intel-agp.h | 193 +
 kernel/drivers/char/agp/intel-gtt.c | 1442 +++++++
 kernel/drivers/char/agp/isoch.c | 470 +++
 kernel/drivers/char/agp/nvidia-agp.c | 476 +++
 kernel/drivers/char/agp/parisc-agp.c | 426 ++
 kernel/drivers/char/agp/sgi-agp.c | 338 ++
 kernel/drivers/char/agp/sis-agp.c | 452 ++
 kernel/drivers/char/agp/sworks-agp.c | 569 +++
 kernel/drivers/char/agp/uninorth-agp.c | 722 ++++
 kernel/drivers/char/agp/via-agp.c | 598 +++
 kernel/drivers/char/apm-emulation.c | 745 ++++
 kernel/drivers/char/applicom.c | 842 ++++
 kernel/drivers/char/applicom.h | 85 +
 kernel/drivers/char/bfin-otp.c | 275 ++
 kernel/drivers/char/bsr.c | 361 ++
 kernel/drivers/char/ds1302.c | 357 ++
 kernel/drivers/char/ds1620.c | 435 ++
 kernel/drivers/char/dsp56k.c | 534 +++
 kernel/drivers/char/dtlk.c | 663 +++
 kernel/drivers/char/efirtc.c | 408 ++
 kernel/drivers/char/generic_nvram.c | 174 +
 kernel/drivers/char/genrtc.c | 539 +++
 kernel/drivers/char/hangcheck-timer.c | 188 +
 kernel/drivers/char/hpet.c | 1104 +++++
 kernel/drivers/char/hw_random/Kconfig | 377 ++
 kernel/drivers/char/hw_random/Makefile | 33 +
 kernel/drivers/char/hw_random/amd-rng.c | 169 +
 kernel/drivers/char/hw_random/atmel-rng.c | 148 +
 kernel/drivers/char/hw_random/bcm2835-rng.c | 111 +
 kernel/drivers/char/hw_random/bcm63xx-rng.c | 146 +
 kernel/drivers/char/hw_random/core.c | 590 +++
 kernel/drivers/char/hw_random/exynos-rng.c | 172 +
 kernel/drivers/char/hw_random/geode-rng.c | 139 +
 kernel/drivers/char/hw_random/intel-rng.c | 418 ++
 kernel/drivers/char/hw_random/iproc-rng200.c | 239 ++
 kernel/drivers/char/hw_random/ixp4xx-rng.c | 75 +
 kernel/drivers/char/hw_random/msm-rng.c | 187 +
 kernel/drivers/char/hw_random/mxc-rnga.c | 217 +
 kernel/drivers/char/hw_random/n2-asm.S | 79 +
 kernel/drivers/char/hw_random/n2-drv.c | 759 ++++
 kernel/drivers/char/hw_random/n2rng.h | 118 +
 kernel/drivers/char/hw_random/nomadik-rng.c | 106 +
 kernel/drivers/char/hw_random/octeon-rng.c | 128 +
 kernel/drivers/char/hw_random/omap-rng.c | 460 ++
 kernel/drivers/char/hw_random/omap3-rom-rng.c | 139 +
 kernel/drivers/char/hw_random/pasemi-rng.c | 155 +
 kernel/drivers/char/hw_random/powernv-rng.c | 81 +
 kernel/drivers/char/hw_random/ppc4xx-rng.c | 146 +
 kernel/drivers/char/hw_random/pseries-rng.c | 106 +
 kernel/drivers/char/hw_random/timeriomem-rng.c | 213 +
 kernel/drivers/char/hw_random/tpm-rng.c | 50 +
 kernel/drivers/char/hw_random/tx4939-rng.c | 168 +
 kernel/drivers/char/hw_random/via-rng.c | 231 ++
 kernel/drivers/char/hw_random/virtio-rng.c | 212 +
 kernel/drivers/char/hw_random/xgene-rng.c | 433 ++
 kernel/drivers/char/i8k.c | 1007 +++++
 kernel/drivers/char/ipmi/Kconfig | 90 +
 kernel/drivers/char/ipmi/Makefile | 13 +
 kernel/drivers/char/ipmi/ipmi_bt_sm.c | 705 ++++
 kernel/drivers/char/ipmi/ipmi_devintf.c | 992 +++++
 kernel/drivers/char/ipmi/ipmi_kcs_sm.c | 551 +++
 kernel/drivers/char/ipmi/ipmi_msghandler.c | 4615 +++++++++++++++++++++
 kernel/drivers/char/ipmi/ipmi_powernv.c | 311 ++
 kernel/drivers/char/ipmi/ipmi_poweroff.c | 749 ++++
 kernel/drivers/char/ipmi/ipmi_si_intf.c | 3874 +++++
 kernel/drivers/char/ipmi/ipmi_si_sm.h | 141 +
 kernel/drivers/char/ipmi/ipmi_smic_sm.c | 600 +++
 kernel/drivers/char/ipmi/ipmi_ssif.c | 2017 +++++
 kernel/drivers/char/ipmi/ipmi_watchdog.c | 1385 +++++++
 kernel/drivers/char/lp.c | 1063 +++++
 kernel/drivers/char/mbcs.c | 852 ++++
 kernel/drivers/char/mbcs.h | 553 +++
 kernel/drivers/char/mem.c | 872 ++++
 kernel/drivers/char/misc.c | 301 ++
 kernel/drivers/char/mmtimer.c | 858 ++++
 kernel/drivers/char/msm_smd_pkt.c | 465 +++
 kernel/drivers/char/mspec.c | 450 ++
 kernel/drivers/char/mwave/3780i.c | 738 ++++
 kernel/drivers/char/mwave/3780i.h | 358 ++
 kernel/drivers/char/mwave/Makefile | 15 +
 kernel/drivers/char/mwave/README | 47 +
 kernel/drivers/char/mwave/mwavedd.c | 697 ++++
 kernel/drivers/char/mwave/mwavedd.h | 152 +
 kernel/drivers/char/mwave/mwavepub.h | 89 +
 kernel/drivers/char/mwave/smapi.c | 570 +++
 kernel/drivers/char/mwave/smapi.h | 80 +
 kernel/drivers/char/mwave/tp3780i.c | 580 +++
 kernel/drivers/char/mwave/tp3780i.h | 103 +
 kernel/drivers/char/nsc_gpio.c | 139 +
 kernel/drivers/char/nvram.c | 724 ++++
 kernel/drivers/char/nwbutton.c | 247 ++
 kernel/drivers/char/nwbutton.h | 40 +
 kernel/drivers/char/nwflash.c | 654 +++
 kernel/drivers/char/pc8736x_gpio.c | 352 ++
 kernel/drivers/char/pcmcia/Kconfig | 56 +
 kernel/drivers/char/pcmcia/Makefile | 9 +
 kernel/drivers/char/pcmcia/cm4000_cs.c | 1924 +++++++++
 kernel/drivers/char/pcmcia/cm4040_cs.c | 687 +++
 kernel/drivers/char/pcmcia/cm4040_cs.h | 47 +
 kernel/drivers/char/pcmcia/synclink_cs.c | 4340 +++++++++++++++++++
 kernel/drivers/char/ppdev.c | 811 ++++
 kernel/drivers/char/ps3flash.c | 458 ++
 kernel/drivers/char/random.c | 1776 ++++++++
 kernel/drivers/char/raw.c | 369 ++
 kernel/drivers/char/rtc.c | 1427 +++++++
 kernel/drivers/char/scx200_gpio.c | 132 +
 kernel/drivers/char/snsc.c | 465 +++
 kernel/drivers/char/snsc.h | 92 +
 kernel/drivers/char/snsc_event.c | 303 ++
 kernel/drivers/char/sonypi.c | 1563 +++++++
 kernel/drivers/char/tb0219.c | 371 ++
 kernel/drivers/char/tile-srom.c | 472 +++
 kernel/drivers/char/tlclk.c | 937 +++++
 kernel/drivers/char/toshiba.c | 546 +++
 kernel/drivers/char/tpm/Kconfig | 126 +
 kernel/drivers/char/tpm/Makefile | 25 +
 kernel/drivers/char/tpm/st33zp24/Kconfig | 30 +
 kernel/drivers/char/tpm/st33zp24/Makefile | 12 +
 kernel/drivers/char/tpm/st33zp24/i2c.c | 276 ++
 kernel/drivers/char/tpm/st33zp24/spi.c | 399 ++
 kernel/drivers/char/tpm/st33zp24/st33zp24.c | 698 ++++
 kernel/drivers/char/tpm/st33zp24/st33zp24.h | 37 +
 kernel/drivers/char/tpm/tpm-chip.c | 268 ++
 kernel/drivers/char/tpm/tpm-dev.c | 183 +
 kernel/drivers/char/tpm/tpm-interface.c | 1056 +++++
 kernel/drivers/char/tpm/tpm-sysfs.c | 299 ++
 kernel/drivers/char/tpm/tpm.h | 439 ++
 kernel/drivers/char/tpm/tpm2-cmd.c | 646 +++
 kernel/drivers/char/tpm/tpm_acpi.c | 109 +
 kernel/drivers/char/tpm/tpm_atmel.c | 227 +
 kernel/drivers/char/tpm/tpm_atmel.h | 131 +
 kernel/drivers/char/tpm/tpm_crb.c | 344 ++
 kernel/drivers/char/tpm/tpm_eventlog.c | 414 ++
 kernel/drivers/char/tpm/tpm_eventlog.h | 86 +
 kernel/drivers/char/tpm/tpm_i2c_atmel.c | 230 +
 kernel/drivers/char/tpm/tpm_i2c_infineon.c | 724 ++++
 kernel/drivers/char/tpm/tpm_i2c_nuvoton.c | 654 +++
 kernel/drivers/char/tpm/tpm_ibmvtpm.c | 699 ++++
 kernel/drivers/char/tpm/tpm_ibmvtpm.h | 76 +
 kernel/drivers/char/tpm/tpm_infineon.c | 629 +++
 kernel/drivers/char/tpm/tpm_nsc.c | 383 ++
 kernel/drivers/char/tpm/tpm_of.c | 73 +
 kernel/drivers/char/tpm/tpm_ppi.c | 371 ++
 kernel/drivers/char/tpm/tpm_tis.c | 1013 +++++
 kernel/drivers/char/tpm/xen-tpmfront.c | 401 ++
 kernel/drivers/char/ttyprintk.c | 228 +
 kernel/drivers/char/uv_mmtimer.c | 220 +
 kernel/drivers/char/virtio_console.c | 2320 +++++
 kernel/drivers/char/xilinx_hwicap/Makefile | 7 +
 kernel/drivers/char/xilinx_hwicap/buffer_icap.c | 365 ++
 kernel/drivers/char/xilinx_hwicap/buffer_icap.h | 54 +
 kernel/drivers/char/xilinx_hwicap/fifo_icap.c | 393 ++
 kernel/drivers/char/xilinx_hwicap/fifo_icap.h | 59 +
 kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.c | 894 ++++
 kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.h | 219 +
 kernel/drivers/char/xillybus/Kconfig | 33 +
 kernel/drivers/char/xillybus/Makefile | 7 +
 kernel/drivers/char/xillybus/xillybus.h | 160 +
 kernel/drivers/char/xillybus/xillybus_core.c | 2105 ++++++++++
 kernel/drivers/char/xillybus/xillybus_of.c | 186 +
 kernel/drivers/char/xillybus/xillybus_pcie.c | 228 +
 180 files changed, 92001 insertions(+)
 create mode 100644 kernel/drivers/char/Kconfig
 create mode 100644 kernel/drivers/char/Makefile
 create mode 100644 kernel/drivers/char/agp/Kconfig
 create mode 100644 kernel/drivers/char/agp/Makefile
 create mode 100644 kernel/drivers/char/agp/agp.h
 create mode 100644 kernel/drivers/char/agp/ali-agp.c
 create mode 100644 kernel/drivers/char/agp/alpha-agp.c
 create mode 100644 kernel/drivers/char/agp/amd-k7-agp.c
 create mode 100644 kernel/drivers/char/agp/amd64-agp.c
 create mode 100644 kernel/drivers/char/agp/ati-agp.c
 create mode 100644 kernel/drivers/char/agp/backend.c
 create mode 100644 kernel/drivers/char/agp/compat_ioctl.c
 create mode 100644 kernel/drivers/char/agp/compat_ioctl.h
 create mode 100644 kernel/drivers/char/agp/efficeon-agp.c
 create mode 100644 kernel/drivers/char/agp/frontend.c
 create mode 100644 kernel/drivers/char/agp/generic.c
 create mode 100644 kernel/drivers/char/agp/hp-agp.c
 create mode 100644 kernel/drivers/char/agp/i460-agp.c
 create mode 100644 kernel/drivers/char/agp/intel-agp.c
 create mode 100644 kernel/drivers/char/agp/intel-agp.h
 create mode 100644 kernel/drivers/char/agp/intel-gtt.c
 create mode 100644 kernel/drivers/char/agp/isoch.c
 create mode 100644 kernel/drivers/char/agp/nvidia-agp.c
 create mode 100644 kernel/drivers/char/agp/parisc-agp.c
 create mode 100644 kernel/drivers/char/agp/sgi-agp.c
 create mode 100644 kernel/drivers/char/agp/sis-agp.c
 create mode 100644 kernel/drivers/char/agp/sworks-agp.c
 create mode 100644 kernel/drivers/char/agp/uninorth-agp.c
 create mode 100644 kernel/drivers/char/agp/via-agp.c
 create mode 100644 kernel/drivers/char/apm-emulation.c
 create mode 100644 kernel/drivers/char/applicom.c
 create mode 100644 kernel/drivers/char/applicom.h
 create mode 100644 kernel/drivers/char/bfin-otp.c
 create mode 100644 kernel/drivers/char/bsr.c
 create mode 100644 kernel/drivers/char/ds1302.c
 create mode 100644 kernel/drivers/char/ds1620.c
 create mode 100644 kernel/drivers/char/dsp56k.c
 create mode 100644 kernel/drivers/char/dtlk.c
 create mode 100644 kernel/drivers/char/efirtc.c
 create mode 100644 kernel/drivers/char/generic_nvram.c
 create mode 100644 kernel/drivers/char/genrtc.c
 create mode 100644 kernel/drivers/char/hangcheck-timer.c
 create mode 100644 kernel/drivers/char/hpet.c
 create mode 100644 kernel/drivers/char/hw_random/Kconfig
 create mode 100644 kernel/drivers/char/hw_random/Makefile
 create mode 100644 kernel/drivers/char/hw_random/amd-rng.c
 create mode 100644 kernel/drivers/char/hw_random/atmel-rng.c
 create mode 100644 kernel/drivers/char/hw_random/bcm2835-rng.c
 create mode 100644 kernel/drivers/char/hw_random/bcm63xx-rng.c
 create mode 100644 kernel/drivers/char/hw_random/core.c
 create mode 100644 kernel/drivers/char/hw_random/exynos-rng.c
 create mode 100644 kernel/drivers/char/hw_random/geode-rng.c
 create mode 100644 kernel/drivers/char/hw_random/intel-rng.c
 create mode 100644 kernel/drivers/char/hw_random/iproc-rng200.c
 create mode 100644 kernel/drivers/char/hw_random/ixp4xx-rng.c
 create mode 100644 kernel/drivers/char/hw_random/msm-rng.c
 create mode 100644 kernel/drivers/char/hw_random/mxc-rnga.c
 create mode 100644 kernel/drivers/char/hw_random/n2-asm.S
 create mode 100644 kernel/drivers/char/hw_random/n2-drv.c
 create mode 100644 kernel/drivers/char/hw_random/n2rng.h
 create mode 100644 kernel/drivers/char/hw_random/nomadik-rng.c
 create mode 100644 kernel/drivers/char/hw_random/octeon-rng.c
 create mode 100644 kernel/drivers/char/hw_random/omap-rng.c
 create mode 100644 kernel/drivers/char/hw_random/omap3-rom-rng.c
 create mode 100644 kernel/drivers/char/hw_random/pasemi-rng.c
 create mode 100644 kernel/drivers/char/hw_random/powernv-rng.c
 create mode 100644 kernel/drivers/char/hw_random/ppc4xx-rng.c
 create mode 100644 kernel/drivers/char/hw_random/pseries-rng.c
 create mode 100644 kernel/drivers/char/hw_random/timeriomem-rng.c
 create mode 100644 kernel/drivers/char/hw_random/tpm-rng.c
 create mode 100644 kernel/drivers/char/hw_random/tx4939-rng.c
 create mode 100644 kernel/drivers/char/hw_random/via-rng.c
 create mode 100644 kernel/drivers/char/hw_random/virtio-rng.c
 create mode 100644 kernel/drivers/char/hw_random/xgene-rng.c
 create mode 100644 kernel/drivers/char/i8k.c
 create mode 100644 kernel/drivers/char/ipmi/Kconfig
 create mode 100644 kernel/drivers/char/ipmi/Makefile
 create mode 100644 kernel/drivers/char/ipmi/ipmi_bt_sm.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_devintf.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_kcs_sm.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_msghandler.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_powernv.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_poweroff.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_si_intf.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_si_sm.h
 create mode 100644 kernel/drivers/char/ipmi/ipmi_smic_sm.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_ssif.c
 create mode 100644 kernel/drivers/char/ipmi/ipmi_watchdog.c
 create mode 100644 kernel/drivers/char/lp.c
 create mode 100644 kernel/drivers/char/mbcs.c
 create mode 100644 kernel/drivers/char/mbcs.h
 create mode 100644 kernel/drivers/char/mem.c
 create mode 100644 kernel/drivers/char/misc.c
 create mode 100644 kernel/drivers/char/mmtimer.c
 create mode 100644 kernel/drivers/char/msm_smd_pkt.c
 create mode 100644 kernel/drivers/char/mspec.c
 create mode 100644 kernel/drivers/char/mwave/3780i.c
 create mode 100644 kernel/drivers/char/mwave/3780i.h
 create mode 100644 kernel/drivers/char/mwave/Makefile
 create mode 100644 kernel/drivers/char/mwave/README
 create mode 100644 kernel/drivers/char/mwave/mwavedd.c
 create mode 100644 kernel/drivers/char/mwave/mwavedd.h
 create mode 100644 kernel/drivers/char/mwave/mwavepub.h
 create mode 100644 kernel/drivers/char/mwave/smapi.c
 create mode 100644 kernel/drivers/char/mwave/smapi.h
 create mode 100644 kernel/drivers/char/mwave/tp3780i.c
 create mode 100644 kernel/drivers/char/mwave/tp3780i.h
 create mode 100644 kernel/drivers/char/nsc_gpio.c
 create mode 100644 kernel/drivers/char/nvram.c
 create mode 100644 kernel/drivers/char/nwbutton.c
 create mode 100644 kernel/drivers/char/nwbutton.h
 create mode 100644 kernel/drivers/char/nwflash.c
 create mode 100644 kernel/drivers/char/pc8736x_gpio.c
 create mode 100644 kernel/drivers/char/pcmcia/Kconfig
 create mode 100644 kernel/drivers/char/pcmcia/Makefile
 create mode 100644 kernel/drivers/char/pcmcia/cm4000_cs.c
 create mode 100644 kernel/drivers/char/pcmcia/cm4040_cs.c
 create mode 100644 kernel/drivers/char/pcmcia/cm4040_cs.h
 create mode 100644 kernel/drivers/char/pcmcia/synclink_cs.c
 create mode 100644 kernel/drivers/char/ppdev.c
 create mode 100644 kernel/drivers/char/ps3flash.c
 create mode 100644 kernel/drivers/char/random.c
 create mode 100644 kernel/drivers/char/raw.c
 create mode 100644 kernel/drivers/char/rtc.c
 create mode 100644 kernel/drivers/char/scx200_gpio.c
 create mode 100644 kernel/drivers/char/snsc.c
 create mode 100644 kernel/drivers/char/snsc.h
 create mode 100644 kernel/drivers/char/snsc_event.c
 create mode 100644 kernel/drivers/char/sonypi.c
 create mode 100644 kernel/drivers/char/tb0219.c
 create mode 100644 kernel/drivers/char/tile-srom.c
 create mode 100644 kernel/drivers/char/tlclk.c
 create mode 100644 kernel/drivers/char/toshiba.c
 create mode 100644 kernel/drivers/char/tpm/Kconfig
 create mode 100644 kernel/drivers/char/tpm/Makefile
 create mode 100644 kernel/drivers/char/tpm/st33zp24/Kconfig
 create mode 100644 kernel/drivers/char/tpm/st33zp24/Makefile
 create mode 100644 kernel/drivers/char/tpm/st33zp24/i2c.c
 create mode 100644 kernel/drivers/char/tpm/st33zp24/spi.c
 create mode 100644 kernel/drivers/char/tpm/st33zp24/st33zp24.c
 create mode 100644 kernel/drivers/char/tpm/st33zp24/st33zp24.h
 create mode 100644 kernel/drivers/char/tpm/tpm-chip.c
 create mode 100644 kernel/drivers/char/tpm/tpm-dev.c
 create mode 100644 kernel/drivers/char/tpm/tpm-interface.c
 create mode 100644 kernel/drivers/char/tpm/tpm-sysfs.c
 create mode 100644 kernel/drivers/char/tpm/tpm.h
 create mode 100644 kernel/drivers/char/tpm/tpm2-cmd.c
 create mode 100644 kernel/drivers/char/tpm/tpm_acpi.c
 create mode 100644 kernel/drivers/char/tpm/tpm_atmel.c
 create mode 100644 kernel/drivers/char/tpm/tpm_atmel.h
 create mode 100644 kernel/drivers/char/tpm/tpm_crb.c
 create mode 100644 kernel/drivers/char/tpm/tpm_eventlog.c
 create mode 100644 kernel/drivers/char/tpm/tpm_eventlog.h
 create mode 100644 kernel/drivers/char/tpm/tpm_i2c_atmel.c
 create mode 100644 kernel/drivers/char/tpm/tpm_i2c_infineon.c
 create mode 100644 kernel/drivers/char/tpm/tpm_i2c_nuvoton.c
 create mode 100644 kernel/drivers/char/tpm/tpm_ibmvtpm.c
 create mode 100644 kernel/drivers/char/tpm/tpm_ibmvtpm.h
 create mode 100644 kernel/drivers/char/tpm/tpm_infineon.c
 create mode 100644 kernel/drivers/char/tpm/tpm_nsc.c
 create mode 100644 kernel/drivers/char/tpm/tpm_of.c
 create mode 100644 kernel/drivers/char/tpm/tpm_ppi.c
 create mode 100644 kernel/drivers/char/tpm/tpm_tis.c
 create mode 100644 kernel/drivers/char/tpm/xen-tpmfront.c
 create mode 100644 kernel/drivers/char/ttyprintk.c
 create mode 100644 kernel/drivers/char/uv_mmtimer.c
 create mode 100644 kernel/drivers/char/virtio_console.c
 create mode 100644 kernel/drivers/char/xilinx_hwicap/Makefile
 create mode 100644 kernel/drivers/char/xilinx_hwicap/buffer_icap.c
 create mode 100644 kernel/drivers/char/xilinx_hwicap/buffer_icap.h
 create mode 100644 kernel/drivers/char/xilinx_hwicap/fifo_icap.c
 create mode 100644 kernel/drivers/char/xilinx_hwicap/fifo_icap.h
 create mode 100644 kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.c
 create mode 100644 kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.h
 create mode 100644 kernel/drivers/char/xillybus/Kconfig
 create mode 100644 kernel/drivers/char/xillybus/Makefile
 create mode 100644 kernel/drivers/char/xillybus/xillybus.h
 create mode 100644 kernel/drivers/char/xillybus/xillybus_core.c
 create mode 100644 kernel/drivers/char/xillybus/xillybus_of.c
 create mode 100644 kernel/drivers/char/xillybus/xillybus_pcie.c

(limited to 'kernel/drivers/char')

diff --git a/kernel/drivers/char/Kconfig b/kernel/drivers/char/Kconfig
new file mode 100644
index 000000000..a4af82217
--- /dev/null
+++ b/kernel/drivers/char/Kconfig
@@ -0,0 +1,615 @@
+#
+# Character device configuration
+#
+
+menu "Character devices"
+
+source "drivers/tty/Kconfig"
+
+config DEVMEM
+	bool "/dev/mem virtual device support"
+	default y
+	help
+	  Say Y here if you want to support the /dev/mem device.
+	  The /dev/mem device is used to access areas of physical
+	  memory.
+	  When in doubt, say "Y".
+
+config DEVKMEM
+	bool "/dev/kmem virtual device support"
+	default y
+	help
+	  Say Y here if you want to support the /dev/kmem device. The
+	  /dev/kmem device is rarely used, but can be used for certain
+	  kind of kernel debugging operations.
+	  When in doubt, say "N".
+
+config SGI_SNSC
+	bool "SGI Altix system controller communication support"
+	depends on (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  If you have an SGI Altix and you want to enable system
+	  controller communication from user space (you want this!),
+	  say Y.  Otherwise, say N.
+
+config SGI_TIOCX
+	bool "SGI TIO CX driver support"
+	depends on (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  If you have an SGI Altix and you have fpga devices attached
+	  to your TIO, say Y here, otherwise say N.
+
+config SGI_MBCS
+	tristate "SGI FPGA Core Services driver support"
+	depends on SGI_TIOCX
+	help
+	  If you have an SGI Altix with an attached SABrick
+	  say Y or M here, otherwise say N.
+
+source "drivers/tty/serial/Kconfig"
+
+config TTY_PRINTK
+	tristate "TTY driver to output user messages via printk"
+	depends on EXPERT && TTY
+	default n
+	---help---
+	  If you say Y here, the support for writing user messages (i.e.
+	  console messages) via printk is available.
+
+	  The feature is useful to inline user messages with kernel
+	  messages.
+	  In order to use this feature, you should output user messages
+	  to /dev/ttyprintk or redirect console to this TTY.
+
+	  If unsure, say N.
+
+config BFIN_OTP
+	tristate "Blackfin On-Chip OTP Memory Support"
+	depends on BLACKFIN && (BF51x || BF52x || BF54x)
+	default y
+	help
+	  If you say Y here, you will get support for a character device
+	  interface into the One Time Programmable memory pages that are
+	  stored on the Blackfin processor.
+	  This will not get you access
+	  to the secure memory pages however.  You will need to write your
+	  own secure code and reader for that.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called bfin-otp.
+
+	  If unsure, it is safe to say Y.
+
+config BFIN_OTP_WRITE_ENABLE
+	bool "Enable writing support of OTP pages"
+	depends on BFIN_OTP
+	default n
+	help
+	  If you say Y here, you will enable support for writing of the
+	  OTP pages.  This is dangerous by nature as you can only program
+	  the pages once, so only enable this option when you actually
+	  need it so as to not inadvertently clobber data.
+
+	  If unsure, say N.
+
+config PRINTER
+	tristate "Parallel printer support"
+	depends on PARPORT
+	---help---
+	  If you intend to attach a printer to the parallel port of your Linux
+	  box (as opposed to using a serial printer; if the connector at the
+	  printer has 9 or 25 holes ["female"], then it's serial), say Y.
+	  Also read the Printing-HOWTO, available from
+	  .
+
+	  It is possible to share one parallel port among several devices
+	  (e.g. printer and ZIP drive) and it is safe to compile the
+	  corresponding drivers into the kernel.
+
+	  To compile this driver as a module, choose M here and read
+	  .  The module will be called lp.
+
+	  If you have several parallel ports, you can specify which ports to
+	  use with the "lp" kernel command line option.  (Try "man bootparam"
+	  or see the documentation of your boot loader (lilo or loadlin) about
+	  how to pass options to the kernel at boot time.)  The syntax of the
+	  "lp" command line option can be found in .
+
+	  If you have more than 8 printers, you need to increase the LP_NO
+	  macro in lp.c and the PARPORT_MAX macro in parport.h.
+
+config LP_CONSOLE
+	bool "Support for console on line printer"
+	depends on PRINTER
+	---help---
+	  If you want kernel messages to be printed out as they occur, you
+	  can have a console on the printer.  This option adds support for
+	  doing that; to actually get it to happen you need to pass the
+	  option "console=lp0" to the kernel at boot time.
+
+	  If the printer is out of paper (or off, or unplugged, or too
+	  busy..) the kernel will stall until the printer is ready again.
+	  By defining CONSOLE_LP_STRICT to 0 (at your own risk) you
+	  can make the kernel continue when this happens,
+	  but it'll lose the kernel messages.
+
+	  If unsure, say N.
+
+config PPDEV
+	tristate "Support for user-space parallel port device drivers"
+	depends on PARPORT
+	---help---
+	  Saying Y to this adds support for /dev/parport device nodes.  This
+	  is needed for programs that want portable access to the parallel
+	  port, for instance deviceid (which displays Plug-and-Play device
+	  IDs).
+
+	  This is the parallel port equivalent of SCSI generic support (sg).
+	  It is safe to say N to this -- it is not needed for normal printing
+	  or parallel port CD-ROM/disk support.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ppdev.
+
+	  If unsure, say N.
+
+source "drivers/tty/hvc/Kconfig"
+
+config VIRTIO_CONSOLE
+	tristate "Virtio console"
+	depends on VIRTIO && TTY
+	select HVC_DRIVER
+	help
+	  Virtio console for use with lguest and other hypervisors.
+
+	  Also serves as a general-purpose serial device for data
+	  transfer between the guest and host.  Character devices at
+	  /dev/vportNpn will be created when corresponding ports are
+	  found, where N is the device number and n is the port number
+	  within that device.
+	  If specified by the host, a sysfs
+	  attribute called 'name' will be populated with a name for
+	  the port which can be used by udev scripts to create a
+	  symlink to the device.
+
+config IBM_BSR
+	tristate "IBM POWER Barrier Synchronization Register support"
+	depends on PPC_PSERIES
+	help
+	  This device exposes a hardware mechanism for fast synchronization
+	  of threads across a large system, which avoids bouncing a cacheline
+	  between several cores on a system.
+
+source "drivers/char/ipmi/Kconfig"
+
+config DS1620
+	tristate "NetWinder thermometer support"
+	depends on ARCH_NETWINDER
+	help
+	  Say Y here to include support for the thermal management hardware
+	  found in the NetWinder.  This driver allows the user to control the
+	  temperature set points and to read the current temperature.
+
+	  It is also possible to say M here to build it as a module (ds1620).
+	  It is recommended to be used on a NetWinder, but it is not a
+	  necessity.
+
+config NWBUTTON
+	tristate "NetWinder Button"
+	depends on ARCH_NETWINDER
+	---help---
+	  If you say Y here and create a character device node /dev/nwbutton
+	  with major and minor numbers 10 and 158 ("man mknod"), then every
+	  time the orange button is pressed a number of times, the number of
+	  times the button was pressed will be written to that device.
+
+	  This is most useful for applications, as yet unwritten, which
+	  perform actions based on how many times the button is pressed in a
+	  row.
+
+	  Do not hold the button down for too long, as the driver does not
+	  alter the behaviour of the hardware reset circuitry attached to the
+	  button; it will still execute a hard reset if the button is held
+	  down for longer than approximately five seconds.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nwbutton.
+
+	  Most people will answer Y to this question and "Reboot Using Button"
+	  below to be able to initiate a system shutdown from the button.
+
+config NWBUTTON_REBOOT
+	bool "Reboot Using Button"
+	depends on NWBUTTON
+	help
+	  If you say Y here, then you will be able to initiate a system
+	  shutdown and reboot by pressing the orange button a number of times.
+	  The number of presses to initiate the shutdown is two by default,
+	  but this can be altered by modifying the value of NUM_PRESSES_REBOOT
+	  in nwbutton.h and recompiling the driver or, if you compile the
+	  driver as a module, you can specify the number of presses at load
+	  time with "insmod button reboot_count=".
+
+config NWFLASH
+	tristate "NetWinder flash support"
+	depends on ARCH_NETWINDER
+	---help---
+	  If you say Y here and create a character device /dev/flash with
+	  major 10 and minor 160 you can manipulate the flash ROM containing
+	  the NetWinder firmware.  Be careful as accidentally overwriting the
+	  flash contents can render your computer unbootable.  On no account
+	  allow random users access to this device. :-)
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nwflash.
+
+	  If you're not sure, say N.
+
+source "drivers/char/hw_random/Kconfig"
+
+config NVRAM
+	tristate "/dev/nvram support"
+	depends on ATARI || X86 || (ARM && RTC_DRV_CMOS) || GENERIC_NVRAM
+	---help---
+	  If you say Y here and create a character special file /dev/nvram
+	  with major number 10 and minor number 144 using mknod ("man mknod"),
+	  you get read and write access to the extra bytes of non-volatile
+	  memory in the real time clock (RTC), which is contained in every PC
+	  and most Ataris.
+	  The actual number of bytes varies, depending on the
+	  nvram in the system, but is usually 114 (128-14 for the RTC).
+
+	  This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
+	  on Ataris. /dev/nvram may be used to view settings there, or to
+	  change them (with some utility). It could also be used to frequently
+	  save a few bits of very important data that may not be lost over
+	  power-off and for which writing to disk is too insecure.  Note
+	  however that most NVRAM space in a PC belongs to the BIOS and you
+	  should NEVER idly tamper with it.  See Ralf Brown's interrupt list
+	  for a guide to the use of CMOS bytes by your BIOS.
+
+	  On Atari machines, /dev/nvram is always configured and does not need
+	  to be selected.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nvram.
+
+#
+# These legacy RTC drivers just cause too many conflicts with the generic
+# RTC framework ... let's not even try to coexist any more.
+#
+if RTC_LIB=n
+
+config RTC
+	tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
+	depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
+		&& !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN && !UML
+	---help---
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 10 and minor number 135 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+	  Every PC has such a clock built in.  It can be used to generate
+	  signals from as low as 1Hz up to 8192Hz, and can also be used
+	  as a 24 hour alarm.  It reports status information via the file
+	  /proc/driver/rtc and its behaviour is set by various ioctls on
+	  /dev/rtc.
+
+	  If you run Linux on a multiprocessor machine and said Y to
+	  "Symmetric Multi Processing" above, you should say Y here to read
+	  and set the RTC in an SMP compatible fashion.
+
+	  If you think you have a use for such a device (such as periodic data
+	  sampling), then say Y here, and read
+	  for details.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rtc.
+
+config JS_RTC
+	tristate "Enhanced Real Time Clock Support"
+	depends on SPARC32 && PCI
+	---help---
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 10 and minor number 135 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+	  Every PC has such a clock built in.  It can be used to generate
+	  signals from as low as 1Hz up to 8192Hz, and can also be used
+	  as a 24 hour alarm.  It reports status information via the file
+	  /proc/driver/rtc and its behaviour is set by various ioctls on
+	  /dev/rtc.
+
+	  If you think you have a use for such a device (such as periodic data
+	  sampling), then say Y here, and read
+	  for details.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called js-rtc.
+
+config GEN_RTC
+	tristate "Generic /dev/rtc emulation"
+	depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+	---help---
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 10 and minor number 135 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+	  It reports status information via the file /proc/driver/rtc and its
+	  behaviour is set by various ioctls on /dev/rtc.
+	  If you enable the
+	  "extended RTC operation" below it will also provide an emulation
+	  for RTC_UIE which is required by some programs and may improve
+	  precision in some cases.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called genrtc.
+
+config GEN_RTC_X
+	bool "Extended RTC operation"
+	depends on GEN_RTC
+	help
+	  Provides an emulation for RTC_UIE which is required by some programs
+	  and may improve precision of the generic RTC support in some cases.
+
+config EFI_RTC
+	bool "EFI Real Time Clock Services"
+	depends on IA64
+
+config DS1302
+	tristate "DS1302 RTC support"
+	depends on M32R && (PLAT_M32700UT || PLAT_OPSPUT)
+	help
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 121 and minor number 0 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+endif # RTC_LIB
+
+config DTLK
+	tristate "Double Talk PC internal speech card support"
+	depends on ISA
+	help
+	  This driver is for the DoubleTalk PC, a speech synthesizer
+	  manufactured by RC Systems ().  It is also
+	  called the `internal DoubleTalk'.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called dtlk.
+
+config XILINX_HWICAP
+	tristate "Xilinx HWICAP Support"
+	depends on XILINX_VIRTEX || MICROBLAZE
+	help
+	  This option enables support for the Xilinx Internal Configuration
+	  Access Port (ICAP) driver.  The ICAP is used on Xilinx Virtex
+	  FPGA platforms to partially reconfigure the FPGA at runtime.
+
+	  If unsure, say N.
+
+config R3964
+	tristate "Siemens R3964 line discipline"
+	depends on TTY
+	---help---
+	  This driver allows synchronous communication with devices using the
+	  Siemens R3964 packet protocol.  Unless you are dealing with special
+	  hardware like PLCs, you are unlikely to need this.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called n_r3964.
+
+	  If unsure, say N.
+
+config APPLICOM
+	tristate "Applicom intelligent fieldbus card support"
+	depends on PCI
+	---help---
+	  This driver provides the kernel-side support for the intelligent
+	  fieldbus cards made by Applicom International.  More information
+	  about these cards can be found on the WWW at the address
+	  , or by email from David Woodhouse
+	  .
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called applicom.
+
+	  If unsure, say N.
+
+config SONYPI
+	tristate "Sony Vaio Programmable I/O Control Device support"
+	depends on X86_32 && PCI && INPUT
+	---help---
+	  This driver enables access to the Sony Programmable I/O Control
+	  Device which can be found in many (all ?) Sony Vaio laptops.
+
+	  If you have one of those laptops, read
+	  , and say Y or M here.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sonypi.
+
+config GPIO_TB0219
+	tristate "TANBAC TB0219 GPIO support"
+	depends on TANBAC_TB022X
+	select GPIO_VR41XX
+
+source "drivers/char/pcmcia/Kconfig"
+
+config MWAVE
+	tristate "ACP Modem (Mwave) support"
+	depends on X86 && TTY
+	select SERIAL_8250
+	---help---
+	  The ACP modem (Mwave) for Linux is a WinModem.  It is composed of a
+	  kernel driver and a user level application.  Together these components
+	  support direct attachment to public switched telephone networks (PSTNs)
+	  and support selected world wide countries.
+
+	  This version of the ACP Modem driver supports the IBM Thinkpad 600E,
+	  600, and 770 that include on board ACP modem hardware.
+
+	  The modem also supports the standard communications port interface
+	  (ttySx) and is compatible with the Hayes AT Command Set.
+
+	  The user level application needed to use this driver can be found at
+	  the IBM Linux Technology Center (LTC) web site:
+	  .
+
+	  If you own one of the above IBM Thinkpads which has the Mwave chipset
+	  in it, say Y.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mwave.
+
+config SCx200_GPIO
+	tristate "NatSemi SCx200 GPIO Support"
+	depends on SCx200
+	select NSC_GPIO
+	help
+	  Give userspace access to the GPIO pins on the National
+	  Semiconductor SCx200 processors.
+
+	  If compiled as a module, it will be called scx200_gpio.
+
+config PC8736x_GPIO
+	tristate "NatSemi PC8736x GPIO Support"
+	depends on X86_32 && !UML
+	default SCx200_GPIO	# mostly N
+	select NSC_GPIO		# needed for support routines
+	help
+	  Give userspace access to the GPIO pins on the National
+	  Semiconductor PC-8736x (x=[03456]) SuperIO chip.  The chip
+	  has multiple functional units, including several managed by the
+	  hwmon/pc87360 driver.  Tested with the PC-87366.
+
+	  If compiled as a module, it will be called pc8736x_gpio.
+
+config NSC_GPIO
+	tristate "NatSemi Base GPIO Support"
+	depends on X86_32
+	# selected by SCx200_GPIO and PC8736x_GPIO
+	# what about 2 selectors differing: m != y
+	help
+	  Common support used (and needed) by the scx200_gpio and
+	  pc8736x_gpio drivers.  If those drivers are built as
+	  modules, this one will be too, named nsc_gpio.
+
+config RAW_DRIVER
+	tristate "RAW driver (/dev/raw/rawN)"
+	depends on BLOCK
+	help
+	  The raw driver permits block devices to be bound to /dev/raw/rawN.
+	  Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
+	  See the raw(8) manpage for more details.
+
+	  Applications should preferably open the device (eg /dev/hda1)
+	  with the O_DIRECT flag.
+
+config MAX_RAW_DEVS
+	int "Maximum number of RAW devices to support (1-65536)"
+	depends on RAW_DRIVER
+	range 1 65536
+	default "256"
+	help
+	  The maximum number of RAW devices that are supported.
+	  Default is 256.  Increase this number in case you need lots of
+	  raw devices.
+
+config HPET
+	bool "HPET - High Precision Event Timer" if (X86 || IA64)
+	default n
+	depends on ACPI
+	help
+	  If you say Y here, you will have a miscdevice named "/dev/hpet/".  Each
+	  open selects one of the timers supported by the HPET.  The timers are
+	  non-periodic and/or periodic.
+
+config HPET_MMAP
+	bool "Allow mmap of HPET"
+	default y
+	depends on HPET
+	help
+	  If you say Y here, user applications will be able to mmap
+	  the HPET registers.
+
+config HPET_MMAP_DEFAULT
+	bool "Enable HPET MMAP access by default"
+	default y
+	depends on HPET_MMAP
+	help
+	  In some hardware implementations, the page containing HPET
+	  registers may also contain other things that shouldn't be
+	  exposed to the user.  This option selects the default (if
+	  kernel parameter hpet_mmap is not set) user access to the
+	  registers for applications that require it.
+
+config HANGCHECK_TIMER
+	tristate "Hangcheck timer"
+	depends on X86 || IA64 || PPC64 || S390
+	help
+	  The hangcheck-timer module detects when the system has gone
+	  out to lunch past a certain margin.  It can reboot the system
+	  or merely print a warning.
+
+config MMTIMER
+	tristate "MMTIMER Memory mapped RTC for SGI Altix"
+	depends on IA64_GENERIC || IA64_SGI_SN2
+	default y
+	help
+	  The mmtimer device allows direct userspace access to the
+	  Altix system timer.
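[Editor's aside on the HPET entries above: with HPET_MMAP enabled, user space can map the timer's register page straight through /dev/hpet. The following is a minimal user-space sketch, not part of this patch; the 0xf0 main-counter offset comes from the HPET specification rather than from this Kconfig text, and error handling is trimmed for brevity.]

    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
            int fd = open("/dev/hpet", O_RDONLY);
            if (fd < 0)
                    return 1;
            /* Map the read-only HPET register page exposed by the driver. */
            void *regs = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
            if (regs == MAP_FAILED)
                    return 1;
            /* The 64-bit main counter lives at offset 0xf0 in the block. */
            volatile uint64_t *counter =
                    (volatile uint64_t *)((char *)regs + 0xf0);
            printf("HPET main counter: %llu\n", (unsigned long long)*counter);
            munmap(regs, 4096);
            close(fd);
            return 0;
    }

[Whether the mmap succeeds is governed by HPET_MMAP and the hpet_mmap= kernel parameter described in HPET_MMAP_DEFAULT.]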
+
+config UV_MMTIMER
+	tristate "UV_MMTIMER Memory mapped RTC for SGI UV"
+	depends on X86_UV
+	default m
+	help
+	  The uv_mmtimer device allows direct userspace access to the
+	  UV system timer.
+
+source "drivers/char/tpm/Kconfig"
+
+config TELCLOCK
+	tristate "Telecom clock driver for ATCA SBC"
+	depends on X86
+	default n
+	help
+	  The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+	  ATCA computers and allows direct userspace access to the
+	  configuration of the telecom clock configuration settings.  This
+	  device is used for hardware synchronization across the ATCA backplane
+	  fabric.  Upon loading, the driver exports a sysfs directory,
+	  /sys/devices/platform/telco_clock, with a number of files for
+	  controlling the behavior of this hardware.
+
+config DEVPORT
+	bool
+	depends on !M68K
+	depends on ISA || PCI
+	default y
+
+source "drivers/s390/char/Kconfig"
+
+config MSM_SMD_PKT
+	bool "Enable device interface for some SMD packet ports"
+	default n
+	depends on MSM_SMD
+	help
+	  Enables userspace clients to read and write to some packet SMD
+	  ports via device interface for MSM chipset.
+
+config TILE_SROM
+	bool "Character-device access via hypervisor to the Tilera SPI ROM"
+	depends on TILE
+	default y
+	---help---
+	  This device provides character-level read-write access
+	  to the SROM, typically via the "0", "1", and "2" devices
+	  in /dev/srom/.  The Tilera hypervisor makes the flash
+	  device appear much like a simple EEPROM, and knows
+	  how to partition a single ROM for multiple purposes.
+
+source "drivers/char/xillybus/Kconfig"
+
+endmenu
+
diff --git a/kernel/drivers/char/Makefile b/kernel/drivers/char/Makefile
new file mode 100644
index 000000000..d06cde260
--- /dev/null
+++ b/kernel/drivers/char/Makefile
@@ -0,0 +1,64 @@
+#
+# Makefile for the kernel character device drivers.
+#
+
+obj-y				+= mem.o random.o
+obj-$(CONFIG_TTY_PRINTK)	+= ttyprintk.o
+obj-y				+= misc.o
+obj-$(CONFIG_ATARI_DSP56K)	+= dsp56k.o
+obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
+obj-$(CONFIG_RAW_DRIVER)	+= raw.o
+obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
+obj-$(CONFIG_MSM_SMD_PKT)	+= msm_smd_pkt.o
+obj-$(CONFIG_MSPEC)		+= mspec.o
+obj-$(CONFIG_MMTIMER)		+= mmtimer.o
+obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
+obj-$(CONFIG_IBM_BSR)		+= bsr.o
+obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
+obj-$(CONFIG_BFIN_OTP)		+= bfin-otp.o
+
+obj-$(CONFIG_PRINTER)		+= lp.o
+
+obj-$(CONFIG_APM_EMULATION)	+= apm-emulation.o
+
+obj-$(CONFIG_DTLK)		+= dtlk.o
+obj-$(CONFIG_APPLICOM)		+= applicom.o
+obj-$(CONFIG_SONYPI)		+= sonypi.o
+obj-$(CONFIG_RTC)		+= rtc.o
+obj-$(CONFIG_HPET)		+= hpet.o
+obj-$(CONFIG_GEN_RTC)		+= genrtc.o
+obj-$(CONFIG_EFI_RTC)		+= efirtc.o
+obj-$(CONFIG_DS1302)		+= ds1302.o
+obj-$(CONFIG_XILINX_HWICAP)	+= xilinx_hwicap/
+ifeq ($(CONFIG_GENERIC_NVRAM),y)
+  obj-$(CONFIG_NVRAM)	+= generic_nvram.o
+else
+  obj-$(CONFIG_NVRAM)	+= nvram.o
+endif
+obj-$(CONFIG_TOSHIBA)		+= toshiba.o
+obj-$(CONFIG_I8K)		+= i8k.o
+obj-$(CONFIG_DS1620)		+= ds1620.o
+obj-$(CONFIG_HW_RANDOM)		+= hw_random/
+obj-$(CONFIG_PPDEV)		+= ppdev.o
+obj-$(CONFIG_NWBUTTON)		+= nwbutton.o
+obj-$(CONFIG_NWFLASH)		+= nwflash.o
+obj-$(CONFIG_SCx200_GPIO)	+= scx200_gpio.o
+obj-$(CONFIG_PC8736x_GPIO)	+= pc8736x_gpio.o
+obj-$(CONFIG_NSC_GPIO)		+= nsc_gpio.o
+obj-$(CONFIG_GPIO_TB0219)	+= tb0219.o
+obj-$(CONFIG_TELCLOCK)		+= tlclk.o
+
+obj-$(CONFIG_MWAVE)		+= mwave/
+obj-y				+= agp/
+obj-$(CONFIG_PCMCIA)		+= pcmcia/
+
+obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o
+obj-$(CONFIG_TCG_TPM)		+= tpm/
+
+obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
+
+obj-$(CONFIG_JS_RTC)		+= js-rtc.o
+js-rtc-y = rtc.o
+
+obj-$(CONFIG_TILE_SROM)		+= tile-srom.o
+obj-$(CONFIG_XILLYBUS)		+= xillybus/
diff --git a/kernel/drivers/char/agp/Kconfig b/kernel/drivers/char/agp/Kconfig
new file mode 100644
index 000000000..c528f96ee
--- /dev/null
+++ b/kernel/drivers/char/agp/Kconfig
@@ -0,0 +1,162 @@
+menuconfig AGP
+	tristate "/dev/agpgart (AGP Support)"
+	depends on ALPHA || IA64 || PARISC || PPC || X86
+	depends on PCI
+	---help---
+	  AGP (Accelerated Graphics Port) is a bus system mainly used to
+	  connect graphics cards to the rest of the system.
+
+	  If you have an AGP system and you say Y here, it will be possible to
+	  use the AGP features of your 3D rendering video card.  This code acts
+	  as a sort of "AGP driver" for the motherboard's chipset.
+
+	  If you need more texture memory than you can get with the AGP GART
+	  (theoretically up to 256 MB, but in practice usually 64 or 128 MB
+	  due to kernel allocation issues), you could use PCI accesses
+	  and have up to a couple gigs of texture space.
+
+	  Note that this is the only means to have X/GLX use
+	  write-combining with MTRR support on the AGP bus.  Without it, OpenGL
+	  direct rendering will be a lot slower but still faster than PIO.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called agpgart.
+
+	  You should say Y here if you want to use GLX or DRI.
+
+	  If unsure, say N.
+
+config AGP_ALI
+	tristate "ALI chipset support"
+	depends on AGP && X86_32
+	---help---
+	  This option gives you AGP support for the GLX component of
+	  X on the following ALi chipsets.  The supported chipsets
+	  include M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
+	  For the ALi-chipset question, ALi suggests you refer to
+	  .
+
+	  The M1541 chipset can do AGP 1x and 2x, but note that there is an
+	  acknowledged incompatibility with Matrox G200 cards.
Due to + timing issues, this chipset cannot do AGP 2x with the G200. + This is a hardware limitation. AGP 1x seems to be fine, though. + +config AGP_ATI + tristate "ATI chipset support" + depends on AGP && X86_32 + ---help--- + This option gives you AGP support for the GLX component of + X on the ATI RadeonIGP family of chipsets. + +config AGP_AMD + tristate "AMD Irongate, 761, and 762 chipset support" + depends on AGP && X86_32 + help + This option gives you AGP support for the GLX component of + X on AMD Irongate, 761, and 762 chipsets. + +config AGP_AMD64 + tristate "AMD Opteron/Athlon64 on-CPU GART support" + depends on AGP && X86 && AMD_NB + help + This option gives you AGP support for the GLX component of + X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. + You still need an external AGP bridge like the AMD 8151, VIA + K8T400M, SiS755. It may also support other AGP bridges when loaded + with agp_try_unsupported=1. + +config AGP_INTEL + tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support" + depends on AGP && X86 + select INTEL_GTT + help + This option gives you AGP support for the GLX component of X + on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875, + E7205 and E7505 chipsets and full support for the 810, 815, 830M, + 845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets. + + + +config AGP_NVIDIA + tristate "NVIDIA nForce/nForce2 chipset support" + depends on AGP && X86_32 + help + This option gives you AGP support for the GLX component of + X on NVIDIA chipsets including nForce and nForce2 + +config AGP_SIS + tristate "SiS chipset support" + depends on AGP && X86 + help + This option gives you AGP support for the GLX component of + X on Silicon Integrated Systems [SiS] chipsets. + + Note that 5591/5592 AGP chipsets are NOT supported. + + +config AGP_SWORKS + tristate "Serverworks LE/HE chipset support" + depends on AGP && X86_32 + help + Say Y here to support the Serverworks AGP card. See + for product descriptions and images. + +config AGP_VIA + tristate "VIA chipset support" + depends on AGP && X86 + help + This option gives you AGP support for the GLX component of + X on VIA MVP3/Apollo Pro chipsets. + +config AGP_I460 + tristate "Intel 460GX chipset support" + depends on AGP && (IA64_DIG || IA64_GENERIC) + help + This option gives you AGP GART support for the Intel 460GX chipset + for IA64 processors. + +config AGP_HP_ZX1 + tristate "HP ZX1 chipset AGP support" + depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC) + help + This option gives you AGP GART support for the HP ZX1 chipset + for IA64 processors. + +config AGP_PARISC + tristate "HP Quicksilver AGP support" + depends on AGP && PARISC && 64BIT + help + This option gives you AGP GART support for the HP Quicksilver + AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000 + workstation...) + +config AGP_ALPHA_CORE + tristate "Alpha AGP support" + depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL) + default AGP + +config AGP_UNINORTH + tristate "Apple UniNorth & U3 AGP support" + depends on AGP && PPC_PMAC + help + This option gives you AGP support for Apple machines with a + UniNorth or U3 (Apple G5) bridge. + +config AGP_EFFICEON + tristate "Transmeta Efficeon support" + depends on AGP && X86_32 + help + This option gives you AGP support for the Transmeta Efficeon + series processors with integrated northbridges. 
+
+config AGP_SGI_TIOCA
+	tristate "SGI TIO chipset AGP support"
+	depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  This option gives you AGP GART support for the SGI TIO chipset
+	  for IA64 processors.
+
+config INTEL_GTT
+	tristate
+	depends on X86 && PCI
+
diff --git a/kernel/drivers/char/agp/Makefile b/kernel/drivers/char/agp/Makefile
new file mode 100644
index 000000000..604489bcd
--- /dev/null
+++ b/kernel/drivers/char/agp/Makefile
@@ -0,0 +1,22 @@
+agpgart-y := backend.o frontend.o generic.o isoch.o
+
+agpgart-$(CONFIG_COMPAT)	+= compat_ioctl.o
+
+obj-$(CONFIG_AGP)		+= agpgart.o
+obj-$(CONFIG_AGP_ALI)		+= ali-agp.o
+obj-$(CONFIG_AGP_ATI)		+= ati-agp.o
+obj-$(CONFIG_AGP_AMD)		+= amd-k7-agp.o
+obj-$(CONFIG_AGP_AMD64)		+= amd64-agp.o
+obj-$(CONFIG_AGP_ALPHA_CORE)	+= alpha-agp.o
+obj-$(CONFIG_AGP_EFFICEON)	+= efficeon-agp.o
+obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
+obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
+obj-$(CONFIG_AGP_I460)		+= i460-agp.o
+obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
+obj-$(CONFIG_INTEL_GTT)		+= intel-gtt.o
+obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
+obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
+obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
+obj-$(CONFIG_AGP_SWORKS)	+= sworks-agp.o
+obj-$(CONFIG_AGP_UNINORTH)	+= uninorth-agp.o
+obj-$(CONFIG_AGP_VIA)		+= via-agp.o
diff --git a/kernel/drivers/char/agp/agp.h b/kernel/drivers/char/agp/agp.h
new file mode 100644
index 000000000..4eb1c772d
--- /dev/null
+++ b/kernel/drivers/char/agp/agp.h
@@ -0,0 +1,290 @@
+/*
+ * AGPGART
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2004 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+#include <asm/agp.h>	/* for flush_agp_cache() */
+
+#define PFX "agpgart: "
+
+//#define AGP_DEBUG 1
+#ifdef AGP_DEBUG
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
+#else
+#define DBG(x,y...) do { } while (0)
+#endif
+
+extern struct agp_bridge_data *agp_bridge;
+
+enum aper_size_type {
+	U8_APER_SIZE,
+	U16_APER_SIZE,
+	U32_APER_SIZE,
+	LVL2_APER_SIZE,
+	FIXED_APER_SIZE
+};
+
+struct gatt_mask {
+	unsigned long mask;
+	u32 type;
+	/* totally device specific, for integrated chipsets that
+	 * might have different types of memory masks.
+	 * For other
+	 * devices this will probably be ignored */
+};
+
+#define AGP_PAGE_DESTROY_UNMAP 1
+#define AGP_PAGE_DESTROY_FREE 2
+
+struct aper_size_info_8 {
+	int size;
+	int num_entries;
+	int page_order;
+	u8 size_value;
+};
+
+struct aper_size_info_16 {
+	int size;
+	int num_entries;
+	int page_order;
+	u16 size_value;
+};
+
+struct aper_size_info_32 {
+	int size;
+	int num_entries;
+	int page_order;
+	u32 size_value;
+};
+
+struct aper_size_info_lvl2 {
+	int size;
+	int num_entries;
+	u32 size_value;
+};
+
+struct aper_size_info_fixed {
+	int size;
+	int num_entries;
+	int page_order;
+};
+
+struct agp_bridge_driver {
+	struct module *owner;
+	const void *aperture_sizes;
+	int num_aperture_sizes;
+	enum aper_size_type size_type;
+	bool cant_use_aperture;
+	bool needs_scratch_page;
+	const struct gatt_mask *masks;
+	int (*fetch_size)(void);
+	int (*configure)(void);
+	void (*agp_enable)(struct agp_bridge_data *, u32);
+	void (*cleanup)(void);
+	void (*tlb_flush)(struct agp_memory *);
+	unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
+	void (*cache_flush)(void);
+	int (*create_gatt_table)(struct agp_bridge_data *);
+	int (*free_gatt_table)(struct agp_bridge_data *);
+	int (*insert_memory)(struct agp_memory *, off_t, int);
+	int (*remove_memory)(struct agp_memory *, off_t, int);
+	struct agp_memory *(*alloc_by_type) (size_t, int);
+	void (*free_by_type)(struct agp_memory *);
+	struct page *(*agp_alloc_page)(struct agp_bridge_data *);
+	int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
+	void (*agp_destroy_page)(struct page *, int flags);
+	void (*agp_destroy_pages)(struct agp_memory *);
+	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+};
+
+struct agp_bridge_data {
+	const struct agp_version *version;
+	const struct agp_bridge_driver *driver;
+	const struct vm_operations_struct *vm_ops;
+	void *previous_size;
+	void *current_size;
+	void *dev_private_data;
+	struct pci_dev *dev;
+	u32 __iomem *gatt_table;
+	u32 *gatt_table_real;
+	unsigned long scratch_page;
+	struct page *scratch_page_page;
+	dma_addr_t scratch_page_dma;
+	unsigned long gart_bus_addr;
+	unsigned long gatt_bus_addr;
+	u32 mode;
+	enum chipset_type type;
+	unsigned long *key_list;
+	atomic_t current_memory_agp;
+	atomic_t agp_in_use;
+	int max_memory_agp;	/* in number of pages */
+	int aperture_size_idx;
+	int capndx;
+	int flags;
+	char major_version;
+	char minor_version;
+	struct list_head list;
+	u32 apbase_config;
+	/* list of agp_memory mapped to the aperture */
+	struct list_head mapped_list;
+	spinlock_t mapped_lock;
+};
+
+#define KB(x)	((x) * 1024)
+#define MB(x)	(KB (KB (x)))
+#define GB(x)	(MB (KB (x)))
+
+#define A_SIZE_8(x)	((struct aper_size_info_8 *) x)
+#define A_SIZE_16(x)	((struct aper_size_info_16 *) x)
+#define A_SIZE_32(x)	((struct aper_size_info_32 *) x)
+#define A_SIZE_LVL2(x)	((struct aper_size_info_lvl2 *) x)
+#define A_SIZE_FIX(x)	((struct aper_size_info_fixed *) x)
+#define A_IDX8(bridge)	(A_SIZE_8((bridge)->driver->aperture_sizes) + i)
+#define A_IDX16(bridge)	(A_SIZE_16((bridge)->driver->aperture_sizes) + i)
+#define A_IDX32(bridge)	(A_SIZE_32((bridge)->driver->aperture_sizes) + i)
+#define MAXKEY		(4096 * 32)
+
+#define PGE_EMPTY(b, p)	(!(p) || (p) == (unsigned long) (b)->scratch_page)
+
+
+struct agp_device_ids {
+	unsigned short device_id; /* first, to make table easier to read */
+	enum chipset_type chipset;
+	const char *chipset_name;
+	int (*chipset_setup) (struct pci_dev *pdev);	/* used to override generic */
+};
+
+/* Driver registration */
+struct agp_bridge_data *agp_alloc_bridge(void);
+void agp_put_bridge(struct agp_bridge_data *bridge);
+int agp_add_bridge(struct agp_bridge_data *bridge);
+void agp_remove_bridge(struct agp_bridge_data *bridge);
+
+/* Frontend routines. */
+int agp_frontend_initialize(void);
+void agp_frontend_cleanup(void);
+
+/* Generic routines. */
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
+struct agp_memory *agp_create_memory(int scratch_pages);
+int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
+void agp_generic_free_by_type(struct agp_memory *curr);
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
+			    struct agp_memory *memory, size_t page_count);
+void agp_generic_destroy_page(struct page *page, int flags);
+void agp_generic_destroy_pages(struct agp_memory *memory);
+void agp_free_key(int key);
+int agp_num_entries(void);
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
+void agp_device_command(u32 command, bool agp_v3);
+int agp_3_5_enable(struct agp_bridge_data *bridge);
+void global_cache_flush(void);
+void get_agp_version(struct agp_bridge_data *bridge);
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+				      dma_addr_t phys, int type);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+				  int type);
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+
+/* generic functions for user-populated AGP memory types */
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
+void agp_alloc_page_array(size_t size, struct agp_memory *mem);
+static inline void agp_free_page_array(struct agp_memory *mem)
+{
+	kvfree(mem->pages);
+}
+
+
+/* generic routines for agp>=3 */
+int agp3_generic_fetch_size(void);
+void agp3_generic_tlbflush(struct agp_memory *mem);
+int agp3_generic_configure(void);
+void agp3_generic_cleanup(void);
+
+/* aperture sizes have been standardised since v3 */
+#define AGP_GENERIC_SIZES_ENTRIES 11
+extern const struct aper_size_info_16 agp3_generic_sizes[];
+
+extern int agp_off;
+extern int agp_try_unsupported_boot;
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* Chipset independent registers (from AGP Spec) */
+#define AGP_APBASE	0x10
+#define AGP_APERTURE_BAR	0
+
+#define AGPSTAT		0x4
+#define AGPCMD		0x8
+#define AGPNISTAT	0xc
+#define AGPCTRL		0x10
+#define AGPAPSIZE	0x14
+#define AGPNEPG		0x16
+#define AGPGARTLO	0x18
+#define AGPGARTHI	0x1c
+#define AGPNICMD	0x20
+
+#define AGP_MAJOR_VERSION_SHIFT	(20)
+#define AGP_MINOR_VERSION_SHIFT	(16)
+
+#define AGPSTAT_RQ_DEPTH	(0xff000000)
+#define AGPSTAT_RQ_DEPTH_SHIFT	24
+
+#define AGPSTAT_CAL_MASK	(1<<12|1<<11|1<<10)
+#define AGPSTAT_ARQSZ		(1<<15|1<<14|1<<13)
+#define AGPSTAT_ARQSZ_SHIFT	13
+
+#define AGPSTAT_SBA		(1<<9)
+#define AGPSTAT_AGP_ENABLE	(1<<8)
+#define AGPSTAT_FW		(1<<4)
+#define AGPSTAT_MODE_3_0	(1<<3)
+
+#define AGPSTAT2_1X		(1<<0)
+#define AGPSTAT2_2X		(1<<1)
+#define AGPSTAT2_4X		(1<<2)
+
+#define AGPSTAT3_RSVD		(1<<2)
+#define AGPSTAT3_8X		(1<<1)
+#define AGPSTAT3_4X		(1)
+
+#define AGPCTRL_APERENB		(1<<8)
+#define AGPCTRL_GTLBEN		(1<<7)
+
+#define AGP2_RESERVED_MASK 0x00fffcc8
+#define AGP3_RESERVED_MASK 0x00ff00c4
+
+#define AGP_ERRATA_FASTWRITES 1<<0
+#define AGP_ERRATA_SBA	1<<1
+#define AGP_ERRATA_1X	1<<2
+
+#endif				/* _AGP_BACKEND_PRIV_H */
diff --git a/kernel/drivers/char/agp/ali-agp.c b/kernel/drivers/char/agp/ali-agp.c
new file mode 100644
index 000000000..dcbbb4ea3
--- /dev/null
+++ b/kernel/drivers/char/agp/ali-agp.c
@@ -0,0 +1,422 @@
+/*
+ * ALi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/page.h>		/* PAGE_SIZE */
+#include "agp.h"
+
+#define ALI_AGPCTRL	0xb8
+#define ALI_ATTBASE	0xbc
+#define ALI_TLBCTRL	0xc0
+#define ALI_TAGCTRL	0xc4
+#define ALI_CACHE_FLUSH_CTRL	0xD0
+#define ALI_CACHE_FLUSH_ADDR_MASK	0xFFFFF000
+#define ALI_CACHE_FLUSH_EN	0x100
+
+static int ali_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	struct aper_size_info_32 *values;
+
+	pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+	temp &= ~(0xfffffff0);
+	values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void ali_tlbflush(struct agp_memory *mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	temp &= 0xfffffff0;
+	temp |= (1<<0 | 1<<1);
+	pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
+}
+
+static void ali_cleanup(void)
+{
+	struct aper_size_info_32 *previous_size;
+	u32 temp;
+
+	previous_size = A_SIZE_32(agp_bridge->previous_size);
+
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+// clear tag
+	pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
+			((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+	pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+	pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
+			((temp & 0x00000ff0) | previous_size->size_value));
+}
+
+static int ali_configure(void)
+{
+	u32 temp;
+	struct aper_size_info_32 *current_size;
+
+	current_size = A_SIZE_32(agp_bridge->current_size);
+
+	/* aperture size and gatt addr */
+	pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+	temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
+		| (current_size->size_value & 0xf));
+	pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
+
+	/* tlb control */
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+#if 0
+	if (agp_bridge->type == ALI_M1541) {
+		u32 nlvm_addr = 0;
+
+		switch (current_size->size_value) {
+		case 0:  break;
+		case 1:  nlvm_addr = 0x100000;break;
+		case 2:  nlvm_addr = 0x200000;break;
+		case 3:  nlvm_addr = 0x400000;break;
+		case 4:  nlvm_addr = 0x800000;break;
+		case 6:  nlvm_addr = 0x1000000;break;
+		case 7:  nlvm_addr = 0x2000000;break;
+		case 8:  nlvm_addr = 0x4000000;break;
+		case 9:  nlvm_addr = 0x8000000;break;
+		case 10: nlvm_addr = 0x10000000;break;
+		default: break;
+		}
+		nlvm_addr--;
+		nlvm_addr&=0xfff00000;
+
+		nlvm_addr+= agp_bridge->gart_bus_addr;
+		nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
+		dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
+			 nlvm_addr);
+	}
+#endif
+
+	pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+	temp &= 0xffffff7f;		//enable TLB
+
pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp); + + return 0; +} + + +static void m1541_cache_flush(void) +{ + int i, page_count; + u32 temp; + + global_cache_flush(); + + page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; + for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { + pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, + &temp); + pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, + (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | + (agp_bridge->gatt_bus_addr + i)) | + ALI_CACHE_FLUSH_EN)); + } +} + +static struct page *m1541_alloc_page(struct agp_bridge_data *bridge) +{ + struct page *page = agp_generic_alloc_page(agp_bridge); + u32 temp; + + if (!page) + return NULL; + + pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, + (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | + page_to_phys(page)) | ALI_CACHE_FLUSH_EN )); + return page; +} + +static void ali_destroy_page(struct page *page, int flags) +{ + if (page) { + if (flags & AGP_PAGE_DESTROY_UNMAP) { + global_cache_flush(); /* is this really needed? --hch */ + agp_generic_destroy_page(page, flags); + } else + agp_generic_destroy_page(page, flags); + } +} + +static void m1541_destroy_page(struct page *page, int flags) +{ + u32 temp; + + if (page == NULL) + return; + + if (flags & AGP_PAGE_DESTROY_UNMAP) { + global_cache_flush(); + + pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, + (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | + page_to_phys(page)) | ALI_CACHE_FLUSH_EN)); + } + agp_generic_destroy_page(page, flags); +} + + +/* Setup function */ + +static const struct aper_size_info_32 ali_generic_sizes[7] = +{ + {256, 65536, 6, 10}, + {128, 32768, 5, 9}, + {64, 16384, 4, 8}, + {32, 8192, 3, 7}, + {16, 4096, 2, 6}, + {8, 2048, 1, 4}, + {4, 1024, 0, 3} +}; + +static const struct agp_bridge_driver ali_generic_bridge = { + .owner = THIS_MODULE, + .aperture_sizes = ali_generic_sizes, + .size_type = U32_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = ali_configure, + .fetch_size = ali_fetch_size, + .cleanup = ali_cleanup, + .tlb_flush = ali_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = NULL, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_destroy_page = ali_destroy_page, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver ali_m1541_bridge = { + .owner = THIS_MODULE, + .aperture_sizes = ali_generic_sizes, + .size_type = U32_APER_SIZE, + .num_aperture_sizes = 7, + .configure = ali_configure, + .fetch_size = ali_fetch_size, + .cleanup = ali_cleanup, + .tlb_flush = ali_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = NULL, + .agp_enable = agp_generic_enable, + .cache_flush = m1541_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + 
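+	/*
+	 * M1541 only: the chipset snoops by physical address, so pages
+	 * handed to or taken from AGP must be flushed through
+	 * ALI_CACHE_FLUSH_CTRL (see m1541_alloc_page/m1541_destroy_page
+	 * above); that is why the page hooks below differ from the
+	 * generic bridge's.
+	 */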
.agp_alloc_page = m1541_alloc_page, + .agp_destroy_page = m1541_destroy_page, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + + +static struct agp_device_ids ali_agp_device_ids[] = +{ + { + .device_id = PCI_DEVICE_ID_AL_M1541, + .chipset_name = "M1541", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1621, + .chipset_name = "M1621", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1631, + .chipset_name = "M1631", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1632, + .chipset_name = "M1632", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1641, + .chipset_name = "M1641", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1644, + .chipset_name = "M1644", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1647, + .chipset_name = "M1647", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1651, + .chipset_name = "M1651", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1671, + .chipset_name = "M1671", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1681, + .chipset_name = "M1681", + }, + { + .device_id = PCI_DEVICE_ID_AL_M1683, + .chipset_name = "M1683", + }, + + { }, /* dummy final entry, always present */ +}; + +static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct agp_device_ids *devs = ali_agp_device_ids; + struct agp_bridge_data *bridge; + u8 hidden_1621_id, cap_ptr; + int j; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + /* probe for known chipsets */ + for (j = 0; devs[j].chipset_name; j++) { + if (pdev->device == devs[j].device_id) + goto found; + } + + dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n", + pdev->vendor, pdev->device); + return -ENODEV; + + +found: + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + switch (pdev->device) { + case PCI_DEVICE_ID_AL_M1541: + bridge->driver = &ali_m1541_bridge; + break; + case PCI_DEVICE_ID_AL_M1621: + pci_read_config_byte(pdev, 0xFB, &hidden_1621_id); + switch (hidden_1621_id) { + case 0x31: + devs[j].chipset_name = "M1631"; + break; + case 0x32: + devs[j].chipset_name = "M1632"; + break; + case 0x41: + devs[j].chipset_name = "M1641"; + break; + case 0x43: + devs[j].chipset_name = "M1621"; + break; + case 0x47: + devs[j].chipset_name = "M1647"; + break; + case 0x51: + devs[j].chipset_name = "M1651"; + break; + default: + break; + } + /*FALLTHROUGH*/ + default: + bridge->driver = &ali_generic_bridge; + } + + dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name); + + /* Fill in the mode register */ + pci_read_config_dword(pdev, + bridge->capndx+PCI_AGP_STATUS, + &bridge->mode); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_ali_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +static struct pci_device_id agp_ali_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AL, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_ali_pci_table); + +static struct pci_driver agp_ali_pci_driver = { + .name = "agpgart-ali", + .id_table = agp_ali_pci_table, + .probe = agp_ali_probe, + .remove = agp_ali_remove, +}; + +static int __init agp_ali_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_ali_pci_driver); +} + +static void __exit agp_ali_cleanup(void) +{ + pci_unregister_driver(&agp_ali_pci_driver); +} + +module_init(agp_ali_init); 
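+
+/*
+ * Worked example for ali_generic_sizes above: the {256, 65536, 6, 10}
+ * entry is a 256 MB aperture; at 4 KB per page that needs
+ * 256 MB / 4 KB = 65536 GATT entries, i.e. a 256 KB table filling
+ * 64 pages (hence page_order 6), and 10 is the size_value that
+ * ali_configure() programs into the low nibble of ALI_ATTBASE.
+ */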
+module_exit(agp_ali_cleanup);
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/kernel/drivers/char/agp/alpha-agp.c b/kernel/drivers/char/agp/alpha-agp.c
new file mode 100644
index 000000000..199b8e99f
--- /dev/null
+++ b/kernel/drivers/char/agp/alpha-agp.c
@@ -0,0 +1,222 @@
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/machvec.h>
+#include <asm/agp_backend.h>
+#include "../../../arch/alpha/kernel/pci_impl.h"
+
+#include "agp.h"
+
+static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
+				   struct vm_fault *vmf)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	dma_addr_t dma_addr;
+	unsigned long pa;
+	struct page *page;
+
+	dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
+		   + agp->aperture.bus_base;
+	pa = agp->ops->translate(agp, dma_addr);
+
+	if (pa == (unsigned long)-EINVAL)
+		return VM_FAULT_SIGBUS;	/* no translation */
+
+	/*
+	 * Get the page, inc the use count, and return it
+	 */
+	page = virt_to_page(__va(pa));
+	get_page(page);
+	vmf->page = page;
+	return 0;
+}
+
+static struct aper_size_info_fixed alpha_core_agp_sizes[] =
+{
+	{ 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+};
+
+static const struct vm_operations_struct alpha_core_agp_vm_ops = {
+	.fault = alpha_core_agp_vm_fault,
+};
+
+
+static int alpha_core_agp_fetch_size(void)
+{
+	return alpha_core_agp_sizes[0].size;
+}
+
+static int alpha_core_agp_configure(void)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	agp_bridge->gart_bus_addr = agp->aperture.bus_base;
+	return 0;
+}
+
+static void alpha_core_agp_cleanup(void)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+
+	agp->ops->cleanup(agp);
+}
+
+static void alpha_core_agp_tlbflush(struct agp_memory *mem)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
+}
+
+static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	alpha_agp_info *agp = bridge->dev_private_data;
+
+	agp->mode.lw = agp_collect_device_status(bridge, mode,
+						 agp->capability.lw);
+
+	agp->mode.bits.enable = 1;
+	agp->ops->configure(agp);
+
+	agp_device_command(agp->mode.lw, false);
+}
+
+static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
+					int type)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	int num_entries, status;
+	void *temp;
+
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	status = agp->ops->bind(agp, pg_start, mem);
+	mb();
+	alpha_core_agp_tlbflush(mem);
+
+	return status;
+}
+
+static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
+					int type)
+{
+	alpha_agp_info *agp = agp_bridge->dev_private_data;
+	int status;
+
+	status = agp->ops->unbind(agp, pg_start, mem);
+	alpha_core_agp_tlbflush(mem);
+	return status;
+}
+
+static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a)
+{
+	return 0;
+}
+
+struct agp_bridge_driver alpha_core_agp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= alpha_core_agp_sizes,
+	.num_aperture_sizes	= 1,
+	.size_type		= FIXED_APER_SIZE,
+	.cant_use_aperture	= true,
+	.masks			= NULL,
+
+	.fetch_size		= alpha_core_agp_fetch_size,
+	.configure		= alpha_core_agp_configure,
+	.agp_enable		= alpha_core_agp_enable,
+	.cleanup		= alpha_core_agp_cleanup,
+	.tlb_flush		= alpha_core_agp_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+
.cache_flush = global_cache_flush, + .create_gatt_table = alpha_core_agp_create_free_gatt_table, + .free_gatt_table = alpha_core_agp_create_free_gatt_table, + .insert_memory = alpha_core_agp_insert_memory, + .remove_memory = alpha_core_agp_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +struct agp_bridge_data *alpha_bridge; + +int __init +alpha_core_agp_setup(void) +{ + alpha_agp_info *agp = alpha_mv.agp_info(); + struct pci_dev *pdev; /* faked */ + struct aper_size_info_fixed *aper_size; + + if (!agp) + return -ENODEV; + if (agp->ops->setup(agp)) + return -ENODEV; + + /* + * Build the aperture size descriptor + */ + aper_size = alpha_core_agp_sizes; + aper_size->size = agp->aperture.size / (1024 * 1024); + aper_size->num_entries = agp->aperture.size / PAGE_SIZE; + aper_size->page_order = __ffs(aper_size->num_entries / 1024); + + /* + * Build a fake pci_dev struct + */ + pdev = pci_alloc_dev(NULL); + if (!pdev) + return -ENOMEM; + pdev->vendor = 0xffff; + pdev->device = 0xffff; + pdev->sysdata = agp->hose; + + alpha_bridge = agp_alloc_bridge(); + if (!alpha_bridge) + goto fail; + + alpha_bridge->driver = &alpha_core_agp_driver; + alpha_bridge->vm_ops = &alpha_core_agp_vm_ops; + alpha_bridge->current_size = aper_size; /* only 1 size */ + alpha_bridge->dev_private_data = agp; + alpha_bridge->dev = pdev; + alpha_bridge->mode = agp->capability.lw; + + printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index); + return agp_add_bridge(alpha_bridge); + + fail: + kfree(pdev); + return -ENOMEM; +} + +static int __init agp_alpha_core_init(void) +{ + if (agp_off) + return -EINVAL; + if (alpha_mv.agp_info) + return alpha_core_agp_setup(); + return -ENODEV; +} + +static void __exit agp_alpha_core_cleanup(void) +{ + agp_remove_bridge(alpha_bridge); + agp_put_bridge(alpha_bridge); +} + +module_init(agp_alpha_core_init); +module_exit(agp_alpha_core_cleanup); + +MODULE_AUTHOR("Jeff Wiedemeier "); +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/amd-k7-agp.c b/kernel/drivers/char/agp/amd-k7-agp.c new file mode 100644 index 000000000..3661a51e9 --- /dev/null +++ b/kernel/drivers/char/agp/amd-k7-agp.c @@ -0,0 +1,566 @@ +/* + * AMD K7 AGPGART routines. 
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include "agp.h"
+
+#define AMD_MMBASE_BAR	1
+#define AMD_APSIZE	0xac
+#define AMD_MODECNTL	0xb0
+#define AMD_MODECNTL2	0xb2
+#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
+#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */
+
+static struct pci_device_id agp_amdk7_pci_table[];
+
+struct amd_page_map {
+	unsigned long *real;
+	unsigned long __iomem *remapped;
+};
+
+static struct _amd_irongate_private {
+	volatile u8 __iomem *registers;
+	struct amd_page_map **gatt_pages;
+	int num_tables;
+} amd_irongate_private;
+
+static int amd_create_page_map(struct amd_page_map *page_map)
+{
+	int i;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL)
+		return -ENOMEM;
+
+	set_memory_uc((unsigned long)page_map->real, 1);
+	page_map->remapped = page_map->real;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+		writel(agp_bridge->scratch_page, page_map->remapped+i);
+		readl(page_map->remapped+i);	/* PCI Posting. */
+	}
+
+	return 0;
+}
+
+static void amd_free_page_map(struct amd_page_map *page_map)
+{
+	set_memory_wb((unsigned long)page_map->real, 1);
+	free_page((unsigned long) page_map->real);
+}
+
+static void amd_free_gatt_pages(void)
+{
+	int i;
+	struct amd_page_map **tables;
+	struct amd_page_map *entry;
+
+	tables = amd_irongate_private.gatt_pages;
+	for (i = 0; i < amd_irongate_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL)
+				amd_free_page_map(entry);
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+	amd_irongate_private.gatt_pages = NULL;
+}
+
+static int amd_create_gatt_pages(int nr_tables)
+{
+	struct amd_page_map **tables;
+	struct amd_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL);
+	if (tables == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_tables; i++) {
+		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+		tables[i] = entry;
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		retval = amd_create_page_map(entry);
+		if (retval != 0)
+			break;
+	}
+	amd_irongate_private.num_tables = i;
+	amd_irongate_private.gatt_pages = tables;
+
+	if (retval != 0)
+		amd_free_gatt_pages();
+
+	return retval;
+}
+
+/* Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int amd_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct aper_size_info_lvl2 *value;
+	struct amd_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
+	unsigned long addr;
+	int retval;
+	int i;
+
+	value = A_SIZE_LVL2(agp_bridge->current_size);
+	retval = amd_create_page_map(&page_dir);
+	if (retval != 0)
+		return retval;
+
+	retval = amd_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		amd_free_page_map(&page_dir);
+		return retval;
+	}
+
+	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Get the
address for the gart region. + * This is a bus address even on the alpha, b/c its + * used to program the agp master not the cpu + */ + + addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); + agp_bridge->gart_bus_addr = addr; + + /* Calculate the agp offset */ + for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { + writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1, + page_dir.remapped+GET_PAGE_DIR_OFF(addr)); + readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ + } + + for (i = 0; i < value->num_entries; i++) { + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); + readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ + } + + return 0; +} + +static int amd_free_gatt_table(struct agp_bridge_data *bridge) +{ + struct amd_page_map page_dir; + + page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; + page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; + + amd_free_gatt_pages(); + amd_free_page_map(&page_dir); + return 0; +} + +static int amd_irongate_fetch_size(void) +{ + int i; + u32 temp; + struct aper_size_info_lvl2 *values; + + pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); + temp = (temp & 0x0000000e); + values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int amd_irongate_configure(void) +{ + struct aper_size_info_lvl2 *current_size; + phys_addr_t reg; + u32 temp; + u16 enable_reg; + + current_size = A_SIZE_LVL2(agp_bridge->current_size); + + if (!amd_irongate_private.registers) { + /* Get the memory mapped registers */ + reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR); + amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096); + if (!amd_irongate_private.registers) + return -ENOMEM; + } + + /* Write out the address of the gatt table */ + writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE); + readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */ + + /* Write the Sync register */ + pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80); + + /* Set indexing mode */ + pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00); + + /* Write the enable register */ + enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE); + enable_reg = (enable_reg | 0x0004); + writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE); + readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. 
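A read-back like this forces the posted PCI write out to the chipset before the driver continues, which is why every writel() in this file is paired with a readl() of the same register.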
*/ + + /* Write out the size register */ + pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1); + pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp); + + /* Flush the tlb */ + writel(1, amd_irongate_private.registers+AMD_TLBFLUSH); + readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/ + return 0; +} + +static void amd_irongate_cleanup(void) +{ + struct aper_size_info_lvl2 *previous_size; + u32 temp; + u16 enable_reg; + + previous_size = A_SIZE_LVL2(agp_bridge->previous_size); + + enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE); + enable_reg = (enable_reg & ~(0x0004)); + writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE); + readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */ + + /* Write back the previous size and disable gart translation */ + pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp); + iounmap((void __iomem *) amd_irongate_private.registers); +} + +/* + * This routine could be implemented by taking the addresses + * written to the GATT, and flushing them individually. However + * currently it just flushes the whole table. Which is probably + * more efficient, since agp_memory blocks can be a large number of + * entries. + */ + +static void amd_irongate_tlbflush(struct agp_memory *temp) +{ + writel(1, amd_irongate_private.registers+AMD_TLBFLUSH); + readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */ +} + +static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + int i, j, num_entries; + unsigned long __iomem *cur_gatt; + unsigned long addr; + + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; + + if (type != mem->type || + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) + return -EINVAL; + + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + + j = pg_start; + while (j < (pg_start + mem->page_count)) { + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) + return -EBUSY; + j++; + } + + if (!mem->is_flushed) { + global_cache_flush(); + mem->is_flushed = true; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + writel(agp_generic_mask_memory(agp_bridge, + page_to_phys(mem->pages[i]), + mem->type), + cur_gatt+GET_GATT_OFF(addr)); + readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ + } + amd_irongate_tlbflush(mem); + return 0; +} + +static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + int i; + unsigned long __iomem *cur_gatt; + unsigned long addr; + + if (type != mem->type || + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) + return -EINVAL; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); + readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. 
*/ + } + + amd_irongate_tlbflush(mem); + return 0; +} + +static const struct aper_size_info_lvl2 amd_irongate_sizes[7] = +{ + {2048, 524288, 0x0000000c}, + {1024, 262144, 0x0000000a}, + {512, 131072, 0x00000008}, + {256, 65536, 0x00000006}, + {128, 32768, 0x00000004}, + {64, 16384, 0x00000002}, + {32, 8192, 0x00000000} +}; + +static const struct gatt_mask amd_irongate_masks[] = +{ + {.mask = 1, .type = 0} +}; + +static const struct agp_bridge_driver amd_irongate_driver = { + .owner = THIS_MODULE, + .aperture_sizes = amd_irongate_sizes, + .size_type = LVL2_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = amd_irongate_configure, + .fetch_size = amd_irongate_fetch_size, + .cleanup = amd_irongate_cleanup, + .tlb_flush = amd_irongate_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = amd_irongate_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = amd_create_gatt_table, + .free_gatt_table = amd_free_gatt_table, + .insert_memory = amd_insert_memory, + .remove_memory = amd_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static struct agp_device_ids amd_agp_device_ids[] = +{ + { + .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006, + .chipset_name = "Irongate", + }, + { + .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E, + .chipset_name = "761", + }, + { + .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C, + .chipset_name = "760MP", + }, + { }, /* dummy final entry, always present */ +}; + +static int agp_amdk7_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr; + int j; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + j = ent - agp_amdk7_pci_table; + dev_info(&pdev->dev, "AMD %s chipset\n", + amd_agp_device_ids[j].chipset_name); + + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->driver = &amd_irongate_driver; + bridge->dev_private_data = &amd_irongate_private, + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + /* 751 Errata (22564_B-1.PDF) + erratum 20: strobe glitch with Nvidia NV10 GeForce cards. + system controller may experience noise due to strong drive strengths + */ + if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) { + struct pci_dev *gfxcard=NULL; + + cap_ptr = 0; + while (!cap_ptr) { + gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); + if (!gfxcard) { + dev_info(&pdev->dev, "no AGP VGA controller\n"); + return -ENODEV; + } + cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); + } + + /* With so many variants of NVidia cards, it's simpler just + to blacklist them all, and then whitelist them as needed + (if necessary at all). */ + if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { + agp_bridge->flags |= AGP_ERRATA_1X; + dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n"); + } + pci_dev_put(gfxcard); + } + + /* 761 Errata (23613_F.pdf) + * Revisions B0/B1 were a disaster. + * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X + * erratum 45: Timing problem prevents fast writes -- Disable fast write. + * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing. 
+ * With this lot disabled, we should prevent lockups. */ + if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) { + if (pdev->revision == 0x10 || pdev->revision == 0x11) { + agp_bridge->flags = AGP_ERRATA_FASTWRITES; + agp_bridge->flags |= AGP_ERRATA_SBA; + agp_bridge->flags |= AGP_ERRATA_1X; + dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n"); + } + } + + /* Fill in the mode register */ + pci_read_config_dword(pdev, + bridge->capndx+PCI_AGP_STATUS, + &bridge->mode); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_amdk7_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +#ifdef CONFIG_PM + +static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int agp_amdk7_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + return amd_irongate_driver.configure(); +} + +#endif /* CONFIG_PM */ + +/* must be the same order as name table above */ +static struct pci_device_id agp_amdk7_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_FE_GATE_7006, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_FE_GATE_700E, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_FE_GATE_700C, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table); + +static struct pci_driver agp_amdk7_pci_driver = { + .name = "agpgart-amdk7", + .id_table = agp_amdk7_pci_table, + .probe = agp_amdk7_probe, + .remove = agp_amdk7_remove, +#ifdef CONFIG_PM + .suspend = agp_amdk7_suspend, + .resume = agp_amdk7_resume, +#endif +}; + +static int __init agp_amdk7_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_amdk7_pci_driver); +} + +static void __exit agp_amdk7_cleanup(void) +{ + pci_unregister_driver(&agp_amdk7_pci_driver); +} + +module_init(agp_amdk7_init); +module_exit(agp_amdk7_cleanup); + +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/amd64-agp.c b/kernel/drivers/char/agp/amd64-agp.c new file mode 100644 index 000000000..0ef350010 --- /dev/null +++ b/kernel/drivers/char/agp/amd64-agp.c @@ -0,0 +1,818 @@ +/* + * Copyright 2001-2003 SuSE Labs. + * Distributed under the GNU public license, v2. + * + * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge. + * It also includes support for the AMD 8151 AGP bridge, + * although it doesn't actually do much, as all the real + * work is done in the northbridge(s). 
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mmzone.h>
+#include <asm/page.h>		/* PAGE_SIZE */
+#include <asm/e820.h>
+#include <asm/amd_nb.h>
+#include <asm/gart.h>
+#include "agp.h"
+
+/* NVIDIA K8 registers */
+#define NVIDIA_X86_64_0_APBASE		0x10
+#define NVIDIA_X86_64_1_APBASE1		0x50
+#define NVIDIA_X86_64_1_APLIMIT1	0x54
+#define NVIDIA_X86_64_1_APSIZE		0xa8
+#define NVIDIA_X86_64_1_APBASE2		0xd8
+#define NVIDIA_X86_64_1_APLIMIT2	0xdc
+
+/* ULi K8 registers */
+#define ULI_X86_64_BASE_ADDR		0x10
+#define ULI_X86_64_HTT_FEA_REG		0x50
+#define ULI_X86_64_ENU_SCR_REG		0x54
+
+static struct resource *aperture_resource;
+static bool __initdata agp_try_unsupported = 1;
+static int agp_bridges_found;
+
+static void amd64_tlbflush(struct agp_memory *temp)
+{
+	amd_flush_garts();
+}
+
+static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	long long tmp;
+	int mask_type;
+	struct agp_bridge_data *bridge = mem->bridge;
+	u32 pte;
+
+	num_entries = agp_num_entries();
+
+	if (type != mem->type)
+		return -EINVAL;
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0)
+		return -EINVAL;
+
+
+	/* Make sure we can fit the range in the gatt table. */
+	/* FIXME: could wrap */
+	if (((unsigned long)pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+
+	/* gatt table should be empty. */
+	while (j < (pg_start + mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		tmp = agp_bridge->driver->mask_memory(agp_bridge,
+						      page_to_phys(mem->pages[i]),
+						      mask_type);
+
+		BUG_ON(tmp & 0xffffff0000000ffcULL);
+		pte = (tmp & 0x000000ff00000000ULL) >> 28;
+		pte |=(tmp & 0x00000000fffff000ULL);
+		pte |= GPTE_VALID | GPTE_COHERENT;
+
+		writel(pte, agp_bridge->gatt_table+j);
+		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
+	}
+	amd64_tlbflush(mem);
+	return 0;
+}
+
+/*
+ * This hack alters the order element according
+ * to the size of a long. It sucks. I totally disown this, even
+ * though it does appear to work for the most part.
+ */
+static struct aper_size_info_32 amd64_aperture_sizes[7] =
+{
+	{32,   8192,   3+(sizeof(long)/8), 0 },
+	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
+	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
+	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
+	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
+	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
+	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
+};
+
+
+/*
+ * Get the current Aperture size from the x86-64.
+ * Note, that there may be multiple x86-64's, but we just return
+ * the value from the first one we find. The set_size functions
+ * keep the rest coherent anyway. Or at least should do.
+ */
+static int amd64_fetch_size(void)
+{
+	struct pci_dev *dev;
+	int i;
+	u32 temp;
+	struct aper_size_info_32 *values;
+
+	dev = node_to_amd_nb(0)->misc;
+	if (dev==NULL)
+		return 0;
+
+	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
+	temp = (temp & 0xe);
+	values = A_SIZE_32(amd64_aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+/*
+ * In a multiprocessor x86-64 system, this function gets
+ * called once for each CPU.
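+ * (The GART base is kept in AMD64_GARTAPERTUREBASE as a count of
+ * 32 MB units, which is why the value read below is shifted left
+ * by 25 to form a physical address.)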
+ */ +static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table) +{ + u64 aperturebase; + u32 tmp; + u64 aper_base; + + /* Address to map to */ + pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); + aperturebase = tmp << 25; + aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); + + enable_gart_translation(hammer, gatt_table); + + return aper_base; +} + + +static const struct aper_size_info_32 amd_8151_sizes[7] = +{ + {2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */ + {1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */ + {512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */ + {256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */ + {128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */ + {64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */ + {32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */ +}; + +static int amd_8151_configure(void) +{ + unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); + int i; + + if (!amd_nb_has_feature(AMD_NB_GART)) + return 0; + + /* Configure AGP regs in each x86-64 host bridge. */ + for (i = 0; i < amd_nb_num(); i++) { + agp_bridge->gart_bus_addr = + amd64_configure(node_to_amd_nb(i)->misc, gatt_bus); + } + amd_flush_garts(); + return 0; +} + + +static void amd64_cleanup(void) +{ + u32 tmp; + int i; + + if (!amd_nb_has_feature(AMD_NB_GART)) + return; + + for (i = 0; i < amd_nb_num(); i++) { + struct pci_dev *dev = node_to_amd_nb(i)->misc; + /* disable gart translation */ + pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); + tmp &= ~GARTEN; + pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp); + } +} + + +static const struct agp_bridge_driver amd_8151_driver = { + .owner = THIS_MODULE, + .aperture_sizes = amd_8151_sizes, + .size_type = U32_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = amd_8151_configure, + .fetch_size = amd64_fetch_size, + .cleanup = amd64_cleanup, + .tlb_flush = amd64_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = NULL, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = amd64_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +/* Some basic sanity checks for the aperture. */ +static int agp_aperture_valid(u64 aper, u32 size) +{ + if (!aperture_valid(aper, size, 32*1024*1024)) + return 0; + + /* Request the Aperture. This catches cases when someone else + already put a mapping in there - happens with some very broken BIOS + + Maybe better to use pci_assign_resource/pci_enable_device instead + trusting the bridges? */ + if (!aperture_resource && + !(aperture_resource = request_mem_region(aper, size, "aperture"))) { + printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n"); + return 0; + } + return 1; +} + +/* + * W*s centric BIOS sometimes only set up the aperture in the AGP + * bridge, not the northbridge. On AMD64 this is handled early + * in aperture.c, but when IOMMU is not enabled or we run + * on a 32bit kernel this needs to be redone. + * Unfortunately it is impossible to fix the aperture here because it's too late + * to allocate that much memory. 
But at least error out cleanly instead of
+ * crashing.
+ */
+static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
+{
+	u64 aper, nb_aper;
+	int order = 0;
+	u32 nb_order, nb_base;
+	u16 apsize;
+
+	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
+	nb_order = (nb_order >> 1) & 7;
+	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
+	nb_aper = nb_base << 25;
+
+	/* Northbridge seems to contain crap. Try the AGP bridge. */
+
+	pci_read_config_word(agp, cap+0x14, &apsize);
+	if (apsize == 0xffff) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
+		return -1;
+	}
+
+	apsize &= 0xfff;
+	/* Some BIOS use weird encodings not in the AGPv3 table. */
+	if (apsize & 0xff)
+		apsize |= 0xf00;
+	order = 7 - hweight16(apsize);
+
+	aper = pci_bus_address(agp, AGP_APERTURE_BAR);
+
+	/*
+	 * On some sick chips, APSIZE is 0. It means it wants 4G
+	 * so let double check that order, and lets trust AMD NB settings:
+	 */
+	if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
+		dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
+			 32 << order);
+		order = nb_order;
+	}
+
+	if (nb_order >= order) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
+	}
+
+	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
+		 aper, 32 << order);
+	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
+		return -1;
+
+	gart_set_size_and_enable(nb, order);
+	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
+
+	return 0;
+}
+
+static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
+{
+	int i;
+
+	if (amd_cache_northbridges() < 0)
+		return -ENODEV;
+
+	if (!amd_nb_has_feature(AMD_NB_GART))
+		return -ENODEV;
+
+	for (i = 0; i < amd_nb_num(); i++) {
+		struct pci_dev *dev = node_to_amd_nb(i)->misc;
+		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
+			dev_err(&dev->dev, "no usable aperture found\n");
+#ifdef __x86_64__
+			/* should port this to i386 */
+			dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
+#endif
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/* Handle AMD 8151 quirks */
+static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+{
+	char *revstring;
+
+	switch (pdev->revision) {
+	case 0x01: revstring="A0"; break;
+	case 0x02: revstring="A1"; break;
+	case 0x11: revstring="B0"; break;
+	case 0x12: revstring="B1"; break;
+	case 0x13: revstring="B2"; break;
+	case 0x14: revstring="B3"; break;
+	default:   revstring="??"; break;
+	}
+
+	dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
+
+	/*
+	 * Work around errata.
+ * Chips before B2 stepping incorrectly reporting v3.5 + */ + if (pdev->revision < 0x13) { + dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n"); + bridge->major_version = 3; + bridge->minor_version = 0; + } +} + + +static const struct aper_size_info_32 uli_sizes[7] = +{ + {256, 65536, 6, 10}, + {128, 32768, 5, 9}, + {64, 16384, 4, 8}, + {32, 8192, 3, 7}, + {16, 4096, 2, 6}, + {8, 2048, 1, 4}, + {4, 1024, 0, 3} +}; +static int uli_agp_init(struct pci_dev *pdev) +{ + u32 httfea,baseaddr,enuscr; + struct pci_dev *dev1; + int i, ret; + unsigned size = amd64_fetch_size(); + + dev_info(&pdev->dev, "setting up ULi AGP\n"); + dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); + if (dev1 == NULL) { + dev_info(&pdev->dev, "can't find ULi secondary device\n"); + return -ENODEV; + } + + for (i = 0; i < ARRAY_SIZE(uli_sizes); i++) + if (uli_sizes[i].size == size) + break; + + if (i == ARRAY_SIZE(uli_sizes)) { + dev_info(&pdev->dev, "no ULi size found for %d\n", size); + ret = -ENODEV; + goto put; + } + + /* shadow x86-64 registers into ULi registers */ + pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, + &httfea); + + /* if x86-64 aperture base is beyond 4G, exit here */ + if ((httfea & 0x7fff) >> (32 - 25)) { + ret = -ENODEV; + goto put; + } + + httfea = (httfea& 0x7fff) << 25; + + pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr); + baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK; + baseaddr|= httfea; + pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr); + + enuscr= httfea+ (size * 1024 * 1024) - 1; + pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); + pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); + ret = 0; +put: + pci_dev_put(dev1); + return ret; +} + + +static const struct aper_size_info_32 nforce3_sizes[5] = +{ + {512, 131072, 7, 0x00000000 }, + {256, 65536, 6, 0x00000008 }, + {128, 32768, 5, 0x0000000C }, + {64, 16384, 4, 0x0000000E }, + {32, 8192, 3, 0x0000000F } +}; + +/* Handle shadow device of the Nvidia NForce3 */ +/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. 
*/ +static int nforce3_agp_init(struct pci_dev *pdev) +{ + u32 tmp, apbase, apbar, aplimit; + struct pci_dev *dev1; + int i, ret; + unsigned size = amd64_fetch_size(); + + dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); + + dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); + if (dev1 == NULL) { + dev_info(&pdev->dev, "can't find Nforce3 secondary device\n"); + return -ENODEV; + } + + for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++) + if (nforce3_sizes[i].size == size) + break; + + if (i == ARRAY_SIZE(nforce3_sizes)) { + dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); + ret = -ENODEV; + goto put; + } + + pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); + tmp &= ~(0xf); + tmp |= nforce3_sizes[i].size_value; + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); + + /* shadow x86-64 registers into NVIDIA registers */ + pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, + &apbase); + + /* if x86-64 aperture base is beyond 4G, exit here */ + if ( (apbase & 0x7fff) >> (32 - 25) ) { + dev_info(&pdev->dev, "aperture base > 4G\n"); + ret = -ENODEV; + goto put; + } + + apbase = (apbase & 0x7fff) << 25; + + pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar); + apbar &= ~PCI_BASE_ADDRESS_MEM_MASK; + apbar |= apbase; + pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar); + + aplimit = apbase + (size * 1024 * 1024) - 1; + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase); + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit); + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); + + ret = 0; +put: + pci_dev_put(dev1); + + return ret; +} + +static int agp_amd64_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr; + int err; + + /* The Highlander principle */ + if (agp_bridges_found) + return -ENODEV; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + /* Could check for AGPv3 here */ + + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + if (pdev->vendor == PCI_VENDOR_ID_AMD && + pdev->device == PCI_DEVICE_ID_AMD_8151_0) { + amd8151_init(pdev, bridge); + } else { + dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n", + pdev->vendor, pdev->device); + } + + bridge->driver = &amd_8151_driver; + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + /* Fill in the mode register */ + pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); + + if (cache_nbs(pdev, cap_ptr) == -1) { + agp_put_bridge(bridge); + return -ENODEV; + } + + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { + int ret = nforce3_agp_init(pdev); + if (ret) { + agp_put_bridge(bridge); + return ret; + } + } + + if (pdev->vendor == PCI_VENDOR_ID_AL) { + int ret = uli_agp_init(pdev); + if (ret) { + agp_put_bridge(bridge); + return ret; + } + } + + pci_set_drvdata(pdev, bridge); + err = agp_add_bridge(bridge); + if (err < 0) + return err; + + agp_bridges_found++; + return 0; +} + +static void agp_amd64_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + release_mem_region(virt_to_phys(bridge->gatt_table_real), + amd64_aperture_sizes[bridge->aperture_size_idx].size); + agp_remove_bridge(bridge); + agp_put_bridge(bridge); + + agp_bridges_found--; +} + +#ifdef CONFIG_PM + +static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_set_power_state(pdev, 
pci_choose_state(pdev, state)); + + return 0; +} + +static int agp_amd64_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) + nforce3_agp_init(pdev); + + return amd_8151_configure(); +} + +#endif /* CONFIG_PM */ + +static struct pci_device_id agp_amd64_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_8151_0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* ULi M1689 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AL, + .device = PCI_DEVICE_ID_AL_M1689, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* VIA K8T800Pro */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_K8T800PRO_0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* VIA K8T800 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_8385_0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* VIA K8M800 / K8N800 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_8380_0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* VIA K8M890 / K8N890 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_VT3336, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* VIA K8T890 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_3238_0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* VIA K8T800/K8M800/K8N800 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_838X_1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* NForce3 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NFORCE3, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NFORCE3S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* SIS 755 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_755, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* SIS 760 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_760, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + /* ALI/ULI M1695 */ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_AL, + .device = 0x1695, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); + +static const struct pci_device_id agp_amd64_pci_promisc_table[] = { + { PCI_DEVICE_CLASS(0, 0) }, + { } +}; + +static struct pci_driver agp_amd64_pci_driver = { + .name = "agpgart-amd64", + .id_table = agp_amd64_pci_table, + .probe = agp_amd64_probe, + .remove = agp_amd64_remove, +#ifdef CONFIG_PM + .suspend = agp_amd64_suspend, + .resume = agp_amd64_resume, +#endif +}; + + +/* Not static due 
to IOMMU code calling it early. */
+int __init agp_amd64_init(void)
+{
+	int err = 0;
+
+	if (agp_off)
+		return -EINVAL;
+
+	err = pci_register_driver(&agp_amd64_pci_driver);
+	if (err < 0)
+		return err;
+
+	if (agp_bridges_found == 0) {
+		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+#ifdef MODULE
+			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
+#else
+			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
+#endif
+			pci_unregister_driver(&agp_amd64_pci_driver);
+			return -ENODEV;
+		}
+
+		/* First check that we have at least one AMD64 NB */
+		if (!pci_dev_present(amd_nb_misc_ids)) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
+			return -ENODEV;
+		}
+
+		/* Look for any AGP bridge */
+		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+		err = driver_attach(&agp_amd64_pci_driver.driver);
+		if (err == 0 && agp_bridges_found == 0) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
+			err = -ENODEV;
+		}
+	}
+	return err;
+}
+
+static int __init agp_amd64_mod_init(void)
+{
+#ifndef MODULE
+	if (gart_iommu_aperture)
+		return agp_bridges_found ? 0 : -ENODEV;
+#endif
+	return agp_amd64_init();
+}
+
+static void __exit agp_amd64_cleanup(void)
+{
+#ifndef MODULE
+	if (gart_iommu_aperture)
+		return;
+#endif
+	if (aperture_resource)
+		release_resource(aperture_resource);
+	pci_unregister_driver(&agp_amd64_pci_driver);
+}
+
+module_init(agp_amd64_mod_init);
+module_exit(agp_amd64_cleanup);
+
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
+module_param(agp_try_unsupported, bool, 0);
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/char/agp/ati-agp.c b/kernel/drivers/char/agp/ati-agp.c
new file mode 100644
index 000000000..75a9786a7
--- /dev/null
+++ b/kernel/drivers/char/agp/ati-agp.c
@@ -0,0 +1,584 @@
+/*
+ * ATi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <asm/agp.h>
+#include "agp.h"
+
+#define ATI_GART_MMBASE_BAR	1
+#define ATI_RS100_APSIZE	0xac
+#define ATI_RS100_IG_AGPMODE	0xb0
+#define ATI_RS300_APSIZE	0xf8
+#define ATI_RS300_IG_AGPMODE	0xfc
+#define ATI_GART_FEATURE_ID		0x00
+#define ATI_GART_BASE			0x04
+#define ATI_GART_CACHE_SZBASE		0x08
+#define ATI_GART_CACHE_CNTRL		0x0c
+#define ATI_GART_CACHE_ENTRY_CNTRL	0x10
+
+
+static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
+{
+	{2048, 524288, 0x0000000c},
+	{1024, 262144, 0x0000000a},
+	{512, 131072, 0x00000008},
+	{256, 65536, 0x00000006},
+	{128, 32768, 0x00000004},
+	{64, 16384, 0x00000002},
+	{32, 8192, 0x00000000}
+};
+
+static struct gatt_mask ati_generic_masks[] =
+{
+	{ .mask = 1, .type = 0}
+};
+
+
+struct ati_page_map {
+	unsigned long *real;
+	unsigned long __iomem *remapped;
+};
+
+static struct _ati_generic_private {
+	volatile u8 __iomem *registers;
+	struct ati_page_map **gatt_pages;
+	int num_tables;
+} ati_generic_private;
+
+static int ati_create_page_map(struct ati_page_map *page_map)
+{
+	int i, err = 0;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL)
+		return -ENOMEM;
+
+	set_memory_uc((unsigned long)page_map->real, 1);
+	err = map_page_into_agp(virt_to_page(page_map->real));
+	page_map->remapped = page_map->real;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+		writel(agp_bridge->scratch_page, page_map->remapped+i);
+		readl(page_map->remapped+i);	/* PCI Posting.
*/ + } + + return 0; +} + + +static void ati_free_page_map(struct ati_page_map *page_map) +{ + unmap_page_from_agp(virt_to_page(page_map->real)); + set_memory_wb((unsigned long)page_map->real, 1); + free_page((unsigned long) page_map->real); +} + + +static void ati_free_gatt_pages(void) +{ + int i; + struct ati_page_map **tables; + struct ati_page_map *entry; + + tables = ati_generic_private.gatt_pages; + for (i = 0; i < ati_generic_private.num_tables; i++) { + entry = tables[i]; + if (entry != NULL) { + if (entry->real != NULL) + ati_free_page_map(entry); + kfree(entry); + } + } + kfree(tables); +} + + +static int ati_create_gatt_pages(int nr_tables) +{ + struct ati_page_map **tables; + struct ati_page_map *entry; + int retval = 0; + int i; + + tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *),GFP_KERNEL); + if (tables == NULL) + return -ENOMEM; + + for (i = 0; i < nr_tables; i++) { + entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL); + tables[i] = entry; + if (entry == NULL) { + retval = -ENOMEM; + break; + } + retval = ati_create_page_map(entry); + if (retval != 0) + break; + } + ati_generic_private.num_tables = i; + ati_generic_private.gatt_pages = tables; + + if (retval != 0) + ati_free_gatt_pages(); + + return retval; +} + +static int is_r200(void) +{ + if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) || + (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) || + (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) || + (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250)) + return 1; + return 0; +} + +static int ati_fetch_size(void) +{ + int i; + u32 temp; + struct aper_size_info_lvl2 *values; + + if (is_r200()) + pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); + else + pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); + + temp = (temp & 0x0000000e); + values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void ati_tlbflush(struct agp_memory * mem) +{ + writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL); + readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. 
*/ +} + +static void ati_cleanup(void) +{ + struct aper_size_info_lvl2 *previous_size; + u32 temp; + + previous_size = A_SIZE_LVL2(agp_bridge->previous_size); + + /* Write back the previous size and disable gart translation */ + if (is_r200()) { + pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); + } else { + pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); + } + iounmap((volatile u8 __iomem *)ati_generic_private.registers); +} + + +static int ati_configure(void) +{ + phys_addr_t reg; + u32 temp; + + /* Get the memory mapped registers */ + reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR); + ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096); + + if (!ati_generic_private.registers) + return -ENOMEM; + + if (is_r200()) + pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000); + else + pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000); + + /* address to map to */ + /* + agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev, + AGP_APERTURE_BAR); + printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr); + */ + writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID); + readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/ + + /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */ + pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp); + pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14)); + + /* Write out the address of the gatt table */ + writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE); + readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. 
*/ + + return 0; +} + + +#ifdef CONFIG_PM +static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state) +{ + pci_save_state(dev); + pci_set_power_state(dev, PCI_D3hot); + + return 0; +} + +static int agp_ati_resume(struct pci_dev *dev) +{ + pci_set_power_state(dev, PCI_D0); + pci_restore_state(dev); + + return ati_configure(); +} +#endif + +/* + *Since we don't need contiguous memory we just try + * to get the gatt table once + */ + +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ + GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#undef GET_GATT +#define GET_GATT(addr) (ati_generic_private.gatt_pages[\ + GET_PAGE_DIR_IDX(addr)]->remapped) + +static int ati_insert_memory(struct agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + unsigned long __iomem *cur_gatt; + unsigned long addr; + int mask_type; + + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; + + mask_type = agp_generic_type_to_mask_type(mem->bridge, type); + if (mask_type != 0 || type != mem->type) + return -EINVAL; + + if (mem->page_count == 0) + return 0; + + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + + j = pg_start; + while (j < (pg_start + mem->page_count)) { + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr)))) + return -EBUSY; + j++; + } + + if (!mem->is_flushed) { + /*CACHE_FLUSH(); */ + global_cache_flush(); + mem->is_flushed = true; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + writel(agp_bridge->driver->mask_memory(agp_bridge, + page_to_phys(mem->pages[i]), + mem->type), + cur_gatt+GET_GATT_OFF(addr)); + } + readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ + agp_bridge->driver->tlb_flush(mem); + return 0; +} + +static int ati_remove_memory(struct agp_memory * mem, off_t pg_start, + int type) +{ + int i; + unsigned long __iomem *cur_gatt; + unsigned long addr; + int mask_type; + + mask_type = agp_generic_type_to_mask_type(mem->bridge, type); + if (mask_type != 0 || type != mem->type) + return -EINVAL; + + if (mem->page_count == 0) + return 0; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); + } + + readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ + agp_bridge->driver->tlb_flush(mem); + return 0; +} + +static int ati_create_gatt_table(struct agp_bridge_data *bridge) +{ + struct aper_size_info_lvl2 *value; + struct ati_page_map page_dir; + unsigned long __iomem *cur_gatt; + unsigned long addr; + int retval; + u32 temp; + int i; + struct aper_size_info_lvl2 *current_size; + + value = A_SIZE_LVL2(agp_bridge->current_size); + retval = ati_create_page_map(&page_dir); + if (retval != 0) + return retval; + + retval = ati_create_gatt_pages(value->num_entries / 1024); + if (retval != 0) { + ati_free_page_map(&page_dir); + return retval; + } + + agp_bridge->gatt_table_real = (u32 *)page_dir.real; + agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; + agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); + + /* Write out the size register */ + current_size = A_SIZE_LVL2(agp_bridge->current_size); + + if (is_r200()) { + pci_read_config_dword(agp_bridge->dev, 
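+		/*
+		 * Read-modify-write: keep the register's other bits,
+		 * replace the size field (bits 3:1, cleared by the
+		 * ~0x0e mask) with the chosen size_value, and set
+		 * bit 0 -- by all appearances the GART enable bit --
+		 * then read back once to confirm.
+		 */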
ATI_RS100_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); + pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); + pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); + } else { + pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); + pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); + pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); + } + + /* + * Get the address for the gart region. + * This is a bus address even on the alpha, b/c its + * used to program the agp master not the cpu + */ + addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); + agp_bridge->gart_bus_addr = addr; + + /* Calculate the agp offset */ + for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { + writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1, + page_dir.remapped+GET_PAGE_DIR_OFF(addr)); + readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ + } + + for (i = 0; i < value->num_entries; i++) { + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = GET_GATT(addr); + writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); + } + + return 0; +} + +static int ati_free_gatt_table(struct agp_bridge_data *bridge) +{ + struct ati_page_map page_dir; + + page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; + page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; + + ati_free_gatt_pages(); + ati_free_page_map(&page_dir); + return 0; +} + +static const struct agp_bridge_driver ati_generic_bridge = { + .owner = THIS_MODULE, + .aperture_sizes = ati_generic_sizes, + .size_type = LVL2_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = ati_configure, + .fetch_size = ati_fetch_size, + .cleanup = ati_cleanup, + .tlb_flush = ati_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = ati_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = ati_create_gatt_table, + .free_gatt_table = ati_free_gatt_table, + .insert_memory = ati_insert_memory, + .remove_memory = ati_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + + +static struct agp_device_ids ati_agp_device_ids[] = +{ + { + .device_id = PCI_DEVICE_ID_ATI_RS100, + .chipset_name = "IGP320/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS200, + .chipset_name = "IGP330/340/345/350/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS200_B, + .chipset_name = "IGP345M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS250, + .chipset_name = "IGP7000/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS300_100, + .chipset_name = "IGP9100/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS300_133, + .chipset_name = "IGP9100/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS300_166, + .chipset_name = "IGP9100/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS300_200, + .chipset_name = "IGP9100/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS350_133, + .chipset_name = "IGP9000/M", + }, + { + .device_id = PCI_DEVICE_ID_ATI_RS350_200, + .chipset_name = "IGP9100/M", + }, + { }, /* dummy final entry, always present */ +}; + +static int 
agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct agp_device_ids *devs = ati_agp_device_ids; + struct agp_bridge_data *bridge; + u8 cap_ptr; + int j; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + /* probe for known chipsets */ + for (j = 0; devs[j].chipset_name; j++) { + if (pdev->device == devs[j].device_id) + goto found; + } + + dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n", + pdev->vendor, pdev->device); + return -ENODEV; + +found: + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + bridge->driver = &ati_generic_bridge; + + dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name); + + /* Fill in the mode register */ + pci_read_config_dword(pdev, + bridge->capndx+PCI_AGP_STATUS, + &bridge->mode); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_ati_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +static struct pci_device_id agp_ati_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_ATI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); + +static struct pci_driver agp_ati_pci_driver = { + .name = "agpgart-ati", + .id_table = agp_ati_pci_table, + .probe = agp_ati_probe, + .remove = agp_ati_remove, +#ifdef CONFIG_PM + .suspend = agp_ati_suspend, + .resume = agp_ati_resume, +#endif +}; + +static int __init agp_ati_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_ati_pci_driver); +} + +static void __exit agp_ati_cleanup(void) +{ + pci_unregister_driver(&agp_ati_pci_driver); +} + +module_init(agp_ati_init); +module_exit(agp_ati_cleanup); + +MODULE_AUTHOR("Dave Jones"); +MODULE_LICENSE("GPL and additional rights"); + diff --git a/kernel/drivers/char/agp/backend.c b/kernel/drivers/char/agp/backend.c new file mode 100644 index 000000000..38ffb281d --- /dev/null +++ b/kernel/drivers/char/agp/backend.c @@ -0,0 +1,366 @@ +/* + * AGPGART driver backend routines. + * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2002-2003 Dave Jones. + * Copyright (C) 1999 Jeff Hartmann. + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "agp.h" + +/* Due to XFree86 brain-damage, we can't go to 1.0 until they + * fix some real stupidity. It's only by chance we can bump + * past 0.99 at all due to some boolean logic error. */ +#define AGPGART_VERSION_MAJOR 0 +#define AGPGART_VERSION_MINOR 103 +static const struct agp_version agp_current_version = +{ + .major = AGPGART_VERSION_MAJOR, + .minor = AGPGART_VERSION_MINOR, +}; + +struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) = + &agp_generic_find_bridge; + +struct agp_bridge_data *agp_bridge; +LIST_HEAD(agp_bridges); +EXPORT_SYMBOL(agp_bridge); +EXPORT_SYMBOL(agp_bridges); +EXPORT_SYMBOL(agp_find_bridge); + +/** + * agp_backend_acquire - attempt to acquire an agp backend. + * + */ +struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge; + + bridge = agp_find_bridge(pdev); + + if (!bridge) + return NULL; + + if (atomic_read(&bridge->agp_in_use)) + return NULL; + atomic_inc(&bridge->agp_in_use); + return bridge; +} +EXPORT_SYMBOL(agp_backend_acquire); + + +/** + * agp_backend_release - release the lock on the agp backend. + * + * The caller must insure that the graphics aperture translation table + * is read for use by another entity. + * + * (Ensure that all memory it bound is unbound.) 
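+ *
+ * Typical pairing, sketched (error handling elided):
+ *
+ *	bridge = agp_backend_acquire(pdev);
+ *	if (!bridge)
+ *		return;		- absent or already in use
+ *	... bind and use AGP memory through the bridge ...
+ *	agp_backend_release(bridge);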
+ */ +void agp_backend_release(struct agp_bridge_data *bridge) +{ + + if (bridge) + atomic_dec(&bridge->agp_in_use); +} +EXPORT_SYMBOL(agp_backend_release); + + +static const struct { int mem, agp; } maxes_table[] = { + {0, 0}, + {32, 4}, + {64, 28}, + {128, 96}, + {256, 204}, + {512, 440}, + {1024, 942}, + {2048, 1920}, + {4096, 3932} +}; + +static int agp_find_max(void) +{ + long memory, index, result; + +#if PAGE_SHIFT < 20 + memory = totalram_pages >> (20 - PAGE_SHIFT); +#else + memory = totalram_pages << (PAGE_SHIFT - 20); +#endif + index = 1; + + while ((memory > maxes_table[index].mem) && (index < 8)) + index++; + + result = maxes_table[index - 1].agp + + ( (memory - maxes_table[index - 1].mem) * + (maxes_table[index].agp - maxes_table[index - 1].agp)) / + (maxes_table[index].mem - maxes_table[index - 1].mem); + + result = result << (20 - PAGE_SHIFT); + return result; +} + + +static int agp_backend_initialize(struct agp_bridge_data *bridge) +{ + int size_value, rc, got_gatt=0, got_keylist=0; + + bridge->max_memory_agp = agp_find_max(); + bridge->version = &agp_current_version; + + if (bridge->driver->needs_scratch_page) { + struct page *page = bridge->driver->agp_alloc_page(bridge); + + if (!page) { + dev_err(&bridge->dev->dev, + "can't get memory for scratch page\n"); + return -ENOMEM; + } + + bridge->scratch_page_page = page; + bridge->scratch_page_dma = page_to_phys(page); + + bridge->scratch_page = bridge->driver->mask_memory(bridge, + bridge->scratch_page_dma, 0); + } + + size_value = bridge->driver->fetch_size(); + if (size_value == 0) { + dev_err(&bridge->dev->dev, "can't determine aperture size\n"); + rc = -EINVAL; + goto err_out; + } + if (bridge->driver->create_gatt_table(bridge)) { + dev_err(&bridge->dev->dev, + "can't get memory for graphics translation table\n"); + rc = -ENOMEM; + goto err_out; + } + got_gatt = 1; + + bridge->key_list = vzalloc(PAGE_SIZE * 4); + if (bridge->key_list == NULL) { + dev_err(&bridge->dev->dev, + "can't allocate memory for key lists\n"); + rc = -ENOMEM; + goto err_out; + } + got_keylist = 1; + + /* FIXME vmalloc'd memory not guaranteed contiguous */ + + if (bridge->driver->configure()) { + dev_err(&bridge->dev->dev, "error configuring host chipset\n"); + rc = -EINVAL; + goto err_out; + } + INIT_LIST_HEAD(&bridge->mapped_list); + spin_lock_init(&bridge->mapped_lock); + + return 0; + +err_out: + if (bridge->driver->needs_scratch_page) { + struct page *page = bridge->scratch_page_page; + + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); + } + if (got_gatt) + bridge->driver->free_gatt_table(bridge); + if (got_keylist) { + vfree(bridge->key_list); + bridge->key_list = NULL; + } + return rc; +} + +/* cannot be __exit b/c as it could be called from __init code */ +static void agp_backend_cleanup(struct agp_bridge_data *bridge) +{ + if (bridge->driver->cleanup) + bridge->driver->cleanup(); + if (bridge->driver->free_gatt_table) + bridge->driver->free_gatt_table(bridge); + + vfree(bridge->key_list); + bridge->key_list = NULL; + + if (bridge->driver->agp_destroy_page && + bridge->driver->needs_scratch_page) { + struct page *page = bridge->scratch_page_page; + + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); + } +} + +/* When we remove the global variable agp_bridge from all drivers + * then agp_alloc_bridge and agp_generic_find_bridge need to be updated + */ + +struct agp_bridge_data 
*agp_alloc_bridge(void) +{ + struct agp_bridge_data *bridge; + + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return NULL; + + atomic_set(&bridge->agp_in_use, 0); + atomic_set(&bridge->current_memory_agp, 0); + + if (list_empty(&agp_bridges)) + agp_bridge = bridge; + + return bridge; +} +EXPORT_SYMBOL(agp_alloc_bridge); + + +void agp_put_bridge(struct agp_bridge_data *bridge) +{ + kfree(bridge); + + if (list_empty(&agp_bridges)) + agp_bridge = NULL; +} +EXPORT_SYMBOL(agp_put_bridge); + + +int agp_add_bridge(struct agp_bridge_data *bridge) +{ + int error; + + if (agp_off) { + error = -ENODEV; + goto err_put_bridge; + } + + if (!bridge->dev) { + printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); + error = -EINVAL; + goto err_put_bridge; + } + + /* Grab reference on the chipset driver. */ + if (!try_module_get(bridge->driver->owner)) { + dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); + error = -EINVAL; + goto err_put_bridge; + } + + error = agp_backend_initialize(bridge); + if (error) { + dev_info(&bridge->dev->dev, + "agp_backend_initialize() failed\n"); + goto err_out; + } + + if (list_empty(&agp_bridges)) { + error = agp_frontend_initialize(); + if (error) { + dev_info(&bridge->dev->dev, + "agp_frontend_initialize() failed\n"); + goto frontend_err; + } + + dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", + bridge->driver->fetch_size(), bridge->gart_bus_addr); + + } + + list_add(&bridge->list, &agp_bridges); + return 0; + +frontend_err: + agp_backend_cleanup(bridge); +err_out: + module_put(bridge->driver->owner); +err_put_bridge: + agp_put_bridge(bridge); + return error; +} +EXPORT_SYMBOL_GPL(agp_add_bridge); + + +void agp_remove_bridge(struct agp_bridge_data *bridge) +{ + agp_backend_cleanup(bridge); + list_del(&bridge->list); + if (list_empty(&agp_bridges)) + agp_frontend_cleanup(); + module_put(bridge->driver->owner); +} +EXPORT_SYMBOL_GPL(agp_remove_bridge); + +int agp_off; +int agp_try_unsupported_boot; +EXPORT_SYMBOL(agp_off); +EXPORT_SYMBOL(agp_try_unsupported_boot); + +static int __init agp_init(void) +{ + if (!agp_off) + printk(KERN_INFO "Linux agpgart interface v%d.%d\n", + AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); + return 0; +} + +static void __exit agp_exit(void) +{ +} + +#ifndef MODULE +static __init int agp_setup(char *s) +{ + if (!strcmp(s,"off")) + agp_off = 1; + if (!strcmp(s,"try_unsupported")) + agp_try_unsupported_boot = 1; + return 1; +} +__setup("agp=", agp_setup); +#endif + +MODULE_AUTHOR("Dave Jones, Jeff Hartmann"); +MODULE_DESCRIPTION("AGP GART driver"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_ALIAS_MISCDEV(AGPGART_MINOR); + +module_init(agp_init); +module_exit(agp_exit); + diff --git a/kernel/drivers/char/agp/compat_ioctl.c b/kernel/drivers/char/agp/compat_ioctl.c new file mode 100644 index 000000000..a48e05b31 --- /dev/null +++ b/kernel/drivers/char/agp/compat_ioctl.c @@ -0,0 +1,287 @@ +/* + * AGPGART driver frontend compatibility ioctls + * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2002-2003 Dave Jones + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include "agp.h" +#include "compat_ioctl.h" + +static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_info32 userinfo; + struct agp_kern_info kerninfo; + + agp_copy_info(agp_bridge, &kerninfo); + + userinfo.version.major = kerninfo.version.major; + userinfo.version.minor = kerninfo.version.minor; + userinfo.bridge_id = kerninfo.device->vendor | + (kerninfo.device->device << 16); + userinfo.agp_mode = kerninfo.mode; + userinfo.aper_base = (compat_long_t)kerninfo.aper_base; + userinfo.aper_size = kerninfo.aper_size; + userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; + userinfo.pg_used = kerninfo.current_memory; + + if (copy_to_user(arg, &userinfo, sizeof(userinfo))) + return -EFAULT; + + return 0; +} + +static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_region32 ureserve; + struct agp_region kreserve; + struct agp_client *client; + struct agp_file_private *client_priv; + + DBG(""); + if (copy_from_user(&ureserve, arg, sizeof(ureserve))) + return -EFAULT; + + if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32)) + return -EFAULT; + + kreserve.pid = ureserve.pid; + kreserve.seg_count = ureserve.seg_count; + + client = agp_find_client_by_pid(kreserve.pid); + + if (kreserve.seg_count == 0) { + /* remove a client */ + client_priv = agp_find_private(kreserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); + } + if (client == NULL) { + /* client is already removed */ + return 0; + } + return agp_remove_client(kreserve.pid); + } else { + struct agp_segment32 *usegment; + struct agp_segment *ksegment; + int seg; + + if (ureserve.seg_count >= 16384) + return -EINVAL; + + usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL); + if (!usegment) + return -ENOMEM; + + ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL); + if (!ksegment) { + kfree(usegment); + return -ENOMEM; + } + + if (copy_from_user(usegment, (void __user *) ureserve.seg_list, + sizeof(*usegment) * ureserve.seg_count)) { + kfree(usegment); + kfree(ksegment); + return -EFAULT; + } + + for (seg = 0; seg < ureserve.seg_count; seg++) { + ksegment[seg].pg_start = usegment[seg].pg_start; + ksegment[seg].pg_count = 
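+		/*
+		 * Widen field by field: struct agp_segment32 uses fixed
+		 * 32-bit compat types while struct agp_segment uses
+		 * native off_t/size_t, so on a 64-bit kernel the two
+		 * layouts differ and copying the array wholesale would
+		 * be wrong.
+		 */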
usegment[seg].pg_count; + ksegment[seg].prot = usegment[seg].prot; + } + + kfree(usegment); + kreserve.seg_list = ksegment; + + if (client == NULL) { + /* Create the client and add the segment */ + client = agp_create_client(kreserve.pid); + + if (client == NULL) { + kfree(ksegment); + return -ENOMEM; + } + client_priv = agp_find_private(kreserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); + } + } + return agp_create_segment(client, &kreserve); + } + /* Will never really happen */ + return -EINVAL; +} + +static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_memory *memory; + struct agp_allocate32 alloc; + + DBG(""); + if (copy_from_user(&alloc, arg, sizeof(alloc))) + return -EFAULT; + + memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); + + if (memory == NULL) + return -ENOMEM; + + alloc.key = memory->key; + alloc.physical = memory->physical; + + if (copy_to_user(arg, &alloc, sizeof(alloc))) { + agp_free_memory_wrap(memory); + return -EFAULT; + } + return 0; +} + +static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_bind32 bind_info; + struct agp_memory *memory; + + DBG(""); + if (copy_from_user(&bind_info, arg, sizeof(bind_info))) + return -EFAULT; + + memory = agp_find_mem_by_key(bind_info.key); + + if (memory == NULL) + return -EINVAL; + + return agp_bind_memory(memory, bind_info.pg_start); +} + +static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_memory *memory; + struct agp_unbind32 unbind; + + DBG(""); + if (copy_from_user(&unbind, arg, sizeof(unbind))) + return -EFAULT; + + memory = agp_find_mem_by_key(unbind.key); + + if (memory == NULL) + return -EINVAL; + + return agp_unbind_memory(memory); +} + +long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct agp_file_private *curr_priv = file->private_data; + int ret_val = -ENOTTY; + + mutex_lock(&(agp_fe.agp_mutex)); + + if ((agp_fe.current_controller == NULL) && + (cmd != AGPIOC_ACQUIRE32)) { + ret_val = -EINVAL; + goto ioctl_out; + } + if ((agp_fe.backend_acquired != true) && + (cmd != AGPIOC_ACQUIRE32)) { + ret_val = -EBUSY; + goto ioctl_out; + } + if (cmd != AGPIOC_ACQUIRE32) { + if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { + ret_val = -EPERM; + goto ioctl_out; + } + /* Use the original pid of the controller, + * in case it's threaded */ + + if (agp_fe.current_controller->pid != curr_priv->my_pid) { + ret_val = -EBUSY; + goto ioctl_out; + } + } + + switch (cmd) { + case AGPIOC_INFO32: + ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_ACQUIRE32: + ret_val = agpioc_acquire_wrap(curr_priv); + break; + + case AGPIOC_RELEASE32: + ret_val = agpioc_release_wrap(curr_priv); + break; + + case AGPIOC_SETUP32: + ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_RESERVE32: + ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_PROTECT32: + ret_val = agpioc_protect_wrap(curr_priv); + break; + + case AGPIOC_ALLOCATE32: + ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_DEALLOCATE32: + ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); + break; + + case AGPIOC_BIND32: + ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg); + break; + + case 
AGPIOC_UNBIND32: + ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_CHIPSET_FLUSH32: + break; + } + +ioctl_out: + DBG("ioctl returns %d\n", ret_val); + mutex_unlock(&(agp_fe.agp_mutex)); + return ret_val; +} + diff --git a/kernel/drivers/char/agp/compat_ioctl.h b/kernel/drivers/char/agp/compat_ioctl.h new file mode 100644 index 000000000..f30e0fd97 --- /dev/null +++ b/kernel/drivers/char/agp/compat_ioctl.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AGP_COMPAT_IOCTL_H +#define _AGP_COMPAT_IOCTL_H + +#include +#include + +#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t) +#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1) +#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2) +#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t) +#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t) +#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t) +#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t) +#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t) +#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t) +#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t) +#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10) + +struct agp_info32 { + struct agp_version version; /* version of the driver */ + u32 bridge_id; /* bridge vendor/device */ + u32 agp_mode; /* mode info of bridge */ + compat_long_t aper_base; /* base of aperture */ + compat_size_t aper_size; /* size of aperture */ + compat_size_t pg_total; /* max pages (swap + system) */ + compat_size_t pg_system; /* max pages (system) */ + compat_size_t pg_used; /* current pages used */ +}; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... 
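+ *
+ * (The compat_off_t/compat_size_t/compat_int_t members give these
+ * structures the exact layout a 32-bit process passes through the
+ * 32-bit ioctl numbers defined above.)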
+ */ +struct agp_segment32 { + compat_off_t pg_start; /* starting page to populate */ + compat_size_t pg_count; /* number of pages */ + compat_int_t prot; /* prot flags for mmap */ +}; + +struct agp_region32 { + compat_pid_t pid; /* pid of process */ + compat_size_t seg_count; /* number of segments */ + struct agp_segment32 *seg_list; +}; + +struct agp_allocate32 { + compat_int_t key; /* tag of allocation */ + compat_size_t pg_count; /* number of pages */ + u32 type; /* 0 == normal, other devspec */ + u32 physical; /* device specific (some devices + * need a phys address of the + * actual page behind the gatt + * table) */ +}; + +struct agp_bind32 { + compat_int_t key; /* tag of allocation */ + compat_off_t pg_start; /* starting page to populate */ +}; + +struct agp_unbind32 { + compat_int_t key; /* tag of allocation */ + u32 priority; /* priority for paging out */ +}; + +extern struct agp_front_data agp_fe; + +int agpioc_acquire_wrap(struct agp_file_private *priv); +int agpioc_release_wrap(struct agp_file_private *priv); +int agpioc_protect_wrap(struct agp_file_private *priv); +int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg); +int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg); +struct agp_file_private *agp_find_private(pid_t pid); +struct agp_client *agp_create_client(pid_t id); +int agp_remove_client(pid_t id); +int agp_create_segment(struct agp_client *client, struct agp_region *region); +void agp_free_memory_wrap(struct agp_memory *memory); +struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); +struct agp_memory *agp_find_mem_by_key(int key); +struct agp_client *agp_find_client_by_pid(pid_t id); + +#endif /* _AGP_COMPAT_H */ diff --git a/kernel/drivers/char/agp/efficeon-agp.c b/kernel/drivers/char/agp/efficeon-agp.c new file mode 100644 index 000000000..533cb6d22 --- /dev/null +++ b/kernel/drivers/char/agp/efficeon-agp.c @@ -0,0 +1,478 @@ +/* + * Transmeta's Efficeon AGPGART driver. + * + * Based upon a diff by Linus around November '02. + * + * Ported to the 2.6 kernel by Carlos Puchol + * and H. Peter Anvin . + */ + +/* + * NOTE-cpg-040217: + * + * - when compiled as a module, after loading the module, + * it will refuse to unload, indicating it is in use, + * when it is not. + * - no s3 (suspend to ram) testing. + * - tested on the efficeon integrated nothbridge for tens + * of iterations of starting x and glxgears. + * - tested with radeon 9000 and radeon mobility m9 cards + * - tested with c3/c4 enabled (with the mobility m9 card) + */ + +#include +#include +#include +#include +#include +#include +#include +#include "agp.h" +#include "intel-agp.h" + +/* + * The real differences to the generic AGP code is + * in the GART mappings - a two-level setup with the + * first level being an on-chip 64-entry table. + * + * The page array is filled through the ATTPAGE register + * (Aperture Translation Table Page Register) at 0xB8. Bits: + * 31:20: physical page address + * 11:9: Page Attribute Table Index (PATI) + * must match the PAT index for the + * mapped pages (the 2nd level page table pages + * themselves should be just regular WB-cacheable, + * so this is normally zero.) + * 8: Present + * 7:6: reserved, write as zero + * 5:0: GATT directory index: which 1st-level entry + * + * The Efficeon AGP spec requires pages to be WB-cacheable + * but to be explicitly CLFLUSH'd after any changes. 
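+ *
+ * Worked example with illustrative values: a 2nd-level table page at
+ * physical address 0x12345000, installed as directory slot 3, is
+ * programmed as
+ *
+ *	0x12345000 | EFFICEON_PATI | EFFICEON_PRESENT | 3
+ *	  = 0x12345000 | (0 << 9) | (1 << 8) | 3 = 0x12345103
+ *
+ * matching what efficeon_create_gatt_table() writes into ATTPAGE.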
+ */ +#define EFFICEON_ATTPAGE 0xb8 +#define EFFICEON_L1_SIZE 64 /* Number of PDE pages */ + +#define EFFICEON_PATI (0 << 9) +#define EFFICEON_PRESENT (1 << 8) + +static struct _efficeon_private { + unsigned long l1_table[EFFICEON_L1_SIZE]; +} efficeon_private; + +static const struct gatt_mask efficeon_generic_masks[] = +{ + {.mask = 0x00000001, .type = 0} +}; + +/* This function does the same thing as mask_memory() for this chipset... */ +static inline unsigned long efficeon_mask_memory(struct page *page) +{ + unsigned long addr = page_to_phys(page); + return addr | 0x00000001; +} + +static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] = +{ + {256, 65536, 0}, + {128, 32768, 32}, + {64, 16384, 48}, + {32, 8192, 56} +}; + +/* + * Control interfaces are largely identical to + * the legacy Intel 440BX.. + */ + +static int efficeon_fetch_size(void) +{ + int i; + u16 temp; + struct aper_size_info_lvl2 *values; + + pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); + values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void efficeon_tlbflush(struct agp_memory * mem) +{ + printk(KERN_DEBUG PFX "efficeon_tlbflush()\n"); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); +} + +static void efficeon_cleanup(void) +{ + u16 temp; + struct aper_size_info_lvl2 *previous_size; + + printk(KERN_DEBUG PFX "efficeon_cleanup()\n"); + previous_size = A_SIZE_LVL2(agp_bridge->previous_size); + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, + previous_size->size_value); +} + +static int efficeon_configure(void) +{ + u16 temp2; + struct aper_size_info_lvl2 *current_size; + + printk(KERN_DEBUG PFX "efficeon_configure()\n"); + + current_size = A_SIZE_LVL2(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); + + /* paccfg/nbxcfg */ + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, + (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11)); + /* clear any possible error conditions */ + pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); + return 0; +} + +static int efficeon_free_gatt_table(struct agp_bridge_data *bridge) +{ + int index, freed = 0; + + for (index = 0; index < EFFICEON_L1_SIZE; index++) { + unsigned long page = efficeon_private.l1_table[index]; + if (page) { + efficeon_private.l1_table[index] = 0; + ClearPageReserved(virt_to_page((char *)page)); + free_page(page); + freed++; + } + printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n", + agp_bridge->dev, EFFICEON_ATTPAGE, index); + pci_write_config_dword(agp_bridge->dev, + EFFICEON_ATTPAGE, index); + } + printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed); + return 0; +} + + +/* + * Since we don't need contiguous memory we just try + * to get the gatt table 
once + */ + +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ + GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#undef GET_GATT +#define GET_GATT(addr) (efficeon_private.gatt_pages[\ + GET_PAGE_DIR_IDX(addr)]->remapped) + +static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) +{ + int index; + const int pati = EFFICEON_PATI; + const int present = EFFICEON_PRESENT; + const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; + int num_entries, l1_pages; + + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; + + printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries); + + /* There are 2^10 PTE pages per PDE page */ + BUG_ON(num_entries & 0x3ff); + l1_pages = num_entries >> 10; + + for (index = 0 ; index < l1_pages ; index++) { + int offset; + unsigned long page; + unsigned long value; + + page = efficeon_private.l1_table[index]; + BUG_ON(page); + + page = get_zeroed_page(GFP_KERNEL); + if (!page) { + efficeon_free_gatt_table(agp_bridge); + return -ENOMEM; + } + SetPageReserved(virt_to_page((char *)page)); + + for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) + clflush((char *)page+offset); + + efficeon_private.l1_table[index] = page; + + value = virt_to_phys((unsigned long *)page) | pati | present | index; + + pci_write_config_dword(agp_bridge->dev, + EFFICEON_ATTPAGE, value); + } + + return 0; +} + +static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type) +{ + int i, count = mem->page_count, num_entries; + unsigned int *page, *last_page; + const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; + const unsigned long clflush_mask = ~(clflush_chunk-1); + + printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count); + + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + if (type != 0 || mem->type != 0) + return -EINVAL; + + if (!mem->is_flushed) { + global_cache_flush(); + mem->is_flushed = true; + } + + last_page = NULL; + for (i = 0; i < count; i++) { + int index = pg_start + i; + unsigned long insert = efficeon_mask_memory(mem->pages[i]); + + page = (unsigned int *) efficeon_private.l1_table[index >> 10]; + + if (!page) + continue; + + page += (index & 0x3ff); + *page = insert; + + /* clflush is slow, so don't clflush until we have to */ + if (last_page && + (((unsigned long)page^(unsigned long)last_page) & + clflush_mask)) + clflush(last_page); + + last_page = page; + } + + if ( last_page ) + clflush(last_page); + + agp_bridge->driver->tlb_flush(mem); + return 0; +} + +static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type) +{ + int i, count = mem->page_count, num_entries; + + printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count); + + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; + + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + if (type != 0 || mem->type != 0) + return -EINVAL; + + for (i = 0; i < count; i++) { + int index = pg_start + i; + unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10]; + + if (!page) + continue; + page += (index & 0x3ff); + *page = 0; + } + agp_bridge->driver->tlb_flush(mem); + return 0; +} + + +static const struct agp_bridge_driver efficeon_driver = { + .owner = THIS_MODULE, + .aperture_sizes = efficeon_generic_sizes, + .size_type = LVL2_APER_SIZE, + 
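+	// (Aside: this const ops table is the chipset "vtable" -- the
+	// generic agpgart core drives the hardware only through these
+	// hooks, so supporting a new bridge means filling in a table
+	// like this one and registering a matching pci_driver.)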
.num_aperture_sizes = 4, + .configure = efficeon_configure, + .fetch_size = efficeon_fetch_size, + .cleanup = efficeon_cleanup, + .tlb_flush = efficeon_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = efficeon_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + + // Efficeon-specific GATT table setup / populate / teardown + .create_gatt_table = efficeon_create_gatt_table, + .free_gatt_table = efficeon_free_gatt_table, + .insert_memory = efficeon_insert_memory, + .remove_memory = efficeon_remove_memory, + .cant_use_aperture = false, // true might be faster? + + // Generic + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static int agp_efficeon_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr; + struct resource *r; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + /* Probe for Efficeon controller */ + if (pdev->device != PCI_DEVICE_ID_EFFICEON) { + printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n", + pdev->device); + return -ENODEV; + } + + printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n"); + + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->driver = &efficeon_driver; + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + /* + * If the device has not been properly setup, the following will catch + * the problem and should stop the system from crashing. + * 20030610 - hamish@zot.org + */ + if (pci_enable_device(pdev)) { + printk(KERN_ERR PFX "Unable to Enable PCI device\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + + /* + * The following fixes the case where the BIOS has "forgotten" to + * provide an address range for the GART. 
+ * 20030610 - hamish@zot.org + */ + r = &pdev->resource[0]; + if (!r->start && r->end) { + if (pci_assign_resource(pdev, 0)) { + printk(KERN_ERR PFX "could not assign resource 0\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + } + + /* Fill in the mode register */ + if (cap_ptr) { + pci_read_config_dword(pdev, + bridge->capndx+PCI_AGP_STATUS, + &bridge->mode); + } + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_efficeon_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +#ifdef CONFIG_PM +static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state) +{ + return 0; +} + +static int agp_efficeon_resume(struct pci_dev *pdev) +{ + printk(KERN_DEBUG PFX "agp_efficeon_resume()\n"); + return efficeon_configure(); +} +#endif + +static struct pci_device_id agp_efficeon_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_TRANSMETA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table); + +static struct pci_driver agp_efficeon_pci_driver = { + .name = "agpgart-efficeon", + .id_table = agp_efficeon_pci_table, + .probe = agp_efficeon_probe, + .remove = agp_efficeon_remove, +#ifdef CONFIG_PM + .suspend = agp_efficeon_suspend, + .resume = agp_efficeon_resume, +#endif +}; + +static int __init agp_efficeon_init(void) +{ + static int agp_initialised=0; + + if (agp_off) + return -EINVAL; + + if (agp_initialised == 1) + return 0; + agp_initialised=1; + + return pci_register_driver(&agp_efficeon_pci_driver); +} + +static void __exit agp_efficeon_cleanup(void) +{ + pci_unregister_driver(&agp_efficeon_pci_driver); +} + +module_init(agp_efficeon_init); +module_exit(agp_efficeon_cleanup); + +MODULE_AUTHOR("Carlos Puchol "); +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/frontend.c b/kernel/drivers/char/agp/frontend.c new file mode 100644 index 000000000..09f17eb73 --- /dev/null +++ b/kernel/drivers/char/agp/frontend.c @@ -0,0 +1,1068 @@ +/* + * AGPGART driver frontend + * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2002-2003 Dave Jones + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "agp.h" + +struct agp_front_data agp_fe; + +struct agp_memory *agp_find_mem_by_key(int key) +{ + struct agp_memory *curr; + + if (agp_fe.current_controller == NULL) + return NULL; + + curr = agp_fe.current_controller->pool; + + while (curr != NULL) { + if (curr->key == key) + break; + curr = curr->next; + } + + DBG("key=%d -> mem=%p", key, curr); + return curr; +} + +static void agp_remove_from_pool(struct agp_memory *temp) +{ + struct agp_memory *prev; + struct agp_memory *next; + + /* Check to see if this is even in the memory pool */ + + DBG("mem=%p", temp); + if (agp_find_mem_by_key(temp->key) != NULL) { + next = temp->next; + prev = temp->prev; + + if (prev != NULL) { + prev->next = next; + if (next != NULL) + next->prev = prev; + + } else { + /* This is the first item on the list */ + if (next != NULL) + next->prev = NULL; + + agp_fe.current_controller->pool = next; + } + } +} + +/* + * Routines for managing each client's segment list - + * These routines handle adding and removing segments + * to each auth'ed client. + */ + +static struct +agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client, + unsigned long offset, + int size, pgprot_t page_prot) +{ + struct agp_segment_priv *seg; + int num_segments, i; + off_t pg_start; + size_t pg_count; + + pg_start = offset / 4096; + pg_count = size / 4096; + seg = *(client->segments); + num_segments = client->num_segments; + + for (i = 0; i < client->num_segments; i++) { + if ((seg[i].pg_start == pg_start) && + (seg[i].pg_count == pg_count) && + (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { + return seg + i; + } + } + + return NULL; +} + +static void agp_remove_seg_from_client(struct agp_client *client) +{ + DBG("client=%p", client); + + if (client->segments != NULL) { + if (*(client->segments) != NULL) { + DBG("Freeing %p from client %p", *(client->segments), client); + kfree(*(client->segments)); + } + DBG("Freeing %p from client %p", client->segments, client); + kfree(client->segments); + client->segments = NULL; + } +} + +static void agp_add_seg_to_client(struct agp_client *client, + struct agp_segment_priv ** seg, int num_segments) +{ + struct agp_segment_priv **prev_seg; + + prev_seg = client->segments; + + if (prev_seg != NULL) + agp_remove_seg_from_client(client); + + DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client); + client->num_segments = num_segments; + client->segments = seg; +} + +static pgprot_t agp_convert_mmap_flags(int prot) +{ + unsigned long prot_bits; + + prot_bits = calc_vm_prot_bits(prot) | VM_SHARED; + return vm_get_page_prot(prot_bits); +} + +int agp_create_segment(struct agp_client *client, struct agp_region *region) +{ + struct agp_segment_priv **ret_seg; + struct agp_segment_priv *seg; + struct agp_segment *user_seg; + size_t i; + + seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); + if (seg == NULL) { + kfree(region->seg_list); + region->seg_list = NULL; + return -ENOMEM; + } + user_seg = region->seg_list; + + for (i = 0; i < region->seg_count; i++) { + seg[i].pg_start = user_seg[i].pg_start; + seg[i].pg_count = user_seg[i].pg_count; + seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); + } + kfree(region->seg_list); + region->seg_list = NULL; + + ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); + if (ret_seg == NULL) { + kfree(seg); + return -ENOMEM; + } + *ret_seg = 
seg; + agp_add_seg_to_client(client, ret_seg, region->seg_count); + return 0; +} + +/* End - Routines for managing each client's segment list */ + +/* This function must only be called when current_controller != NULL */ +static void agp_insert_into_pool(struct agp_memory * temp) +{ + struct agp_memory *prev; + + prev = agp_fe.current_controller->pool; + + if (prev != NULL) { + prev->prev = temp; + temp->next = prev; + } + agp_fe.current_controller->pool = temp; +} + + +/* File private list routines */ + +struct agp_file_private *agp_find_private(pid_t pid) +{ + struct agp_file_private *curr; + + curr = agp_fe.file_priv_list; + + while (curr != NULL) { + if (curr->my_pid == pid) + return curr; + curr = curr->next; + } + + return NULL; +} + +static void agp_insert_file_private(struct agp_file_private * priv) +{ + struct agp_file_private *prev; + + prev = agp_fe.file_priv_list; + + if (prev != NULL) + prev->prev = priv; + priv->next = prev; + agp_fe.file_priv_list = priv; +} + +static void agp_remove_file_private(struct agp_file_private * priv) +{ + struct agp_file_private *next; + struct agp_file_private *prev; + + next = priv->next; + prev = priv->prev; + + if (prev != NULL) { + prev->next = next; + + if (next != NULL) + next->prev = prev; + + } else { + if (next != NULL) + next->prev = NULL; + + agp_fe.file_priv_list = next; + } +} + +/* End - File flag list routines */ + +/* + * Wrappers for agp_free_memory & agp_allocate_memory + * These make sure that internal lists are kept updated. + */ +void agp_free_memory_wrap(struct agp_memory *memory) +{ + agp_remove_from_pool(memory); + agp_free_memory(memory); +} + +struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) +{ + struct agp_memory *memory; + + memory = agp_allocate_memory(agp_bridge, pg_count, type); + if (memory == NULL) + return NULL; + + agp_insert_into_pool(memory); + return memory; +} + +/* Routines for managing the list of controllers - + * These routines manage the current controller, and the list of + * controllers + */ + +static struct agp_controller *agp_find_controller_by_pid(pid_t id) +{ + struct agp_controller *controller; + + controller = agp_fe.controllers; + + while (controller != NULL) { + if (controller->pid == id) + return controller; + controller = controller->next; + } + + return NULL; +} + +static struct agp_controller *agp_create_controller(pid_t id) +{ + struct agp_controller *controller; + + controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL); + if (controller == NULL) + return NULL; + + controller->pid = id; + return controller; +} + +static int agp_insert_controller(struct agp_controller *controller) +{ + struct agp_controller *prev_controller; + + prev_controller = agp_fe.controllers; + controller->next = prev_controller; + + if (prev_controller != NULL) + prev_controller->prev = controller; + + agp_fe.controllers = controller; + + return 0; +} + +static void agp_remove_all_clients(struct agp_controller *controller) +{ + struct agp_client *client; + struct agp_client *temp; + + client = controller->clients; + + while (client) { + struct agp_file_private *priv; + + temp = client; + agp_remove_seg_from_client(temp); + priv = agp_find_private(temp->pid); + + if (priv != NULL) { + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); + clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + } + client = client->next; + kfree(temp); + } +} + +static void agp_remove_all_memory(struct agp_controller *controller) +{ + struct agp_memory *memory; + struct agp_memory *temp; + + memory = 
controller->pool; + + while (memory) { + temp = memory; + memory = memory->next; + agp_free_memory_wrap(temp); + } +} + +static int agp_remove_controller(struct agp_controller *controller) +{ + struct agp_controller *prev_controller; + struct agp_controller *next_controller; + + prev_controller = controller->prev; + next_controller = controller->next; + + if (prev_controller != NULL) { + prev_controller->next = next_controller; + if (next_controller != NULL) + next_controller->prev = prev_controller; + + } else { + if (next_controller != NULL) + next_controller->prev = NULL; + + agp_fe.controllers = next_controller; + } + + agp_remove_all_memory(controller); + agp_remove_all_clients(controller); + + if (agp_fe.current_controller == controller) { + agp_fe.current_controller = NULL; + agp_fe.backend_acquired = false; + agp_backend_release(agp_bridge); + } + kfree(controller); + return 0; +} + +static void agp_controller_make_current(struct agp_controller *controller) +{ + struct agp_client *clients; + + clients = controller->clients; + + while (clients != NULL) { + struct agp_file_private *priv; + + priv = agp_find_private(clients->pid); + + if (priv != NULL) { + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + } + clients = clients->next; + } + + agp_fe.current_controller = controller; +} + +static void agp_controller_release_current(struct agp_controller *controller, + struct agp_file_private *controller_priv) +{ + struct agp_client *clients; + + clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); + clients = controller->clients; + + while (clients != NULL) { + struct agp_file_private *priv; + + priv = agp_find_private(clients->pid); + + if (priv != NULL) + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); + + clients = clients->next; + } + + agp_fe.current_controller = NULL; + agp_fe.used_by_controller = false; + agp_backend_release(agp_bridge); +} + +/* + * Routines for managing client lists - + * These routines are for managing the list of auth'ed clients. 
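+ *
+ * (The model, as reconstructed from the code: one CAP_SYS_RAWIO
+ * process -- the controller, typically an X server -- acquires the
+ * backend, then grants other pids access by reserving segments for
+ * them with AGPIOC_RESERVE; those pids become clients and may mmap
+ * only the ranges reserved for them.)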
+ */ + +static struct agp_client +*agp_find_client_in_controller(struct agp_controller *controller, pid_t id) +{ + struct agp_client *client; + + if (controller == NULL) + return NULL; + + client = controller->clients; + + while (client != NULL) { + if (client->pid == id) + return client; + client = client->next; + } + + return NULL; +} + +static struct agp_controller *agp_find_controller_for_client(pid_t id) +{ + struct agp_controller *controller; + + controller = agp_fe.controllers; + + while (controller != NULL) { + if ((agp_find_client_in_controller(controller, id)) != NULL) + return controller; + controller = controller->next; + } + + return NULL; +} + +struct agp_client *agp_find_client_by_pid(pid_t id) +{ + struct agp_client *temp; + + if (agp_fe.current_controller == NULL) + return NULL; + + temp = agp_find_client_in_controller(agp_fe.current_controller, id); + return temp; +} + +static void agp_insert_client(struct agp_client *client) +{ + struct agp_client *prev_client; + + prev_client = agp_fe.current_controller->clients; + client->next = prev_client; + + if (prev_client != NULL) + prev_client->prev = client; + + agp_fe.current_controller->clients = client; + agp_fe.current_controller->num_clients++; +} + +struct agp_client *agp_create_client(pid_t id) +{ + struct agp_client *new_client; + + new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL); + if (new_client == NULL) + return NULL; + + new_client->pid = id; + agp_insert_client(new_client); + return new_client; +} + +int agp_remove_client(pid_t id) +{ + struct agp_client *client; + struct agp_client *prev_client; + struct agp_client *next_client; + struct agp_controller *controller; + + controller = agp_find_controller_for_client(id); + if (controller == NULL) + return -EINVAL; + + client = agp_find_client_in_controller(controller, id); + if (client == NULL) + return -EINVAL; + + prev_client = client->prev; + next_client = client->next; + + if (prev_client != NULL) { + prev_client->next = next_client; + if (next_client != NULL) + next_client->prev = prev_client; + + } else { + if (next_client != NULL) + next_client->prev = NULL; + controller->clients = next_client; + } + + controller->num_clients--; + agp_remove_seg_from_client(client); + kfree(client); + return 0; +} + +/* End - Routines for managing client lists */ + +/* File Operations */ + +static int agp_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned int size, current_size; + unsigned long offset; + struct agp_client *client; + struct agp_file_private *priv = file->private_data; + struct agp_kern_info kerninfo; + + mutex_lock(&(agp_fe.agp_mutex)); + + if (agp_fe.backend_acquired != true) + goto out_eperm; + + if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) + goto out_eperm; + + agp_copy_info(agp_bridge, &kerninfo); + size = vma->vm_end - vma->vm_start; + current_size = kerninfo.aper_size; + current_size = current_size * 0x100000; + offset = vma->vm_pgoff << PAGE_SHIFT; + DBG("%lx:%lx", offset, offset+size); + + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { + if ((size + offset) > current_size) + goto out_inval; + + client = agp_find_client_by_pid(current->pid); + + if (client == NULL) + goto out_eperm; + + if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) + goto out_inval; + + DBG("client vm_ops=%p", kerninfo.vm_ops); + if (kerninfo.vm_ops) { + vma->vm_ops = kerninfo.vm_ops; + } else if (io_remap_pfn_range(vma, vma->vm_start, + (kerninfo.aper_base + offset) >> PAGE_SHIFT, + size, + 
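+				/*
+				 * The aperture is device memory, so it is
+				 * inserted with io_remap_pfn_range(); the
+				 * write-combining pgprot lets the CPU burst
+				 * sequential writes into the aperture
+				 * instead of issuing uncached single beats.
+				 */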
pgprot_writecombine(vma->vm_page_prot))) { + goto out_again; + } + mutex_unlock(&(agp_fe.agp_mutex)); + return 0; + } + + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { + if (size != current_size) + goto out_inval; + + DBG("controller vm_ops=%p", kerninfo.vm_ops); + if (kerninfo.vm_ops) { + vma->vm_ops = kerninfo.vm_ops; + } else if (io_remap_pfn_range(vma, vma->vm_start, + kerninfo.aper_base >> PAGE_SHIFT, + size, + pgprot_writecombine(vma->vm_page_prot))) { + goto out_again; + } + mutex_unlock(&(agp_fe.agp_mutex)); + return 0; + } + +out_eperm: + mutex_unlock(&(agp_fe.agp_mutex)); + return -EPERM; + +out_inval: + mutex_unlock(&(agp_fe.agp_mutex)); + return -EINVAL; + +out_again: + mutex_unlock(&(agp_fe.agp_mutex)); + return -EAGAIN; +} + +static int agp_release(struct inode *inode, struct file *file) +{ + struct agp_file_private *priv = file->private_data; + + mutex_lock(&(agp_fe.agp_mutex)); + + DBG("priv=%p", priv); + + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { + struct agp_controller *controller; + + controller = agp_find_controller_by_pid(priv->my_pid); + + if (controller != NULL) { + if (controller == agp_fe.current_controller) + agp_controller_release_current(controller, priv); + agp_remove_controller(controller); + controller = NULL; + } + } + + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) + agp_remove_client(priv->my_pid); + + agp_remove_file_private(priv); + kfree(priv); + file->private_data = NULL; + mutex_unlock(&(agp_fe.agp_mutex)); + return 0; +} + +static int agp_open(struct inode *inode, struct file *file) +{ + int minor = iminor(inode); + struct agp_file_private *priv; + struct agp_client *client; + + if (minor != AGPGART_MINOR) + return -ENXIO; + + mutex_lock(&(agp_fe.agp_mutex)); + + priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL); + if (priv == NULL) { + mutex_unlock(&(agp_fe.agp_mutex)); + return -ENOMEM; + } + + set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); + priv->my_pid = current->pid; + + if (capable(CAP_SYS_RAWIO)) + /* Root priv, can be controller */ + set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); + + client = agp_find_client_by_pid(current->pid); + + if (client != NULL) { + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + } + file->private_data = (void *) priv; + agp_insert_file_private(priv); + DBG("private=%p, client=%p", priv, client); + + mutex_unlock(&(agp_fe.agp_mutex)); + + return 0; +} + +static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_info userinfo; + struct agp_kern_info kerninfo; + + agp_copy_info(agp_bridge, &kerninfo); + + memset(&userinfo, 0, sizeof(userinfo)); + userinfo.version.major = kerninfo.version.major; + userinfo.version.minor = kerninfo.version.minor; + userinfo.bridge_id = kerninfo.device->vendor | + (kerninfo.device->device << 16); + userinfo.agp_mode = kerninfo.mode; + userinfo.aper_base = kerninfo.aper_base; + userinfo.aper_size = kerninfo.aper_size; + userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; + userinfo.pg_used = kerninfo.current_memory; + + if (copy_to_user(arg, &userinfo, sizeof(struct agp_info))) + return -EFAULT; + + return 0; +} + +int agpioc_acquire_wrap(struct agp_file_private *priv) +{ + struct agp_controller *controller; + + DBG(""); + + if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) + return -EPERM; + + if (agp_fe.current_controller != NULL) + return -EBUSY; + + if (!agp_bridge) + return -ENODEV; + + if 
(atomic_read(&agp_bridge->agp_in_use)) + return -EBUSY; + + atomic_inc(&agp_bridge->agp_in_use); + + agp_fe.backend_acquired = true; + + controller = agp_find_controller_by_pid(priv->my_pid); + + if (controller != NULL) { + agp_controller_make_current(controller); + } else { + controller = agp_create_controller(priv->my_pid); + + if (controller == NULL) { + agp_fe.backend_acquired = false; + agp_backend_release(agp_bridge); + return -ENOMEM; + } + agp_insert_controller(controller); + agp_controller_make_current(controller); + } + + set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + return 0; +} + +int agpioc_release_wrap(struct agp_file_private *priv) +{ + DBG(""); + agp_controller_release_current(agp_fe.current_controller, priv); + return 0; +} + +int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_setup mode; + + DBG(""); + if (copy_from_user(&mode, arg, sizeof(struct agp_setup))) + return -EFAULT; + + agp_enable(agp_bridge, mode.agp_mode); + return 0; +} + +static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_region reserve; + struct agp_client *client; + struct agp_file_private *client_priv; + + DBG(""); + if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) + return -EFAULT; + + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) + return -EFAULT; + + client = agp_find_client_by_pid(reserve.pid); + + if (reserve.seg_count == 0) { + /* remove a client */ + client_priv = agp_find_private(reserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); + } + if (client == NULL) { + /* client is already removed */ + return 0; + } + return agp_remove_client(reserve.pid); + } else { + struct agp_segment *segment; + + if (reserve.seg_count >= 16384) + return -EINVAL; + + segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count), + GFP_KERNEL); + + if (segment == NULL) + return -ENOMEM; + + if (copy_from_user(segment, (void __user *) reserve.seg_list, + sizeof(struct agp_segment) * reserve.seg_count)) { + kfree(segment); + return -EFAULT; + } + reserve.seg_list = segment; + + if (client == NULL) { + /* Create the client and add the segment */ + client = agp_create_client(reserve.pid); + + if (client == NULL) { + kfree(segment); + return -ENOMEM; + } + client_priv = agp_find_private(reserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); + } + } + return agp_create_segment(client, &reserve); + } + /* Will never really happen */ + return -EINVAL; +} + +int agpioc_protect_wrap(struct agp_file_private *priv) +{ + DBG(""); + /* This function is not currently implemented */ + return -EINVAL; +} + +static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_memory *memory; + struct agp_allocate alloc; + + DBG(""); + if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate))) + return -EFAULT; + + if (alloc.type >= AGP_USER_TYPES) + return -EINVAL; + + memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); + + if (memory == NULL) + return -ENOMEM; + + alloc.key = memory->key; + alloc.physical = memory->physical; + + if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) { + agp_free_memory_wrap(memory); + return -EFAULT; + } + return 0; +} + +int agpioc_deallocate_wrap(struct agp_file_private 
*priv, int arg) +{ + struct agp_memory *memory; + + DBG(""); + memory = agp_find_mem_by_key(arg); + + if (memory == NULL) + return -EINVAL; + + agp_free_memory_wrap(memory); + return 0; +} + +static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_bind bind_info; + struct agp_memory *memory; + + DBG(""); + if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind))) + return -EFAULT; + + memory = agp_find_mem_by_key(bind_info.key); + + if (memory == NULL) + return -EINVAL; + + return agp_bind_memory(memory, bind_info.pg_start); +} + +static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) +{ + struct agp_memory *memory; + struct agp_unbind unbind; + + DBG(""); + if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind))) + return -EFAULT; + + memory = agp_find_mem_by_key(unbind.key); + + if (memory == NULL) + return -EINVAL; + + return agp_unbind_memory(memory); +} + +static long agp_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct agp_file_private *curr_priv = file->private_data; + int ret_val = -ENOTTY; + + DBG("priv=%p, cmd=%x", curr_priv, cmd); + mutex_lock(&(agp_fe.agp_mutex)); + + if ((agp_fe.current_controller == NULL) && + (cmd != AGPIOC_ACQUIRE)) { + ret_val = -EINVAL; + goto ioctl_out; + } + if ((agp_fe.backend_acquired != true) && + (cmd != AGPIOC_ACQUIRE)) { + ret_val = -EBUSY; + goto ioctl_out; + } + if (cmd != AGPIOC_ACQUIRE) { + if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { + ret_val = -EPERM; + goto ioctl_out; + } + /* Use the original pid of the controller, + * in case it's threaded */ + + if (agp_fe.current_controller->pid != curr_priv->my_pid) { + ret_val = -EBUSY; + goto ioctl_out; + } + } + + switch (cmd) { + case AGPIOC_INFO: + ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_ACQUIRE: + ret_val = agpioc_acquire_wrap(curr_priv); + break; + + case AGPIOC_RELEASE: + ret_val = agpioc_release_wrap(curr_priv); + break; + + case AGPIOC_SETUP: + ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_RESERVE: + ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_PROTECT: + ret_val = agpioc_protect_wrap(curr_priv); + break; + + case AGPIOC_ALLOCATE: + ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_DEALLOCATE: + ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); + break; + + case AGPIOC_BIND: + ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_UNBIND: + ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg); + break; + + case AGPIOC_CHIPSET_FLUSH: + break; + } + +ioctl_out: + DBG("ioctl returns %d\n", ret_val); + mutex_unlock(&(agp_fe.agp_mutex)); + return ret_val; +} + +static const struct file_operations agp_fops = +{ + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = agp_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_agp_ioctl, +#endif + .mmap = agp_mmap, + .open = agp_open, + .release = agp_release, +}; + +static struct miscdevice agp_miscdev = +{ + .minor = AGPGART_MINOR, + .name = "agpgart", + .fops = &agp_fops +}; + +int agp_frontend_initialize(void) +{ + memset(&agp_fe, 0, sizeof(struct agp_front_data)); + mutex_init(&(agp_fe.agp_mutex)); + + if (misc_register(&agp_miscdev)) { + printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR); + return -EIO; + } + return 0; +} + +void agp_frontend_cleanup(void) +{ + misc_deregister(&agp_miscdev); +} diff 
--git a/kernel/drivers/char/agp/generic.c b/kernel/drivers/char/agp/generic.c
new file mode 100644
index 000000000..f002fa5d1
--- /dev/null
+++ b/kernel/drivers/char/agp/generic.c
@@ -0,0 +1,1424 @@
+/*
+ * AGPGART driver.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2005 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+__u32 *agp_gatt_table;
+int agp_memory_reserved;
+
+/*
+ * Needed by the Nforce GART driver for the time being. Would be
+ * nice to do this some other way instead of needing this export.
+ */
+EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+ */
+
+void agp_free_key(int key)
+{
+	if (key < 0)
+		return;
+
+	if (key < MAXKEY)
+		clear_bit(key, agp_bridge->key_list);
+}
+EXPORT_SYMBOL(agp_free_key);
+
+
+static int agp_get_key(void)
+{
+	int bit;
+
+	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
+	if (bit < MAXKEY) {
+		set_bit(bit, agp_bridge->key_list);
+		return bit;
+	}
+	return -1;
+}
+
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
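(Aside: kernels from 4.12 onward wrap exactly this try-kmalloc-then-fall-back-to-vmalloc dance in kvmalloc()/kvfree(). On such a tree — not this 4.1 base — the helper below could plausibly shrink to the following sketch, with the matching free path switching to kvfree():)

	void agp_alloc_page_array(size_t size, struct agp_memory *mem)
	{
		/* kvmalloc() tries kmalloc first and falls back to vmalloc
		 * itself; callers must then release with kvfree(). */
		mem->pages = kvmalloc(size, GFP_KERNEL);
	}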
+ */ + +void agp_alloc_page_array(size_t size, struct agp_memory *mem) +{ + mem->pages = NULL; + + if (size <= 2*PAGE_SIZE) + mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (mem->pages == NULL) { + mem->pages = vmalloc(size); + } +} +EXPORT_SYMBOL(agp_alloc_page_array); + +static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) +{ + struct agp_memory *new; + unsigned long alloc_size = num_agp_pages*sizeof(struct page *); + + if (INT_MAX/sizeof(struct page *) < num_agp_pages) + return NULL; + + new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); + if (new == NULL) + return NULL; + + new->key = agp_get_key(); + + if (new->key < 0) { + kfree(new); + return NULL; + } + + agp_alloc_page_array(alloc_size, new); + + if (new->pages == NULL) { + agp_free_key(new->key); + kfree(new); + return NULL; + } + new->num_scratch_pages = 0; + return new; +} + +struct agp_memory *agp_create_memory(int scratch_pages) +{ + struct agp_memory *new; + + new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); + if (new == NULL) + return NULL; + + new->key = agp_get_key(); + + if (new->key < 0) { + kfree(new); + return NULL; + } + + agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); + + if (new->pages == NULL) { + agp_free_key(new->key); + kfree(new); + return NULL; + } + new->num_scratch_pages = scratch_pages; + new->type = AGP_NORMAL_MEMORY; + return new; +} +EXPORT_SYMBOL(agp_create_memory); + +/** + * agp_free_memory - free memory associated with an agp_memory pointer. + * + * @curr: agp_memory pointer to be freed. + * + * It is the only function that can be called when the backend is not owned + * by the caller. (So it can free memory on client death.) + */ +void agp_free_memory(struct agp_memory *curr) +{ + size_t i; + + if (curr == NULL) + return; + + if (curr->is_bound) + agp_unbind_memory(curr); + + if (curr->type >= AGP_USER_TYPES) { + agp_generic_free_by_type(curr); + return; + } + + if (curr->type != 0) { + curr->bridge->driver->free_by_type(curr); + return; + } + if (curr->page_count != 0) { + if (curr->bridge->driver->agp_destroy_pages) { + curr->bridge->driver->agp_destroy_pages(curr); + } else { + + for (i = 0; i < curr->page_count; i++) { + curr->bridge->driver->agp_destroy_page( + curr->pages[i], + AGP_PAGE_DESTROY_UNMAP); + } + for (i = 0; i < curr->page_count; i++) { + curr->bridge->driver->agp_destroy_page( + curr->pages[i], + AGP_PAGE_DESTROY_FREE); + } + } + } + agp_free_key(curr->key); + agp_free_page_array(curr); + kfree(curr); +} +EXPORT_SYMBOL(agp_free_memory); + +#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) + +/** + * agp_allocate_memory - allocate a group of pages of a certain type. + * + * @page_count: size_t argument of the number of pages + * @type: u32 argument of the type of memory to be allocated. + * + * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which + * maps to physical ram. Any other type is device dependent. + * + * It returns NULL whenever memory is unavailable. 
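One detail worth noticing in agp_allocate_memory() below: its accounting test checks both the configured limit and wraparound of the unsigned addition. The idiom in isolation, as a runnable sketch with hypothetical budget values:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Reject requests that exceed the budget *or* overflow the addition,
	 * mirroring the (cur + count > max) || (cur + count < count) test. */
	static bool fits_budget(size_t cur, size_t count, size_t max)
	{
		return cur + count <= max && cur + count >= count;
	}

	int main(void)
	{
		printf("%d\n", fits_budget(10, 5, 16));         /* 1: fits       */
		printf("%d\n", fits_budget(10, 10, 16));        /* 0: over limit */
		printf("%d\n", fits_budget(1, (size_t)-1, 16)); /* 0: wraps      */
		return 0;
	}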
+ */
+struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+					size_t page_count, u32 type)
+{
+	int scratch_pages;
+	struct agp_memory *new;
+	size_t i;
+	int cur_memory;
+
+	if (!bridge)
+		return NULL;
+
+	cur_memory = atomic_read(&bridge->current_memory_agp);
+	if ((cur_memory + page_count > bridge->max_memory_agp) ||
+	    (cur_memory + page_count < page_count))
+		return NULL;
+
+	if (type >= AGP_USER_TYPES) {
+		new = agp_generic_alloc_user(page_count, type);
+		if (new)
+			new->bridge = bridge;
+		return new;
+	}
+
+	if (type != 0) {
+		new = bridge->driver->alloc_by_type(page_count, type);
+		if (new)
+			new->bridge = bridge;
+		return new;
+	}
+
+	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+	new = agp_create_memory(scratch_pages);
+
+	if (new == NULL)
+		return NULL;
+
+	if (bridge->driver->agp_alloc_pages) {
+		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->bridge = bridge;
+		return new;
+	}
+
+	for (i = 0; i < page_count; i++) {
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+		if (page == NULL) {
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->pages[i] = page;
+		new->page_count++;
+	}
+	new->bridge = bridge;
+
+	return new;
+}
+EXPORT_SYMBOL(agp_allocate_memory);
+
+
+/* End - Generic routines for handling agp_memory structures */
+
+
+static int agp_return_size(void)
+{
+	int current_size;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+
+	switch (agp_bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		current_size = A_SIZE_8(temp)->size;
+		break;
+	case U16_APER_SIZE:
+		current_size = A_SIZE_16(temp)->size;
+		break;
+	case U32_APER_SIZE:
+		current_size = A_SIZE_32(temp)->size;
+		break;
+	case LVL2_APER_SIZE:
+		current_size = A_SIZE_LVL2(temp)->size;
+		break;
+	case FIXED_APER_SIZE:
+		current_size = A_SIZE_FIX(temp)->size;
+		break;
+	default:
+		current_size = 0;
+		break;
+	}
+
+	current_size -= (agp_memory_reserved / (1024*1024));
+	if (current_size < 0)
+		current_size = 0;
+	return current_size;
+}
+
+
+int agp_num_entries(void)
+{
+	int num_entries;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+
+	switch (agp_bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case LVL2_APER_SIZE:
+		num_entries = A_SIZE_LVL2(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
+	if (num_entries < 0)
+		num_entries = 0;
+	return num_entries;
+}
+EXPORT_SYMBOL_GPL(agp_num_entries);
+
+
+/**
+ * agp_copy_info - copy bridge state information
+ *
+ * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
+ *
+ * This function copies information about the agp bridge device and the state of
+ * the agp backend into an agp_kern_info pointer.
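agp_copy_info() is also what ultimately services AGPIOC_INFO via agpioc_info_wrap() earlier in this patch, and the command gate in agp_ioctl() rejects everything except AGPIOC_ACQUIRE until a controller holds the backend. A minimal user-space consumer therefore acquires first; a sketch, assuming root privileges, a /dev/agpgart node, and the uapi <linux/agpgart.h>:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/agpgart.h>

	int main(void)
	{
		agp_info info;
		int fd = open("/dev/agpgart", O_RDWR);

		if (fd < 0 || ioctl(fd, AGPIOC_ACQUIRE) < 0) {
			perror("agpgart");
			return 1;
		}
		if (ioctl(fd, AGPIOC_INFO, &info) == 0)
			printf("AGP %u.%u, aperture %zu MB, mode 0x%x\n",
			       info.version.major, info.version.minor,
			       info.aper_size, info.agp_mode);
		ioctl(fd, AGPIOC_RELEASE);
		close(fd);
		return 0;
	}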
+ */ +int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info) +{ + memset(info, 0, sizeof(struct agp_kern_info)); + if (!bridge) { + info->chipset = NOT_SUPPORTED; + return -EIO; + } + + info->version.major = bridge->version->major; + info->version.minor = bridge->version->minor; + info->chipset = SUPPORTED; + info->device = bridge->dev; + if (bridge->mode & AGPSTAT_MODE_3_0) + info->mode = bridge->mode & ~AGP3_RESERVED_MASK; + else + info->mode = bridge->mode & ~AGP2_RESERVED_MASK; + info->aper_base = bridge->gart_bus_addr; + info->aper_size = agp_return_size(); + info->max_memory = bridge->max_memory_agp; + info->current_memory = atomic_read(&bridge->current_memory_agp); + info->cant_use_aperture = bridge->driver->cant_use_aperture; + info->vm_ops = bridge->vm_ops; + info->page_mask = ~0UL; + return 0; +} +EXPORT_SYMBOL(agp_copy_info); + +/* End - Routine to copy over information structure */ + +/* + * Routines for handling swapping of agp_memory into the GATT - + * These routines take agp_memory and insert them into the GATT. + * They call device specific routines to actually write to the GATT. + */ + +/** + * agp_bind_memory - Bind an agp_memory structure into the GATT. + * + * @curr: agp_memory pointer + * @pg_start: an offset into the graphics aperture translation table + * + * It returns -EINVAL if the pointer == NULL. + * It returns -EBUSY if the area of the table requested is already in use. + */ +int agp_bind_memory(struct agp_memory *curr, off_t pg_start) +{ + int ret_val; + + if (curr == NULL) + return -EINVAL; + + if (curr->is_bound) { + printk(KERN_INFO PFX "memory %p is already bound!\n", curr); + return -EINVAL; + } + if (!curr->is_flushed) { + curr->bridge->driver->cache_flush(); + curr->is_flushed = true; + } + + ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); + + if (ret_val != 0) + return ret_val; + + curr->is_bound = true; + curr->pg_start = pg_start; + spin_lock(&agp_bridge->mapped_lock); + list_add(&curr->mapped_list, &agp_bridge->mapped_list); + spin_unlock(&agp_bridge->mapped_lock); + + return 0; +} +EXPORT_SYMBOL(agp_bind_memory); + + +/** + * agp_unbind_memory - Removes an agp_memory structure from the GATT + * + * @curr: agp_memory pointer to be removed from the GATT. + * + * It returns -EINVAL if this piece of agp_memory is not currently bound to + * the graphics aperture translation table or if the agp_memory pointer == NULL + */ +int agp_unbind_memory(struct agp_memory *curr) +{ + int ret_val; + + if (curr == NULL) + return -EINVAL; + + if (!curr->is_bound) { + printk(KERN_INFO PFX "memory %p was not bound!\n", curr); + return -EINVAL; + } + + ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); + + if (ret_val != 0) + return ret_val; + + curr->is_bound = false; + curr->pg_start = 0; + spin_lock(&curr->bridge->mapped_lock); + list_del(&curr->mapped_list); + spin_unlock(&curr->bridge->mapped_lock); + return 0; +} +EXPORT_SYMBOL(agp_unbind_memory); + + +/* End - Routines for handling swapping of agp_memory into the GATT */ + + +/* Generic Agp routines - Start */ +static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) +{ + u32 tmp; + + if (*requested_mode & AGP2_RESERVED_MASK) { + printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", + *requested_mode & AGP2_RESERVED_MASK, *requested_mode); + *requested_mode &= ~AGP2_RESERVED_MASK; + } + + /* + * Some dumb bridges are programmed to disobey the AGP2 spec. 
+ * This is likely a BIOS misprogramming rather than poweron default, or + * it would be a lot more common. + * https://bugs.freedesktop.org/show_bug.cgi?id=8816 + * AGPv2 spec 6.1.9 states: + * The RATE field indicates the data transfer rates supported by this + * device. A.G.P. devices must report all that apply. + * Fix them up as best we can. + */ + switch (*bridge_agpstat & 7) { + case 4: + *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X); + printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. " + "Fixing up support for x2 & x1\n"); + break; + case 2: + *bridge_agpstat |= AGPSTAT2_1X; + printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. " + "Fixing up support for x1\n"); + break; + default: + break; + } + + /* Check the speed bits make sense. Only one should be set. */ + tmp = *requested_mode & 7; + switch (tmp) { + case 0: + printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm); + *requested_mode |= AGPSTAT2_1X; + break; + case 1: + case 2: + break; + case 3: + *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */ + break; + case 4: + break; + case 5: + case 6: + case 7: + *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/ + break; + } + + /* disable SBA if it's not supported */ + if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA))) + *bridge_agpstat &= ~AGPSTAT_SBA; + + /* Set rate */ + if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X))) + *bridge_agpstat &= ~AGPSTAT2_4X; + + if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X))) + *bridge_agpstat &= ~AGPSTAT2_2X; + + if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X))) + *bridge_agpstat &= ~AGPSTAT2_1X; + + /* Now we know what mode it should be, clear out the unwanted bits. */ + if (*bridge_agpstat & AGPSTAT2_4X) + *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */ + + if (*bridge_agpstat & AGPSTAT2_2X) + *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */ + + if (*bridge_agpstat & AGPSTAT2_1X) + *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */ + + /* Apply any errata. */ + if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) + *bridge_agpstat &= ~AGPSTAT_FW; + + if (agp_bridge->flags & AGP_ERRATA_SBA) + *bridge_agpstat &= ~AGPSTAT_SBA; + + if (agp_bridge->flags & AGP_ERRATA_1X) { + *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); + *bridge_agpstat |= AGPSTAT2_1X; + } + + /* If we've dropped down to 1X, disable fast writes. */ + if (*bridge_agpstat & AGPSTAT2_1X) + *bridge_agpstat &= ~AGPSTAT_FW; +} + +/* + * requested_mode = Mode requested by (typically) X. + * bridge_agpstat = PCI_AGP_STATUS from agp bridge. + * vga_agpstat = PCI_AGP_STATUS from graphic card. + */ +static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) +{ + u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat; + u32 tmp; + + if (*requested_mode & AGP3_RESERVED_MASK) { + printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", + *requested_mode & AGP3_RESERVED_MASK, *requested_mode); + *requested_mode &= ~AGP3_RESERVED_MASK; + } + + /* Check the speed bits make sense. */ + tmp = *requested_mode & 7; + if (tmp == 0) { + printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm); + *requested_mode |= AGPSTAT3_4X; + } + if (tmp >= 3) { + printk(KERN_INFO PFX "%s tried to set rate=x%d. 
Setting to AGP3 x8 mode.\n", current->comm, tmp * 4); + *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X; + } + + /* ARQSZ - Set the value to the maximum one. + * Don't allow the mode register to override values. */ + *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) | + max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ))); + + /* Calibration cycle. + * Don't allow the mode register to override values. */ + *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) | + min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK))); + + /* SBA *must* be supported for AGP v3 */ + *bridge_agpstat |= AGPSTAT_SBA; + + /* + * Set speed. + * Check for invalid speeds. This can happen when applications + * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware + */ + if (*requested_mode & AGPSTAT_MODE_3_0) { + /* + * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode, + * have been passed a 3.0 mode, but with 2.x speed bits set. + * AGP2.x 4x -> AGP3.0 4x. + */ + if (*requested_mode & AGPSTAT2_4X) { + printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n", + current->comm, *requested_mode); + *requested_mode &= ~AGPSTAT2_4X; + *requested_mode |= AGPSTAT3_4X; + } + } else { + /* + * The caller doesn't know what they are doing. We are in 3.0 mode, + * but have been passed an AGP 2.x mode. + * Convert AGP 1x,2x,4x -> AGP 3.0 4x. + */ + printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n", + current->comm, *requested_mode); + *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X); + *requested_mode |= AGPSTAT3_4X; + } + + if (*requested_mode & AGPSTAT3_8X) { + if (!(*bridge_agpstat & AGPSTAT3_8X)) { + *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); + *bridge_agpstat |= AGPSTAT3_4X; + printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm); + return; + } + if (!(*vga_agpstat & AGPSTAT3_8X)) { + *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); + *bridge_agpstat |= AGPSTAT3_4X; + printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm); + return; + } + /* All set, bridge & device can do AGP x8*/ + *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); + goto done; + + } else if (*requested_mode & AGPSTAT3_4X) { + *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); + *bridge_agpstat |= AGPSTAT3_4X; + goto done; + + } else { + + /* + * If we didn't specify an AGP mode, we see if both + * the graphics card, and the bridge can do x8, and use if so. + * If not, we fall back to x4 mode. + */ + if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) { + printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode " + "supported by bridge & card (x8).\n"); + *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); + *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); + } else { + printk(KERN_INFO PFX "Fell back to AGPx4 mode because "); + if (!(*bridge_agpstat & AGPSTAT3_8X)) { + printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", + *bridge_agpstat, origbridge); + *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); + *bridge_agpstat |= AGPSTAT3_4X; + } + if (!(*vga_agpstat & AGPSTAT3_8X)) { + printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", + *vga_agpstat, origvga); + *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); + *vga_agpstat |= AGPSTAT3_4X; + } + } + } + +done: + /* Apply any errata. 
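Both parse routines reduce to the same idea: the usable rate is the highest speed bit advertised by the bridge, the card, and the requested mode alike. A toy version of that intersection (hypothetical bit values, mirroring the net effect of the chained mask-outs above):

	#include <stdio.h>

	#define RATE_1X 1u
	#define RATE_2X 2u
	#define RATE_4X 4u

	/* Keep only rates all three parties advertise, then pick the fastest,
	 * the same outcome as agp_v2_parse_one()'s successive clears. */
	static unsigned int negotiate(unsigned int bridge, unsigned int card,
				      unsigned int requested)
	{
		unsigned int common = bridge & card & requested & 7u;

		if (common & RATE_4X)
			return RATE_4X;
		if (common & RATE_2X)
			return RATE_2X;
		return common & RATE_1X;
	}

	int main(void)
	{
		/* bridge does 4x/2x/1x, card 2x/1x, caller asks 4x|2x -> 2x */
		printf("negotiated rate bit: %u\n",
		       negotiate(RATE_4X | RATE_2X | RATE_1X,
				 RATE_2X | RATE_1X, RATE_4X | RATE_2X));
		return 0;
	}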
*/ + if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) + *bridge_agpstat &= ~AGPSTAT_FW; + + if (agp_bridge->flags & AGP_ERRATA_SBA) + *bridge_agpstat &= ~AGPSTAT_SBA; + + if (agp_bridge->flags & AGP_ERRATA_1X) { + *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); + *bridge_agpstat |= AGPSTAT2_1X; + } +} + + +/** + * agp_collect_device_status - determine correct agp_cmd from various agp_stat's + * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. + * @requested_mode: requested agp_stat from userspace (Typically from X) + * @bridge_agpstat: current agp_stat from AGP bridge. + * + * This function will hunt for an AGP graphics card, and try to match + * the requested mode to the capabilities of both the bridge and the card. + */ +u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat) +{ + struct pci_dev *device = NULL; + u32 vga_agpstat; + u8 cap_ptr; + + for (;;) { + device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device); + if (!device) { + printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); + return 0; + } + cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); + if (cap_ptr) + break; + } + + /* + * Ok, here we have a AGP device. Disable impossible + * settings, and adjust the readqueue to the minimum. + */ + pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat); + + /* adjust RQ depth */ + bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) | + min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH), + min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH)))); + + /* disable FW if it's not supported */ + if (!((bridge_agpstat & AGPSTAT_FW) && + (vga_agpstat & AGPSTAT_FW) && + (requested_mode & AGPSTAT_FW))) + bridge_agpstat &= ~AGPSTAT_FW; + + /* Check to see if we are operating in 3.0 mode */ + if (agp_bridge->mode & AGPSTAT_MODE_3_0) + agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); + else + agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); + + pci_dev_put(device); + return bridge_agpstat; +} +EXPORT_SYMBOL(agp_collect_device_status); + + +void agp_device_command(u32 bridge_agpstat, bool agp_v3) +{ + struct pci_dev *device = NULL; + int mode; + + mode = bridge_agpstat & 0x7; + if (agp_v3) + mode *= 4; + + for_each_pci_dev(device) { + u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); + if (!agp) + continue; + + dev_info(&device->dev, "putting AGP V%d device into %dx mode\n", + agp_v3 ? 3 : 2, mode); + pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); + } +} +EXPORT_SYMBOL(agp_device_command); + + +void get_agp_version(struct agp_bridge_data *bridge) +{ + u32 ncapid; + + /* Exit early if already set by errata workarounds. */ + if (bridge->major_version != 0) + return; + + pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid); + bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; + bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf; +} +EXPORT_SYMBOL(get_agp_version); + + +void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) +{ + u32 bridge_agpstat, temp; + + get_agp_version(agp_bridge); + + dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", + agp_bridge->major_version, agp_bridge->minor_version); + + pci_read_config_dword(agp_bridge->dev, + agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); + + bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat); + if (bridge_agpstat == 0) + /* Something bad happened. 
FIXME: Return error code? */ + return; + + bridge_agpstat |= AGPSTAT_AGP_ENABLE; + + /* Do AGP version specific frobbing. */ + if (bridge->major_version >= 3) { + if (bridge->mode & AGPSTAT_MODE_3_0) { + /* If we have 3.5, we can do the isoch stuff. */ + if (bridge->minor_version >= 5) + agp_3_5_enable(bridge); + agp_device_command(bridge_agpstat, true); + return; + } else { + /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/ + bridge_agpstat &= ~(7<<10) ; + pci_read_config_dword(bridge->dev, + bridge->capndx+AGPCTRL, &temp); + temp |= (1<<9); + pci_write_config_dword(bridge->dev, + bridge->capndx+AGPCTRL, temp); + + dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n"); + } + } + + /* AGP v<3 */ + agp_device_command(bridge_agpstat, false); +} +EXPORT_SYMBOL(agp_generic_enable); + + +int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) +{ + char *table; + char *table_end; + int size; + int page_order; + int num_entries; + int i; + void *temp; + struct page *page; + + /* The generic routines can't handle 2 level gatt's */ + if (bridge->driver->size_type == LVL2_APER_SIZE) + return -EINVAL; + + table = NULL; + i = bridge->aperture_size_idx; + temp = bridge->current_size; + size = page_order = num_entries = 0; + + if (bridge->driver->size_type != FIXED_APER_SIZE) { + do { + switch (bridge->driver->size_type) { + case U8_APER_SIZE: + size = A_SIZE_8(temp)->size; + page_order = + A_SIZE_8(temp)->page_order; + num_entries = + A_SIZE_8(temp)->num_entries; + break; + case U16_APER_SIZE: + size = A_SIZE_16(temp)->size; + page_order = A_SIZE_16(temp)->page_order; + num_entries = A_SIZE_16(temp)->num_entries; + break; + case U32_APER_SIZE: + size = A_SIZE_32(temp)->size; + page_order = A_SIZE_32(temp)->page_order; + num_entries = A_SIZE_32(temp)->num_entries; + break; + /* This case will never really happen. */ + case FIXED_APER_SIZE: + case LVL2_APER_SIZE: + default: + size = page_order = num_entries = 0; + break; + } + + table = alloc_gatt_pages(page_order); + + if (table == NULL) { + i++; + switch (bridge->driver->size_type) { + case U8_APER_SIZE: + bridge->current_size = A_IDX8(bridge); + break; + case U16_APER_SIZE: + bridge->current_size = A_IDX16(bridge); + break; + case U32_APER_SIZE: + bridge->current_size = A_IDX32(bridge); + break; + /* These cases will never really happen. 
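The retry loop in agp_generic_create_gatt_table() steps down through the driver's aperture-size table whenever the GATT allocation fails. Stripped of the chipset details, the control flow is just the following sketch (illustrative size entries; malloc stands in for alloc_gatt_pages()):

	#include <stdio.h>
	#include <stdlib.h>

	struct aper_size { int size_mb; int page_order; };

	/* Try the largest aperture first; on allocation failure fall back to
	 * the next smaller entry, as the GATT setup loop does. */
	static void *alloc_gatt(const struct aper_size *sizes, int n, int *chosen)
	{
		void *table = NULL;
		int i;

		for (i = 0; i < n && !table; i++) {
			table = malloc((size_t)4096 << sizes[i].page_order);
			if (table)
				*chosen = i;
		}
		return table;
	}

	int main(void)
	{
		const struct aper_size sizes[] = { {256, 6}, {128, 5}, {64, 4} };
		int idx = -1;
		void *t = alloc_gatt(sizes, 3, &idx);

		if (t)
			printf("got a %d MB aperture (index %d)\n",
			       sizes[idx].size_mb, idx);
		free(t);
		return 0;
	}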
*/ + case FIXED_APER_SIZE: + case LVL2_APER_SIZE: + default: + break; + } + temp = bridge->current_size; + } else { + bridge->aperture_size_idx = i; + } + } while (!table && (i < bridge->driver->num_aperture_sizes)); + } else { + size = ((struct aper_size_info_fixed *) temp)->size; + page_order = ((struct aper_size_info_fixed *) temp)->page_order; + num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; + table = alloc_gatt_pages(page_order); + } + + if (table == NULL) + return -ENOMEM; + + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) + SetPageReserved(page); + + bridge->gatt_table_real = (u32 *) table; + agp_gatt_table = (void *)table; + + bridge->driver->cache_flush(); +#ifdef CONFIG_X86 + if (set_memory_uc((unsigned long)table, 1 << page_order)) + printk(KERN_WARNING "Could not set GATT table memory to UC!\n"); + + bridge->gatt_table = (u32 __iomem *)table; +#else + bridge->gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + bridge->driver->cache_flush(); +#endif + + if (bridge->gatt_table == NULL) { + for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) + ClearPageReserved(page); + + free_gatt_pages(table, page_order); + + return -ENOMEM; + } + bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); + + /* AK: bogus, should encode addresses > 4GB */ + for (i = 0; i < num_entries; i++) { + writel(bridge->scratch_page, bridge->gatt_table+i); + readl(bridge->gatt_table+i); /* PCI Posting. */ + } + + return 0; +} +EXPORT_SYMBOL(agp_generic_create_gatt_table); + +int agp_generic_free_gatt_table(struct agp_bridge_data *bridge) +{ + int page_order; + char *table, *table_end; + void *temp; + struct page *page; + + temp = bridge->current_size; + + switch (bridge->driver->size_type) { + case U8_APER_SIZE: + page_order = A_SIZE_8(temp)->page_order; + break; + case U16_APER_SIZE: + page_order = A_SIZE_16(temp)->page_order; + break; + case U32_APER_SIZE: + page_order = A_SIZE_32(temp)->page_order; + break; + case FIXED_APER_SIZE: + page_order = A_SIZE_FIX(temp)->page_order; + break; + case LVL2_APER_SIZE: + /* The generic routines can't deal with 2 level gatt's */ + return -EINVAL; + default: + page_order = 0; + break; + } + + /* Do not worry about freeing memory, because if this is + * called, then all agp memory is deallocated and removed + * from the table. 
*/ + +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order); +#else + iounmap(bridge->gatt_table); +#endif + table = (char *) bridge->gatt_table_real; + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) + ClearPageReserved(page); + + free_gatt_pages(bridge->gatt_table_real, page_order); + + agp_gatt_table = NULL; + bridge->gatt_table = NULL; + bridge->gatt_table_real = NULL; + bridge->gatt_bus_addr = 0; + + return 0; +} +EXPORT_SYMBOL(agp_generic_free_gatt_table); + + +int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) +{ + int num_entries; + size_t i; + off_t j; + void *temp; + struct agp_bridge_data *bridge; + int mask_type; + + bridge = mem->bridge; + if (!bridge) + return -EINVAL; + + if (mem->page_count == 0) + return 0; + + temp = bridge->current_size; + + switch (bridge->driver->size_type) { + case U8_APER_SIZE: + num_entries = A_SIZE_8(temp)->num_entries; + break; + case U16_APER_SIZE: + num_entries = A_SIZE_16(temp)->num_entries; + break; + case U32_APER_SIZE: + num_entries = A_SIZE_32(temp)->num_entries; + break; + case FIXED_APER_SIZE: + num_entries = A_SIZE_FIX(temp)->num_entries; + break; + case LVL2_APER_SIZE: + /* The generic routines can't deal with 2 level gatt's */ + return -EINVAL; + default: + num_entries = 0; + break; + } + + num_entries -= agp_memory_reserved/PAGE_SIZE; + if (num_entries < 0) num_entries = 0; + + if (type != mem->type) + return -EINVAL; + + mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); + if (mask_type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + + if (((pg_start + mem->page_count) > num_entries) || + ((pg_start + mem->page_count) < pg_start)) + return -EINVAL; + + j = pg_start; + + while (j < (pg_start + mem->page_count)) { + if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j))) + return -EBUSY; + j++; + } + + if (!mem->is_flushed) { + bridge->driver->cache_flush(); + mem->is_flushed = true; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + writel(bridge->driver->mask_memory(bridge, + page_to_phys(mem->pages[i]), + mask_type), + bridge->gatt_table+j); + } + readl(bridge->gatt_table+j-1); /* PCI Posting. */ + + bridge->driver->tlb_flush(mem); + return 0; +} +EXPORT_SYMBOL(agp_generic_insert_memory); + + +int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + size_t i; + struct agp_bridge_data *bridge; + int mask_type, num_entries; + + bridge = mem->bridge; + if (!bridge) + return -EINVAL; + + if (mem->page_count == 0) + return 0; + + if (type != mem->type) + return -EINVAL; + + num_entries = agp_num_entries(); + if (((pg_start + mem->page_count) > num_entries) || + ((pg_start + mem->page_count) < pg_start)) + return -EINVAL; + + mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); + if (mask_type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + + /* AK: bogus, should encode addresses > 4GB */ + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + writel(bridge->scratch_page, bridge->gatt_table+i); + } + readl(bridge->gatt_table+i-1); /* PCI Posting. 
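Both GATT paths above treat the scratch page as the "empty" marker: insert refuses any slot that is not empty (-EBUSY), and remove points freed slots back at it. In miniature, with a plain array standing in for the MMIO'd table:

	#include <errno.h>
	#include <stdio.h>

	#define ENTRIES 8
	#define SCRATCH 0xdeadbeefu

	static unsigned int gatt[ENTRIES];

	/* Fail with -EBUSY if any target slot is occupied, like the
	 * PGE_EMPTY scan in agp_generic_insert_memory(). */
	static int bind_range(int start, int count, unsigned int phys)
	{
		int i;

		for (i = start; i < start + count; i++)
			if (gatt[i] != SCRATCH)
				return -EBUSY;
		for (i = start; i < start + count; i++)
			gatt[i] = phys + (unsigned int)(i - start);
		return 0;
	}

	/* Point freed slots back at the scratch marker, as in remove. */
	static void unbind_range(int start, int count)
	{
		int i;

		for (i = start; i < start + count; i++)
			gatt[i] = SCRATCH;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < ENTRIES; i++)
			gatt[i] = SCRATCH;
		printf("bind: %d\n", bind_range(2, 3, 0x1000)); /* 0      */
		printf("bind: %d\n", bind_range(3, 2, 0x2000)); /* -EBUSY */
		unbind_range(2, 3);
		printf("bind: %d\n", bind_range(3, 2, 0x2000)); /* 0      */
		return 0;
	}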
*/ + + bridge->driver->tlb_flush(mem); + return 0; +} +EXPORT_SYMBOL(agp_generic_remove_memory); + +struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) +{ + return NULL; +} +EXPORT_SYMBOL(agp_generic_alloc_by_type); + +void agp_generic_free_by_type(struct agp_memory *curr) +{ + agp_free_page_array(curr); + agp_free_key(curr->key); + kfree(curr); +} +EXPORT_SYMBOL(agp_generic_free_by_type); + +struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) +{ + struct agp_memory *new; + int i; + int pages; + + pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; + new = agp_create_user_memory(page_count); + if (new == NULL) + return NULL; + + for (i = 0; i < page_count; i++) + new->pages[i] = NULL; + new->page_count = 0; + new->type = type; + new->num_scratch_pages = pages; + + return new; +} +EXPORT_SYMBOL(agp_generic_alloc_user); + +/* + * Basic Page Allocation Routines - + * These routines handle page allocation and by default they reserve the allocated + * memory. They also handle incrementing the current_memory_agp value, Which is checked + * against a maximum value. + */ + +int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages) +{ + struct page * page; + int i, ret = -ENOMEM; + + for (i = 0; i < num_pages; i++) { + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + /* agp_free_memory() needs gart address */ + if (page == NULL) + goto out; + +#ifndef CONFIG_X86 + map_page_into_agp(page); +#endif + get_page(page); + atomic_inc(&agp_bridge->current_memory_agp); + + mem->pages[i] = page; + mem->page_count++; + } + +#ifdef CONFIG_X86 + set_pages_array_uc(mem->pages, num_pages); +#endif + ret = 0; +out: + return ret; +} +EXPORT_SYMBOL(agp_generic_alloc_pages); + +struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge) +{ + struct page * page; + + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + if (page == NULL) + return NULL; + + map_page_into_agp(page); + + get_page(page); + atomic_inc(&agp_bridge->current_memory_agp); + return page; +} +EXPORT_SYMBOL(agp_generic_alloc_page); + +void agp_generic_destroy_pages(struct agp_memory *mem) +{ + int i; + struct page *page; + + if (!mem) + return; + +#ifdef CONFIG_X86 + set_pages_array_wb(mem->pages, mem->page_count); +#endif + + for (i = 0; i < mem->page_count; i++) { + page = mem->pages[i]; + +#ifndef CONFIG_X86 + unmap_page_from_agp(page); +#endif + put_page(page); + __free_page(page); + atomic_dec(&agp_bridge->current_memory_agp); + mem->pages[i] = NULL; + } +} +EXPORT_SYMBOL(agp_generic_destroy_pages); + +void agp_generic_destroy_page(struct page *page, int flags) +{ + if (page == NULL) + return; + + if (flags & AGP_PAGE_DESTROY_UNMAP) + unmap_page_from_agp(page); + + if (flags & AGP_PAGE_DESTROY_FREE) { + put_page(page); + __free_page(page); + atomic_dec(&agp_bridge->current_memory_agp); + } +} +EXPORT_SYMBOL(agp_generic_destroy_page); + +/* End Basic Page Allocation Routines */ + + +/** + * agp_enable - initialise the agp point-to-point connection. + * + * @mode: agp mode register value to configure with. 
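agp_enable() below is a thin dispatcher: everything chipset-specific lives behind the function-pointer table in struct agp_bridge_driver (see the hp_zx1_driver initializer further down for a filled-in instance). The shape of that pattern, reduced to essentials with hypothetical ops:

	#include <stdio.h>

	struct bridge;

	/* One vtable per chipset driver, as in struct agp_bridge_driver. */
	struct bridge_ops {
		void (*enable)(struct bridge *b, unsigned int mode);
	};

	struct bridge {
		const struct bridge_ops *ops;
	};

	static void zx1_enable(struct bridge *b, unsigned int mode)
	{
		printf("zx1 enable, mode 0x%x\n", mode);
	}

	static const struct bridge_ops zx1_ops = { .enable = zx1_enable };

	/* Generic entry point: validate, then defer to the driver. */
	static void bridge_enable(struct bridge *b, unsigned int mode)
	{
		if (b && b->ops && b->ops->enable)
			b->ops->enable(b, mode);
	}

	int main(void)
	{
		struct bridge b = { .ops = &zx1_ops };

		bridge_enable(&b, 0x1f000201);
		return 0;
	}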
+ */ +void agp_enable(struct agp_bridge_data *bridge, u32 mode) +{ + if (!bridge) + return; + bridge->driver->agp_enable(bridge, mode); +} +EXPORT_SYMBOL(agp_enable); + +/* When we remove the global variable agp_bridge from all drivers + * then agp_alloc_bridge and agp_generic_find_bridge need to be updated + */ + +struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev) +{ + if (list_empty(&agp_bridges)) + return NULL; + + return agp_bridge; +} + +static void ipi_handler(void *null) +{ + flush_agp_cache(); +} + +void global_cache_flush(void) +{ + if (on_each_cpu(ipi_handler, NULL, 1) != 0) + panic(PFX "timed out waiting for the other CPUs!\n"); +} +EXPORT_SYMBOL(global_cache_flush); + +unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, + dma_addr_t addr, int type) +{ + /* memory type is ignored in the generic routine */ + if (bridge->driver->masks) + return addr | bridge->driver->masks[0].mask; + else + return addr; +} +EXPORT_SYMBOL(agp_generic_mask_memory); + +int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, + int type) +{ + if (type >= AGP_USER_TYPES) + return 0; + return type; +} +EXPORT_SYMBOL(agp_generic_type_to_mask_type); + +/* + * These functions are implemented according to the AGPv3 spec, + * which covers implementation details that had previously been + * left open. + */ + +int agp3_generic_fetch_size(void) +{ + u16 temp_size; + int i; + struct aper_size_info_16 *values; + + pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size); + values = A_SIZE_16(agp_bridge->driver->aperture_sizes); + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp_size == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + return 0; +} +EXPORT_SYMBOL(agp3_generic_fetch_size); + +void agp3_generic_tlbflush(struct agp_memory *mem) +{ + u32 ctrl; + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); + pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN); + pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl); +} +EXPORT_SYMBOL(agp3_generic_tlbflush); + +int agp3_generic_configure(void) +{ + u32 temp; + struct aper_size_info_16 *current_size; + + current_size = A_SIZE_16(agp_bridge->current_size); + + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* set aperture size */ + pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); + /* set gart pointer */ + pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr); + /* enable aperture and GTLB */ + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN); + return 0; +} +EXPORT_SYMBOL(agp3_generic_configure); + +void agp3_generic_cleanup(void) +{ + u32 ctrl; + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); + pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB); +} +EXPORT_SYMBOL(agp3_generic_cleanup); + +const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] = +{ + {4096, 1048576, 10,0x000}, + {2048, 524288, 9, 0x800}, + {1024, 262144, 8, 0xc00}, + { 512, 131072, 7, 0xe00}, + { 256, 65536, 6, 0xf00}, + { 128, 32768, 5, 
0xf20}, + { 64, 16384, 4, 0xf30}, + { 32, 8192, 3, 0xf38}, + { 16, 4096, 2, 0xf3c}, + { 8, 2048, 1, 0xf3e}, + { 4, 1024, 0, 0xf3f} +}; +EXPORT_SYMBOL(agp3_generic_sizes); + diff --git a/kernel/drivers/char/agp/hp-agp.c b/kernel/drivers/char/agp/hp-agp.c new file mode 100644 index 000000000..3695773ce --- /dev/null +++ b/kernel/drivers/char/agp/hp-agp.c @@ -0,0 +1,553 @@ +/* + * HP zx1 AGPGART routines. + * + * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "agp.h" + +#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ + +/* HP ZX1 IOC registers */ +#define HP_ZX1_IBASE 0x300 +#define HP_ZX1_IMASK 0x308 +#define HP_ZX1_PCOM 0x310 +#define HP_ZX1_TCNFG 0x318 +#define HP_ZX1_PDIR_BASE 0x320 + +#define HP_ZX1_IOVA_BASE GB(1UL) +#define HP_ZX1_IOVA_SIZE GB(1UL) +#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) +#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL + +#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL +#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift) + +#define AGP8X_MODE_BIT 3 +#define AGP8X_MODE (1 << AGP8X_MODE_BIT) + +/* AGP bridge need not be PCI device, but DRM thinks it is. */ +static struct pci_dev fake_bridge_dev; + +static int hp_zx1_gart_found; + +static struct aper_size_info_fixed hp_zx1_sizes[] = +{ + {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ +}; + +static struct gatt_mask hp_zx1_masks[] = +{ + {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0} +}; + +static struct _hp_private { + volatile u8 __iomem *ioc_regs; + volatile u8 __iomem *lba_regs; + int lba_cap_offset; + u64 *io_pdir; // PDIR for entire IOVA + u64 *gatt; // PDIR just for GART (subset of above) + u64 gatt_entries; + u64 iova_base; + u64 gart_base; + u64 gart_size; + u64 io_pdir_size; + int io_pdir_owner; // do we own it, or share it with sba_iommu? + int io_page_size; + int io_tlb_shift; + int io_tlb_ps; // IOC ps config + int io_pages_per_kpage; +} hp_private; + +static int __init hp_zx1_ioc_shared(void) +{ + struct _hp_private *hp = &hp_private; + + printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); + + /* + * IOC already configured by sba_iommu module; just use + * its setup. 
We assume: + * - IOVA space is 1Gb in size + * - first 512Mb is IOMMU, second 512Mb is GART + */ + hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG); + switch (hp->io_tlb_ps) { + case 0: hp->io_tlb_shift = 12; break; + case 1: hp->io_tlb_shift = 13; break; + case 2: hp->io_tlb_shift = 14; break; + case 3: hp->io_tlb_shift = 16; break; + default: + printk(KERN_ERR PFX "Invalid IOTLB page size " + "configuration 0x%x\n", hp->io_tlb_ps); + hp->gatt = NULL; + hp->gatt_entries = 0; + return -ENODEV; + } + hp->io_page_size = 1 << hp->io_tlb_shift; + hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; + + hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1; + hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; + + hp->gart_size = HP_ZX1_GART_SIZE; + hp->gatt_entries = hp->gart_size / hp->io_page_size; + + hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); + hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; + + if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { + /* Normal case when no AGP device in system */ + hp->gatt = NULL; + hp->gatt_entries = 0; + printk(KERN_ERR PFX "No reserved IO PDIR entry found; " + "GART disabled\n"); + return -ENODEV; + } + + return 0; +} + +static int __init +hp_zx1_ioc_owner (void) +{ + struct _hp_private *hp = &hp_private; + + printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); + + /* + * Select an IOV page size no larger than system page size. + */ + if (PAGE_SIZE >= KB(64)) { + hp->io_tlb_shift = 16; + hp->io_tlb_ps = 3; + } else if (PAGE_SIZE >= KB(16)) { + hp->io_tlb_shift = 14; + hp->io_tlb_ps = 2; + } else if (PAGE_SIZE >= KB(8)) { + hp->io_tlb_shift = 13; + hp->io_tlb_ps = 1; + } else { + hp->io_tlb_shift = 12; + hp->io_tlb_ps = 0; + } + hp->io_page_size = 1 << hp->io_tlb_shift; + hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; + + hp->iova_base = HP_ZX1_IOVA_BASE; + hp->gart_size = HP_ZX1_GART_SIZE; + hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; + + hp->gatt_entries = hp->gart_size / hp->io_page_size; + hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); + + return 0; +} + +static int __init +hp_zx1_ioc_init (u64 hpa) +{ + struct _hp_private *hp = &hp_private; + + hp->ioc_regs = ioremap(hpa, 1024); + if (!hp->ioc_regs) + return -ENOMEM; + + /* + * If the IOTLB is currently disabled, we can take it over. + * Otherwise, we have to share with sba_iommu. 
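The IOC's TCNFG field read above is a two-bit page-size code, which both hp_zx1_ioc_shared() and hp_zx1_ioc_owner() translate into a shift before deriving pages-per-kernel-page. The arithmetic in isolation (a 4 KB kernel page assumed for the demo):

	#include <stdio.h>

	/* TCNFG code -> IO TLB page shift, per the switch above:
	 * 0 -> 4 KB, 1 -> 8 KB, 2 -> 16 KB, 3 -> 64 KB; -1 if invalid. */
	static int tlb_ps_to_shift(unsigned int ps)
	{
		static const int shifts[] = { 12, 13, 14, 16 };

		return ps < 4 ? shifts[ps] : -1;
	}

	int main(void)
	{
		const unsigned long kpage = 4096; /* demo kernel page size */
		unsigned int ps;

		for (ps = 0; ps < 4; ps++) {
			unsigned long io_page = 1UL << tlb_ps_to_shift(ps);

			printf("ps=%u: io page %lu bytes, %lu io pages per kernel page\n",
			       ps, io_page,
			       io_page <= kpage ? kpage / io_page : 0);
		}
		return 0;
	}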
+ */ + hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0; + + if (hp->io_pdir_owner) + return hp_zx1_ioc_owner(); + + return hp_zx1_ioc_shared(); +} + +static int +hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap) +{ + u16 status; + u8 pos, id; + int ttl = 48; + + status = readw(hpa+PCI_STATUS); + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; + pos = readb(hpa+PCI_CAPABILITY_LIST); + while (ttl-- && pos >= 0x40) { + pos &= ~3; + id = readb(hpa+pos+PCI_CAP_LIST_ID); + if (id == 0xff) + break; + if (id == cap) + return pos; + pos = readb(hpa+pos+PCI_CAP_LIST_NEXT); + } + return 0; +} + +static int __init +hp_zx1_lba_init (u64 hpa) +{ + struct _hp_private *hp = &hp_private; + int cap; + + hp->lba_regs = ioremap(hpa, 256); + if (!hp->lba_regs) + return -ENOMEM; + + hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP); + + cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff; + if (cap != PCI_CAP_ID_AGP) { + printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n", + cap, hp->lba_cap_offset); + iounmap(hp->lba_regs); + return -ENODEV; + } + + return 0; +} + +static int +hp_zx1_fetch_size(void) +{ + int size; + + size = hp_private.gart_size / MB(1); + hp_zx1_sizes[0].size = size; + agp_bridge->current_size = (void *) &hp_zx1_sizes[0]; + return size; +} + +static int +hp_zx1_configure (void) +{ + struct _hp_private *hp = &hp_private; + + agp_bridge->gart_bus_addr = hp->gart_base; + agp_bridge->capndx = hp->lba_cap_offset; + agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); + + if (hp->io_pdir_owner) { + writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); + readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); + writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); + readl(hp->ioc_regs+HP_ZX1_TCNFG); + writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK); + readl(hp->ioc_regs+HP_ZX1_IMASK); + writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE); + readl(hp->ioc_regs+HP_ZX1_IBASE); + writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM); + readl(hp->ioc_regs+HP_ZX1_PCOM); + } + + return 0; +} + +static void +hp_zx1_cleanup (void) +{ + struct _hp_private *hp = &hp_private; + + if (hp->ioc_regs) { + if (hp->io_pdir_owner) { + writeq(0, hp->ioc_regs+HP_ZX1_IBASE); + readq(hp->ioc_regs+HP_ZX1_IBASE); + } + iounmap(hp->ioc_regs); + } + if (hp->lba_regs) + iounmap(hp->lba_regs); +} + +static void +hp_zx1_tlbflush (struct agp_memory *mem) +{ + struct _hp_private *hp = &hp_private; + + writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM); + readq(hp->ioc_regs+HP_ZX1_PCOM); +} + +static int +hp_zx1_create_gatt_table (struct agp_bridge_data *bridge) +{ + struct _hp_private *hp = &hp_private; + int i; + + if (hp->io_pdir_owner) { + hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, + get_order(hp->io_pdir_size)); + if (!hp->io_pdir) { + printk(KERN_ERR PFX "Couldn't allocate contiguous " + "memory for I/O PDIR\n"); + hp->gatt = NULL; + hp->gatt_entries = 0; + return -ENOMEM; + } + memset(hp->io_pdir, 0, hp->io_pdir_size); + + hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; + } + + for (i = 0; i < hp->gatt_entries; i++) { + hp->gatt[i] = (unsigned long) agp_bridge->scratch_page; + } + + return 0; +} + +static int +hp_zx1_free_gatt_table (struct agp_bridge_data *bridge) +{ + struct _hp_private *hp = &hp_private; + + if (hp->io_pdir_owner) + free_pages((unsigned long) hp->io_pdir, + get_order(hp->io_pdir_size)); + else + hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; + return 
0; +} + +static int +hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type) +{ + struct _hp_private *hp = &hp_private; + int i, k; + off_t j, io_pg_start; + int io_pg_count; + + if (type != mem->type || + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { + return -EINVAL; + } + + io_pg_start = hp->io_pages_per_kpage * pg_start; + io_pg_count = hp->io_pages_per_kpage * mem->page_count; + if ((io_pg_start + io_pg_count) > hp->gatt_entries) { + return -EINVAL; + } + + j = io_pg_start; + while (j < (io_pg_start + io_pg_count)) { + if (hp->gatt[j]) { + return -EBUSY; + } + j++; + } + + if (!mem->is_flushed) { + global_cache_flush(); + mem->is_flushed = true; + } + + for (i = 0, j = io_pg_start; i < mem->page_count; i++) { + unsigned long paddr; + + paddr = page_to_phys(mem->pages[i]); + for (k = 0; + k < hp->io_pages_per_kpage; + k++, j++, paddr += hp->io_page_size) { + hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr; + } + } + + agp_bridge->driver->tlb_flush(mem); + return 0; +} + +static int +hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type) +{ + struct _hp_private *hp = &hp_private; + int i, io_pg_start, io_pg_count; + + if (type != mem->type || + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { + return -EINVAL; + } + + io_pg_start = hp->io_pages_per_kpage * pg_start; + io_pg_count = hp->io_pages_per_kpage * mem->page_count; + for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { + hp->gatt[i] = agp_bridge->scratch_page; + } + + agp_bridge->driver->tlb_flush(mem); + return 0; +} + +static unsigned long +hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type) +{ + return HP_ZX1_PDIR_VALID_BIT | addr; +} + +static void +hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode) +{ + struct _hp_private *hp = &hp_private; + u32 command; + + command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); + command = agp_collect_device_status(bridge, mode, command); + command |= 0x00000100; + + writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND); + + agp_device_command(command, (mode & AGP8X_MODE) != 0); +} + +const struct agp_bridge_driver hp_zx1_driver = { + .owner = THIS_MODULE, + .size_type = FIXED_APER_SIZE, + .configure = hp_zx1_configure, + .fetch_size = hp_zx1_fetch_size, + .cleanup = hp_zx1_cleanup, + .tlb_flush = hp_zx1_tlbflush, + .mask_memory = hp_zx1_mask_memory, + .masks = hp_zx1_masks, + .agp_enable = hp_zx1_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = hp_zx1_create_gatt_table, + .free_gatt_table = hp_zx1_free_gatt_table, + .insert_memory = hp_zx1_insert_memory, + .remove_memory = hp_zx1_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + .cant_use_aperture = true, +}; + +static int __init +hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa) +{ + struct agp_bridge_data *bridge; + int error = 0; + + error = hp_zx1_ioc_init(ioc_hpa); + if (error) + goto fail; + + error = hp_zx1_lba_init(lba_hpa); + if (error) + goto fail; + + bridge = agp_alloc_bridge(); + if (!bridge) { + error = -ENOMEM; + goto fail; + } + bridge->driver = &hp_zx1_driver; + + fake_bridge_dev.vendor = PCI_VENDOR_ID_HP; + fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA; + bridge->dev = &fake_bridge_dev; + + 
error = agp_add_bridge(bridge); + fail: + if (error) + hp_zx1_cleanup(); + return error; +} + +static acpi_status __init +zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) +{ + acpi_handle handle, parent; + acpi_status status; + struct acpi_device_info *info; + u64 lba_hpa, sba_hpa, length; + int match; + + status = hp_acpi_csr_space(obj, &lba_hpa, &length); + if (ACPI_FAILURE(status)) + return AE_OK; /* keep looking for another bridge */ + + /* Look for an enclosing IOC scope and find its CSR space */ + handle = obj; + do { + status = acpi_get_object_info(handle, &info); + if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) { + /* TBD check _CID also */ + match = (strcmp(info->hardware_id.string, "HWP0001") == 0); + kfree(info); + if (match) { + status = hp_acpi_csr_space(handle, &sba_hpa, &length); + if (ACPI_SUCCESS(status)) + break; + else { + printk(KERN_ERR PFX "Detected HP ZX1 " + "AGP LBA but no IOC.\n"); + return AE_OK; + } + } + } + + status = acpi_get_parent(handle, &parent); + handle = parent; + } while (ACPI_SUCCESS(status)); + + if (ACPI_FAILURE(status)) + return AE_OK; /* found no enclosing IOC */ + + if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) + return AE_OK; + + printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset " + "(ioc=%llx, lba=%llx)\n", (char *)context, + sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa); + + hp_zx1_gart_found = 1; + return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */ +} + +static int __init +agp_hp_init (void) +{ + if (agp_off) + return -EINVAL; + + acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL); + if (hp_zx1_gart_found) + return 0; + + acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL); + if (hp_zx1_gart_found) + return 0; + + return -ENODEV; +} + +static void __exit +agp_hp_cleanup (void) +{ +} + +module_init(agp_hp_init); +module_exit(agp_hp_cleanup); + +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/i460-agp.c b/kernel/drivers/char/agp/i460-agp.c new file mode 100644 index 000000000..15b240ea4 --- /dev/null +++ b/kernel/drivers/char/agp/i460-agp.c @@ -0,0 +1,659 @@ +/* + * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of + * the "Intel 460GTX Chipset Software Developer's Manual": + * http://www.intel.com/design/archives/itanium/downloads/248704.htm + */ +/* + * 460GX support by Chris Ahna + * Clean up & simplification by David Mosberger-Tang + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/agp_backend.h> +#include <linux/log2.h> + +#include "agp.h" + +#define INTEL_I460_BAPBASE 0x98 +#define INTEL_I460_GXBCTL 0xa0 +#define INTEL_I460_AGPSIZ 0xa2 +#define INTEL_I460_ATTBASE 0xfe200000 +#define INTEL_I460_GATT_VALID (1UL << 24) +#define INTEL_I460_GATT_COHERENT (1UL << 25) + +/* + * The i460 can operate with large (4MB) pages, but there is no sane way to support this + * within the current kernel/DRM environment, so we disable the relevant code for now. + * See also comments in ia64_alloc_page()...
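+ * (With I460_LARGE_IO_PAGES defined to 0 below, the driver always runs with 4KB GART pages, i.e. I460_IO_PAGE_SHIFT is fixed at 12.)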
+ */ +#define I460_LARGE_IO_PAGES 0 + +#if I460_LARGE_IO_PAGES +# define I460_IO_PAGE_SHIFT i460.io_page_shift +#else +# define I460_IO_PAGE_SHIFT 12 +#endif + +#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT) +#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT)) +#define I460_SRAM_IO_DISABLE (1 << 4) +#define I460_BAPBASE_ENABLE (1 << 3) +#define I460_AGPSIZ_MASK 0x7 +#define I460_4M_PS (1 << 1) + +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +/* + * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the + * gatt_table and gatt_table_real pointers a "void *"... + */ +#define RD_GATT(index) readl((u32 *) i460.gatt + (index)) +#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index)) +/* + * The 460 spec says we have to read the last location written to make sure that all + * writes have taken effect + */ +#define WR_FLUSH_GATT(index) RD_GATT(index) + +static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, + dma_addr_t addr, int type); + +static struct { + void *gatt; /* ioremap'd GATT area */ + + /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */ + u8 io_page_shift; + + /* BIOS configures chipset to one of 2 possible apbase values: */ + u8 dynamic_apbase; + + /* structure for tracking partial use of 4MB GART pages: */ + struct lp_desc { + unsigned long *alloced_map; /* bitmap of kernel-pages in use */ + int refcount; /* number of kernel pages using the large page */ + u64 paddr; /* physical address of large page */ + struct page *page; /* page pointer */ + } *lp_desc; +} i460; + +static const struct aper_size_info_8 i460_sizes[3] = +{ + /* + * The 32GB aperture is only available with a 4M GART page size. Due to the + * dynamic GART page size, we can't figure out page_order or num_entries until + * runtime. + */ + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; + +static struct gatt_mask i460_masks[] = +{ + { + .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, + .type = 0 + } +}; + +static int i460_fetch_size (void) +{ + int i; + u8 temp; + struct aper_size_info_8 *values; + + /* Determine the GART page size */ + pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp); + i460.io_page_shift = (temp & I460_4M_PS) ? 
22 : 12; + pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift); + + if (i460.io_page_shift != I460_IO_PAGE_SHIFT) { + printk(KERN_ERR PFX + "I/O (GART) page-size %luKB doesn't match expected " + "size %luKB\n", + 1UL << (i460.io_page_shift - 10), + 1UL << (I460_IO_PAGE_SHIFT - 10)); + return 0; + } + + values = A_SIZE_8(agp_bridge->driver->aperture_sizes); + + pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); + + /* Exit now if the IO drivers for the GART SRAMS are turned off */ + if (temp & I460_SRAM_IO_DISABLE) { + printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); + printk(KERN_ERR PFX "AGPGART operation not possible\n"); + return 0; + } + + /* Make sure we don't try to create a 2^23 entry GATT */ + if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { + printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); + return 0; + } + + /* Determine the proper APBASE register */ + if (temp & I460_BAPBASE_ENABLE) + i460.dynamic_apbase = INTEL_I460_BAPBASE; + else + i460.dynamic_apbase = AGP_APBASE; + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + /* + * Dynamically calculate the proper num_entries and page_order values for + * the defined aperture sizes. Take care not to shift off the end of + * values[i].size. + */ + values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12); + values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); + } + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + /* Neglect control bits when matching up size_value */ + if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { + agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* There isn't anything to do here since 460 has no GART TLB. */ +static void i460_tlb_flush (struct agp_memory *mem) +{ + return; +} + +/* + * This utility function is needed to prevent corruption of the control bits + * which are stored along with the aperture size in 460's AGPSIZ register + */ +static void i460_write_agpsiz (u8 size_value) +{ + u8 temp; + + pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, + ((temp & ~I460_AGPSIZ_MASK) | size_value)); +} + +static void i460_cleanup (void) +{ + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge->previous_size); + i460_write_agpsiz(previous_size->size_value); + + if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) + kfree(i460.lp_desc); +} + +static int i460_configure (void) +{ + union { + u32 small[2]; + u64 large; + } temp; + size_t size; + u8 scratch; + struct aper_size_info_8 *current_size; + + temp.large = 0; + + current_size = A_SIZE_8(agp_bridge->current_size); + i460_write_agpsiz(current_size->size_value); + + /* + * Do the necessary rigmarole to read all eight bytes of APBASE. + * This has to be done since the AGP aperture can be above 4GB on + * 460 based systems.
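+ * (APBASE is a 64-bit BAR here, so the low and high dwords are read separately and combined below.)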
+ */ + pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0])); + pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1])); + + /* Clear BAR control bits */ + agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1); + + pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, + (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); + + /* + * Initialize partial allocation trackers if a GART page is bigger than a kernel + * page. + */ + if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { + size = current_size->num_entries * sizeof(i460.lp_desc[0]); + i460.lp_desc = kzalloc(size, GFP_KERNEL); + if (!i460.lp_desc) + return -ENOMEM; + } + return 0; +} + +static int i460_create_gatt_table (struct agp_bridge_data *bridge) +{ + int page_order, num_entries, i; + void *temp; + + /* + * Load up the fixed address of the GART SRAMS which hold our GATT table. + */ + temp = agp_bridge->current_size; + page_order = A_SIZE_8(temp)->page_order; + num_entries = A_SIZE_8(temp)->num_entries; + + i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order); + if (!i460.gatt) { + printk(KERN_ERR PFX "ioremap failed\n"); + return -ENOMEM; + } + + /* These are no good; they should be removed from the agp_bridge structure... */ + agp_bridge->gatt_table_real = NULL; + agp_bridge->gatt_table = NULL; + agp_bridge->gatt_bus_addr = 0; + + for (i = 0; i < num_entries; ++i) + WR_GATT(i, 0); + WR_FLUSH_GATT(i - 1); + return 0; +} + +static int i460_free_gatt_table (struct agp_bridge_data *bridge) +{ + int num_entries, i; + void *temp; + + temp = agp_bridge->current_size; + + num_entries = A_SIZE_8(temp)->num_entries; + + for (i = 0; i < num_entries; ++i) + WR_GATT(i, 0); + WR_FLUSH_GATT(num_entries - 1); + + iounmap(i460.gatt); + return 0; +} + +/* + * The following functions are called when the I/O (GART) page size is smaller than + * PAGE_SIZE.
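+ * In that case one kernel page spans several GART pages: e.g. with 16KB kernel pages and 4KB GART pages, I460_IOPAGES_PER_KPAGE is 4, so each inserted kernel page fills four consecutive GATT entries.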
+ */ + +static int i460_insert_memory_small_io_page (struct agp_memory *mem, + off_t pg_start, int type) +{ + unsigned long paddr, io_pg_start, io_page_size; + int i, j, k, num_entries; + void *temp; + + pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", + mem, pg_start, type, page_to_phys(mem->pages[0])); + + if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) + return -EINVAL; + + io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; + + temp = agp_bridge->current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + j = io_pg_start; + while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { + if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) { + pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n", + j, RD_GATT(j)); + return -EBUSY; + } + j++; + } + + io_page_size = 1UL << I460_IO_PAGE_SHIFT; + for (i = 0, j = io_pg_start; i < mem->page_count; i++) { + paddr = page_to_phys(mem->pages[i]); + for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size) + WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type)); + } + WR_FLUSH_GATT(j - 1); + return 0; +} + +static int i460_remove_memory_small_io_page(struct agp_memory *mem, + off_t pg_start, int type) +{ + int i; + + pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n", + mem, pg_start, type); + + pg_start = I460_IOPAGES_PER_KPAGE * pg_start; + + for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) + WR_GATT(i, 0); + WR_FLUSH_GATT(i - 1); + return 0; +} + +#if I460_LARGE_IO_PAGES + +/* + * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE. + * + * This situation is interesting since AGP memory allocations that are smaller than a + * single GART page are possible. The i460.lp_desc array tracks partial allocation of the + * large GART pages to work around this issue. + * + * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page + * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and + * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated). 
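+ * For example, with 16KB kernel pages a 4MB GART page holds 256 kernel pages; once refcount falls back to zero the large page is released through i460_free_large_page().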
+ */ + +static int i460_alloc_large_page (struct lp_desc *lp) +{ + unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; + size_t map_size; + + lp->page = alloc_pages(GFP_KERNEL, order); + if (!lp->page) { + printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); + return -ENOMEM; + } + + map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; + lp->alloced_map = kzalloc(map_size, GFP_KERNEL); + if (!lp->alloced_map) { + __free_pages(lp->page, order); + printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); + return -ENOMEM; + } + + lp->paddr = page_to_phys(lp->page); + lp->refcount = 0; + atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); + return 0; +} + +static void i460_free_large_page (struct lp_desc *lp) +{ + kfree(lp->alloced_map); + lp->alloced_map = NULL; + + __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT); + atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); +} + +static int i460_insert_memory_large_io_page (struct agp_memory *mem, + off_t pg_start, int type) +{ + int i, start_offset, end_offset, idx, pg, num_entries; + struct lp_desc *start, *end, *lp; + void *temp; + + if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) + return -EINVAL; + + temp = agp_bridge->current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; + end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; + start_offset = pg_start % I460_KPAGES_PER_IOPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; + + if (end > i460.lp_desc + num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + /* Check if the requested region of the aperture is free */ + for (lp = start; lp <= end; ++lp) { + if (!lp->alloced_map) + continue; /* OK, the entire large page is available... */ + + for (idx = ((lp == start) ? start_offset : 0); + idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); + idx++) + { + if (test_bit(idx, lp->alloced_map)) + return -EBUSY; + } + } + + for (lp = start, i = 0; lp <= end; ++lp) { + if (!lp->alloced_map) { + /* Allocate new GART pages... */ + if (i460_alloc_large_page(lp) < 0) + return -ENOMEM; + pg = lp - i460.lp_desc; + WR_GATT(pg, i460_mask_memory(agp_bridge, + lp->paddr, 0)); + WR_FLUSH_GATT(pg); + } + + for (idx = ((lp == start) ? start_offset : 0); + idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); + idx++, i++) + { + mem->pages[i] = lp->page; + __set_bit(idx, lp->alloced_map); + ++lp->refcount; + } + } + return 0; +} + +static int i460_remove_memory_large_io_page (struct agp_memory *mem, + off_t pg_start, int type) +{ + int i, pg, start_offset, end_offset, idx, num_entries; + struct lp_desc *start, *end, *lp; + void *temp; + + temp = agp_bridge->current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; + end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; + start_offset = pg_start % I460_KPAGES_PER_IOPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; + + for (i = 0, lp = start; lp <= end; ++lp) { + for (idx = ((lp == start) ? start_offset : 0); + idx < ((lp == end) ? 
(end_offset + 1) : I460_KPAGES_PER_IOPAGE); + idx++, i++) + { + mem->pages[i] = NULL; + __clear_bit(idx, lp->alloced_map); + --lp->refcount; + } + + /* Free GART pages if they are unused */ + if (lp->refcount == 0) { + pg = lp - i460.lp_desc; + WR_GATT(pg, 0); + WR_FLUSH_GATT(pg); + i460_free_large_page(lp); + } + } + return 0; +} + +/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */ + +static int i460_insert_memory (struct agp_memory *mem, + off_t pg_start, int type) +{ + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) + return i460_insert_memory_small_io_page(mem, pg_start, type); + else + return i460_insert_memory_large_io_page(mem, pg_start, type); +} + +static int i460_remove_memory (struct agp_memory *mem, + off_t pg_start, int type) +{ + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) + return i460_remove_memory_small_io_page(mem, pg_start, type); + else + return i460_remove_memory_large_io_page(mem, pg_start, type); +} + +/* + * If the I/O (GART) page size is bigger than the kernel page size, we don't want to + * allocate memory until we know where it is to be bound in the aperture (a + * multi-kernel-page alloc might fit inside of an already allocated GART page). + * + * Let's just hope nobody counts on the allocated AGP memory being there before bind time + * (I don't think current drivers do)... + */ +static struct page *i460_alloc_page (struct agp_bridge_data *bridge) +{ + void *page; + + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { + page = agp_generic_alloc_page(agp_bridge); + } else + /* Returning NULL would cause problems */ + /* AK: really dubious code. */ + page = (void *)~0UL; + return page; +} + +static void i460_destroy_page (struct page *page, int flags) +{ + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { + agp_generic_destroy_page(page, flags); + } +} + +#endif /* I460_LARGE_IO_PAGES */ + +static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, + dma_addr_t addr, int type) +{ + /* Make sure the returned address is a valid GATT entry */ + return bridge->driver->masks[0].mask + | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12); +} + +const struct agp_bridge_driver intel_i460_driver = { + .owner = THIS_MODULE, + .aperture_sizes = i460_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 3, + .configure = i460_configure, + .fetch_size = i460_fetch_size, + .cleanup = i460_cleanup, + .tlb_flush = i460_tlb_flush, + .mask_memory = i460_mask_memory, + .masks = i460_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = i460_create_gatt_table, + .free_gatt_table = i460_free_gatt_table, +#if I460_LARGE_IO_PAGES + .insert_memory = i460_insert_memory, + .remove_memory = i460_remove_memory, + .agp_alloc_page = i460_alloc_page, + .agp_destroy_page = i460_destroy_page, +#else + .insert_memory = i460_insert_memory_small_io_page, + .remove_memory = i460_remove_memory_small_io_page, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, +#endif + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + .cant_use_aperture = true, +}; + +static int agp_intel_i460_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + bridge =
agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->driver = &intel_i460_driver; + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + printk(KERN_INFO PFX "Detected Intel 460GX chipset\n"); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_intel_i460_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +static struct pci_device_id agp_intel_i460_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_84460GX, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table); + +static struct pci_driver agp_intel_i460_pci_driver = { + .name = "agpgart-intel-i460", + .id_table = agp_intel_i460_pci_table, + .probe = agp_intel_i460_probe, + .remove = agp_intel_i460_remove, +}; + +static int __init agp_intel_i460_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_intel_i460_pci_driver); +} + +static void __exit agp_intel_i460_cleanup(void) +{ + pci_unregister_driver(&agp_intel_i460_pci_driver); +} + +module_init(agp_intel_i460_init); +module_exit(agp_intel_i460_cleanup); + +MODULE_AUTHOR("Chris Ahna <christopher.j.ahna@intel.com>"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/intel-agp.c b/kernel/drivers/char/agp/intel-agp.c new file mode 100644 index 000000000..0a21daed5 --- /dev/null +++ b/kernel/drivers/char/agp/intel-agp.c @@ -0,0 +1,924 @@ +/* + * Intel AGPGART routines. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pagemap.h> +#include <linux/agp_backend.h> +#include <asm/smp.h> +#include "agp.h" +#include "intel-agp.h" +#include <drm/intel-gtt.h> + +static int intel_fetch_size(void) +{ + int i; + u16 temp; + struct aper_size_info_16 *values; + + pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); + values = A_SIZE_16(agp_bridge->driver->aperture_sizes); + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int __intel_8xx_fetch_size(u8 temp) +{ + int i; + struct aper_size_info_8 *values; + + values = A_SIZE_8(agp_bridge->driver->aperture_sizes); + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + return 0; +} + +static int intel_8xx_fetch_size(void) +{ + u8 temp; + + pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); + return __intel_8xx_fetch_size(temp); +} + +static int intel_815_fetch_size(void) +{ + u8 temp; + + /* Intel 815 chipsets have a _weird_ APSIZE register with only + * one non-reserved bit, so mask the others out ...
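+ * (bit 3 is the one meaningful bit, which is exactly what the mask below keeps)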
*/ + pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); + temp &= (1 << 3); + + return __intel_8xx_fetch_size(temp); +} + +static void intel_tlbflush(struct agp_memory *mem) +{ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); +} + + +static void intel_8xx_tlbflush(struct agp_memory *mem) +{ + u32 temp; + pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7)); + pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7)); +} + + +static void intel_cleanup(void) +{ + u16 temp; + struct aper_size_info_16 *previous_size; + + previous_size = A_SIZE_16(agp_bridge->previous_size); + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); +} + + +static void intel_8xx_cleanup(void) +{ + u16 temp; + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge->previous_size); + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); +} + + +static int intel_configure(void) +{ + u16 temp2; + struct aper_size_info_16 *current_size; + + current_size = A_SIZE_16(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); + + /* paccfg/nbxcfg */ + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, + (temp2 & ~(1 << 10)) | (1 << 9)); + /* clear any possible error conditions */ + pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); + return 0; +} + +static int intel_815_configure(void) +{ + u32 addr; + u8 temp2; + struct aper_size_info_8 *current_size; + + /* attbase - aperture base */ + /* the Intel 815 chipset spec. says that bits 29-31 in the + * ATTBASE register are reserved -> try not to write them */ + if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { + dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high"); + return -EINVAL; + } + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr); + addr &= INTEL_815_ATTBASE_MASK; + addr |= agp_bridge->gatt_bus_addr; + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* apcont */ + pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2); + pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1)); + + /* clear any possible error conditions */ + /* Oddness : this chipset seems to have no ERRSTS register ! 
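+ * so, unlike the other chipset variants in this file, there is no error-status clear to perform here.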
*/ + return 0; +} + +static void intel_820_tlbflush(struct agp_memory *mem) +{ + return; +} + +static void intel_820_cleanup(void) +{ + u8 temp; + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge->previous_size); + pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp); + pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, + temp & ~(1 << 1)); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, + previous_size->size_value); +} + + +static int intel_820_configure(void) +{ + u8 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* global enable aperture access */ + /* This flag is not accessed through MCHCFG register as in */ + /* i850 chipset. */ + pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2); + pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c); + return 0; +} + +static int intel_840_configure(void) +{ + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* mcgcfg */ + pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9)); + /* clear any possible error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000); + return 0; +} + +static int intel_845_configure(void) +{ + u8 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + if (agp_bridge->apbase_config != 0) { + pci_write_config_dword(agp_bridge->dev, AGP_APBASE, + agp_bridge->apbase_config); + } else { + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + agp_bridge->apbase_config = agp_bridge->gart_bus_addr; + } + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* agpm */ + pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2); + pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); + /* clear any possible error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); + return 0; +} + +static int intel_850_configure(void) +{ + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ 
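+ /* size_value is the APSIZE encoding taken from intel_8xx_sizes[]: e.g. 0 selects a 256MB aperture and 63 a 4MB one. */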
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* mcgcfg */ + pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c); + return 0; +} + +static int intel_860_configure(void) +{ + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* mcgcfg */ + pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700); + return 0; +} + +static int intel_830mp_configure(void) +{ + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* gmch */ + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c); + return 0; +} + +static int intel_7505_configure(void) +{ + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge->current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, + AGP_APERTURE_BAR); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); + + /* mchcfg */ + pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9)); + + return 0; +} + +/* Setup function */ +static const struct gatt_mask intel_generic_masks[] = +{ + {.mask = 0x00000017, .type = 0} +}; + +static const struct aper_size_info_8 intel_815_sizes[2] = +{ + {64, 16384, 4, 0}, + {32, 8192, 3, 8}, +}; + +static const struct aper_size_info_8 intel_8xx_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 
48}, + {32, 8192, 3, 56}, + {16, 4096, 2, 60}, + {8, 2048, 1, 62}, + {4, 1024, 0, 63} +}; + +static const struct aper_size_info_16 intel_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56}, + {16, 4096, 2, 60}, + {8, 2048, 1, 62}, + {4, 1024, 0, 63} +}; + +static const struct aper_size_info_8 intel_830mp_sizes[4] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56} +}; + +static const struct agp_bridge_driver intel_generic_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_generic_sizes, + .size_type = U16_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_configure, + .fetch_size = intel_fetch_size, + .cleanup = intel_cleanup, + .tlb_flush = intel_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_815_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_815_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 2, + .needs_scratch_page = true, + .configure = intel_815_configure, + .fetch_size = intel_815_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_820_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_820_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_820_cleanup, + .tlb_flush = intel_820_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = 
agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_830mp_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_830mp_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 4, + .needs_scratch_page = true, + .configure = intel_830mp_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_840_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_840_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_845_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_845_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_850_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_850_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = 
intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_860_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_860_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static const struct agp_bridge_driver intel_7505_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, + .needs_scratch_page = true, + .configure = intel_7505_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, + .tlb_flush = intel_8xx_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = intel_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of + * driver and gmch_driver must be non-null, and find_gmch will determine + * which one should be used if a gmch_chip_id is present. 
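+ * (In this version the table below only carries chip_id, name and driver; integrated GMCH hardware is handled separately by the intel_gmch_probe() call in agp_intel_probe().)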
+ */ +static const struct intel_agp_driver_description { + unsigned int chip_id; + char *name; + const struct agp_bridge_driver *driver; +} intel_agp_chipsets[] = { + { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver }, + { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver }, + { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver }, + { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver }, + { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver }, + { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver }, + { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver }, + { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver }, + { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver }, + { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver }, + { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver }, + { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver }, + { 0, NULL, NULL } +}; + +static int agp_intel_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr = 0; + struct resource *r; + int i, err; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->capndx = cap_ptr; + + if (intel_gmch_probe(pdev, NULL, bridge)) + goto found_gmch; + + for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { + /* Multiple models of gfx chip can sit on the same host + bridge type; this check makes sure we detect the + right IGD. */ + if (pdev->device == intel_agp_chipsets[i].chip_id) { + bridge->driver = intel_agp_chipsets[i].driver; + break; + } + } + + if (!bridge->driver) { + if (cap_ptr) + dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n", + pdev->vendor, pdev->device); + agp_put_bridge(bridge); + return -ENODEV; + } + + bridge->dev = pdev; + bridge->dev_private_data = NULL; + + dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); + + /* + * The following fixes the case where the BIOS has "forgotten" to + * provide an address range for the GART. + * 20030610 - hamish@zot.org + * This happens before pci_enable_device() intentionally; + * calling pci_enable_device() before assigning the resource + * will result in the GART being disabled on machines with such + * BIOSs (the GART ends up with a BAR starting at 0, which + * conflicts with a lot of other devices). + */ + r = &pdev->resource[0]; + if (!r->start && r->end) { + if (pci_assign_resource(pdev, 0)) { + dev_err(&pdev->dev, "can't assign resource 0\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + } + + /* + * If the device has not been properly setup, the following will catch + * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org + */ + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "can't enable PCI device\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + + /* Fill in the mode register */ + if (cap_ptr) { + pci_read_config_dword(pdev, + bridge->capndx+PCI_AGP_STATUS, + &bridge->mode); + } + +found_gmch: + pci_set_drvdata(pdev, bridge); + err = agp_add_bridge(bridge); + return err; +} + +static void agp_intel_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + + intel_gmch_remove(); + + agp_put_bridge(bridge); +} + +#ifdef CONFIG_PM +static int agp_intel_resume(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + bridge->driver->configure(); + + return 0; +} +#endif + +static struct pci_device_id agp_intel_pci_table[] = { +#define ID(x) \ + { \ + .class = (PCI_CLASS_BRIDGE_HOST << 8), \ + .class_mask = ~0, \ + .vendor = PCI_VENDOR_ID_INTEL, \ + .device = x, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + } + ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */ + ID(PCI_DEVICE_ID_INTEL_82443LX_0), + ID(PCI_DEVICE_ID_INTEL_82443BX_0), + ID(PCI_DEVICE_ID_INTEL_82443GX_0), + ID(PCI_DEVICE_ID_INTEL_82810_MC1), + ID(PCI_DEVICE_ID_INTEL_82810_MC3), + ID(PCI_DEVICE_ID_INTEL_82810E_MC), + ID(PCI_DEVICE_ID_INTEL_82815_MC), + ID(PCI_DEVICE_ID_INTEL_82820_HB), + ID(PCI_DEVICE_ID_INTEL_82820_UP_HB), + ID(PCI_DEVICE_ID_INTEL_82830_HB), + ID(PCI_DEVICE_ID_INTEL_82840_HB), + ID(PCI_DEVICE_ID_INTEL_82845_HB), + ID(PCI_DEVICE_ID_INTEL_82845G_HB), + ID(PCI_DEVICE_ID_INTEL_82850_HB), + ID(PCI_DEVICE_ID_INTEL_82854_HB), + ID(PCI_DEVICE_ID_INTEL_82855PM_HB), + ID(PCI_DEVICE_ID_INTEL_82855GM_HB), + ID(PCI_DEVICE_ID_INTEL_82860_HB), + ID(PCI_DEVICE_ID_INTEL_82865_HB), + ID(PCI_DEVICE_ID_INTEL_82875_HB), + ID(PCI_DEVICE_ID_INTEL_7505_0), + ID(PCI_DEVICE_ID_INTEL_7205_0), + ID(PCI_DEVICE_ID_INTEL_E7221_HB), + ID(PCI_DEVICE_ID_INTEL_82915G_HB), + ID(PCI_DEVICE_ID_INTEL_82915GM_HB), + ID(PCI_DEVICE_ID_INTEL_82945G_HB), + ID(PCI_DEVICE_ID_INTEL_82945GM_HB), + ID(PCI_DEVICE_ID_INTEL_82945GME_HB), + ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB), + ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB), + ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), + ID(PCI_DEVICE_ID_INTEL_82G35_HB), + ID(PCI_DEVICE_ID_INTEL_82965Q_HB), + ID(PCI_DEVICE_ID_INTEL_82965G_HB), + ID(PCI_DEVICE_ID_INTEL_82965GM_HB), + ID(PCI_DEVICE_ID_INTEL_82965GME_HB), + ID(PCI_DEVICE_ID_INTEL_G33_HB), + ID(PCI_DEVICE_ID_INTEL_Q35_HB), + ID(PCI_DEVICE_ID_INTEL_Q33_HB), + ID(PCI_DEVICE_ID_INTEL_GM45_HB), + ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB), + ID(PCI_DEVICE_ID_INTEL_Q45_HB), + ID(PCI_DEVICE_ID_INTEL_G45_HB), + ID(PCI_DEVICE_ID_INTEL_G41_HB), + ID(PCI_DEVICE_ID_INTEL_B43_HB), + ID(PCI_DEVICE_ID_INTEL_B43_1_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); + +static struct pci_driver agp_intel_pci_driver = { + .name = "agpgart-intel", + .id_table = agp_intel_pci_table, + .probe = agp_intel_probe, + .remove = agp_intel_remove, +#ifdef CONFIG_PM + .resume = agp_intel_resume, +#endif +}; + +static int __init agp_intel_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_intel_pci_driver); +} + +static void __exit agp_intel_cleanup(void) +{ + pci_unregister_driver(&agp_intel_pci_driver); +} + 
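+/* agp_off mirrors the agp=off kernel parameter; when set, the driver refuses to register. */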
+module_init(agp_intel_init); +module_exit(agp_intel_cleanup); + +MODULE_AUTHOR("Dave Jones, Various @Intel"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/intel-agp.h b/kernel/drivers/char/agp/intel-agp.h new file mode 100644 index 000000000..fda073dcd --- /dev/null +++ b/kernel/drivers/char/agp/intel-agp.h @@ -0,0 +1,193 @@ +/* + * Common Intel AGPGART and GTT definitions. + */ +#ifndef _INTEL_AGP_H +#define _INTEL_AGP_H + +/* Intel registers */ +#define INTEL_APSIZE 0xb4 +#define INTEL_ATTBASE 0xb8 +#define INTEL_AGPCTRL 0xb0 +#define INTEL_NBXCFG 0x50 +#define INTEL_ERRSTS 0x91 + +/* Intel i830 registers */ +#define I830_GMCH_CTRL 0x52 +#define I830_GMCH_ENABLED 0x4 +#define I830_GMCH_MEM_MASK 0x1 +#define I830_GMCH_MEM_64M 0x1 +#define I830_GMCH_MEM_128M 0 +#define I830_GMCH_GMS_MASK 0x70 +#define I830_GMCH_GMS_DISABLED 0x00 +#define I830_GMCH_GMS_LOCAL 0x10 +#define I830_GMCH_GMS_STOLEN_512 0x20 +#define I830_GMCH_GMS_STOLEN_1024 0x30 +#define I830_GMCH_GMS_STOLEN_8192 0x40 +#define I830_RDRAM_CHANNEL_TYPE 0x03010 +#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) +#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) + +/* This one is for I830MP w. an external graphic card */ +#define INTEL_I830_ERRSTS 0x92 + +/* Intel 855GM/852GM registers */ +#define I855_GMCH_GMS_MASK 0xF0 +#define I855_GMCH_GMS_STOLEN_0M 0x0 +#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) +#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) +#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) +#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) +#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) +#define I85X_CAPID 0x44 +#define I85X_VARIANT_MASK 0x7 +#define I85X_VARIANT_SHIFT 5 +#define I855_GME 0x0 +#define I855_GM 0x4 +#define I852_GME 0x2 +#define I852_GM 0x5 + +/* Intel i845 registers */ +#define INTEL_I845_AGPM 0x51 +#define INTEL_I845_ERRSTS 0xc8 + +/* Intel i860 registers */ +#define INTEL_I860_MCHCFG 0x50 +#define INTEL_I860_ERRSTS 0xc8 + +/* Intel i810 registers */ +#define I810_GMADR_BAR 0 +#define I810_MMADR_BAR 1 +#define I810_PTE_BASE 0x10000 +#define I810_PTE_MAIN_UNCACHED 0x00000000 +#define I810_PTE_LOCAL 0x00000002 +#define I810_PTE_VALID 0x00000001 +#define I830_PTE_SYSTEM_CACHED 0x00000006 + +#define I810_SMRAM_MISCC 0x70 +#define I810_GFX_MEM_WIN_SIZE 0x00010000 +#define I810_GFX_MEM_WIN_32M 0x00010000 +#define I810_GMS 0x000000c0 +#define I810_GMS_DISABLE 0x00000000 +#define I810_PGETBL_CTL 0x2020 +#define I810_PGETBL_ENABLED 0x00000001 +/* Note: PGETBL_CTL2 has a different offset on G33. 
*/ +#define I965_PGETBL_CTL2 0x20c4 +#define I965_PGETBL_SIZE_MASK 0x0000000e +#define I965_PGETBL_SIZE_512KB (0 << 1) +#define I965_PGETBL_SIZE_256KB (1 << 1) +#define I965_PGETBL_SIZE_128KB (2 << 1) +#define I965_PGETBL_SIZE_1MB (3 << 1) +#define I965_PGETBL_SIZE_2MB (4 << 1) +#define I965_PGETBL_SIZE_1_5MB (5 << 1) +#define G33_GMCH_SIZE_MASK (3 << 8) +#define G33_GMCH_SIZE_1M (1 << 8) +#define G33_GMCH_SIZE_2M (2 << 8) +#define G4x_GMCH_SIZE_MASK (0xf << 8) +#define G4x_GMCH_SIZE_1M (0x1 << 8) +#define G4x_GMCH_SIZE_2M (0x3 << 8) +#define G4x_GMCH_SIZE_VT_EN (0x8 << 8) +#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN) +#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN) +#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN) + +#define GFX_FLSH_CNTL 0x2170 /* 915+ */ + +#define I810_DRAM_CTL 0x3000 +#define I810_DRAM_ROW_0 0x00000001 +#define I810_DRAM_ROW_0_SDRAM 0x00000001 + +/* Intel 815 register */ +#define INTEL_815_APCONT 0x51 +#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF + +/* Intel i820 registers */ +#define INTEL_I820_RDCR 0x51 +#define INTEL_I820_ERRSTS 0xc8 + +/* Intel i840 registers */ +#define INTEL_I840_MCHCFG 0x50 +#define INTEL_I840_ERRSTS 0xc8 + +/* Intel i850 registers */ +#define INTEL_I850_MCHCFG 0x50 +#define INTEL_I850_ERRSTS 0xc8 + +/* intel 915G registers */ +#define I915_GMADR_BAR 2 +#define I915_MMADR_BAR 0 +#define I915_PTE_BAR 3 +#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) +#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) +#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) +#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) +#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) +#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) +#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) +#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) + +#define I915_IFPADDR 0x60 +#define I830_HIC 0x70 + +/* Intel 965G registers */ +#define I965_MSAC 0x62 +#define I965_IFPADDR 0x70 + +/* Intel 7505 registers */ +#define INTEL_I7505_APSIZE 0x74 +#define INTEL_I7505_NCAPID 0x60 +#define INTEL_I7505_NISTAT 0x6c +#define INTEL_I7505_ATTBASE 0x78 +#define INTEL_I7505_ERRSTS 0x42 +#define INTEL_I7505_AGPCTRL 0x70 +#define INTEL_I7505_MCHCFG 0x50 + +/* pci devices ids */ +#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 +#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a +#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 +#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 +#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 +#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 +#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 +#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 +#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 +#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 +#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 +#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 +#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 +#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 +#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC +#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE +#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 +#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 +#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 +#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 +#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 +#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 +#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 +#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 +#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 +#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 +#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 +#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 +#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90 +#define 
PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92 +#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 +#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 +#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 +#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 +#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 +#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 +#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 +#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 +#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 +#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a +#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 + +#endif diff --git a/kernel/drivers/char/agp/intel-gtt.c b/kernel/drivers/char/agp/intel-gtt.c new file mode 100644 index 000000000..0b4188b9a --- /dev/null +++ b/kernel/drivers/char/agp/intel-gtt.c @@ -0,0 +1,1442 @@ +/* + * Intel GTT (Graphics Translation Table) routines + * + * Caveat: This driver implements the linux agp interface, but this is far from + * an agp driver! GTT support ended up here for purely historical reasons: The + * old userspace intel graphics drivers needed an interface to map memory into + * the GTT. And the drm provides a default interface for graphics devices sitting + * on an agp port. So it made sense to fake the GTT support as an agp port to + * avoid having to create a new api. + * + * With gem this does not make much sense anymore, it just needlessly complicates + * the code. But as long as the old graphics stack is still supported, it's stuck + * here. + * + * /fairy-tale-mode off + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/pagemap.h> +#include <linux/agp_backend.h> +#include <linux/delay.h> +#include <asm/smp.h> +#include "agp.h" +#include "intel-agp.h" +#include <drm/intel-gtt.h> + +/* + * If we have Intel graphics, we're not going to have anything other than + * an Intel IOMMU. So make the correct use of the PCI DMA API contingent + * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). + * Only newer chipsets need to bother with this, of course. + */ +#ifdef CONFIG_INTEL_IOMMU +#define USE_PCI_DMA_API 1 +#else +#define USE_PCI_DMA_API 0 +#endif + +struct intel_gtt_driver { + unsigned int gen : 8; + unsigned int is_g33 : 1; + unsigned int is_pineview : 1; + unsigned int is_ironlake : 1; + unsigned int has_pgtbl_enable : 1; + unsigned int dma_mask_size : 8; + /* Chipset specific GTT setup */ + int (*setup)(void); + /* This should undo anything done in ->setup() save the unmapping + * of the mmio register file, that's done in the generic code. */ + void (*cleanup)(void); + void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); + /* Flags is a more or less chipset specific opaque value. + * For chipsets that need to support old ums (non-gem) code, this + * needs to be identical to the various supported agp memory types!
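+ * (i.e. the AGP_USER_* values from linux/agp_backend.h, such as AGP_USER_MEMORY and AGP_USER_CACHED_MEMORY)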
+
+static struct _intel_private {
+	const struct intel_gtt_driver *driver;
+	struct pci_dev *pcidev;	/* device one */
+	struct pci_dev *bridge_dev;
+	u8 __iomem *registers;
+	phys_addr_t gtt_phys_addr;
+	u32 PGETBL_save;
+	u32 __iomem *gtt;		/* I915G */
+	bool clear_fake_agp; /* on first access via agp, fill with scratch */
+	int num_dcache_entries;
+	void __iomem *i9xx_flush_page;
+	char *i81x_gtt_table;
+	struct resource ifp_resource;
+	int resource_valid;
+	struct page *scratch_page;
+	phys_addr_t scratch_page_dma;
+	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+	phys_addr_t gma_bus_addr;
+	/* Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
+} intel_private;
+
+#define INTEL_GTT_GEN	intel_private.driver->gen
+#define IS_G33		intel_private.driver->is_g33
+#define IS_PINEVIEW	intel_private.driver->is_pineview
+#define IS_IRONLAKE	intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
+{
+	struct scatterlist *sg;
+	int i;
+
+	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
+
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
+		goto err;
+
+	for_each_sg(st->sgl, sg, num_entries, i)
+		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
+		goto err;
+
+	return 0;
+
+err:
+	sg_free_table(st);
+	return -ENOMEM;
+}
+
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+{
+	struct sg_table st;
+	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
+
+	pci_unmap_sg(intel_private.pcidev, sg_list,
+		     num_sg, PCI_DMA_BIDIRECTIONAL);
+
+	st.sgl = sg_list;
+	st.orig_nents = st.nents = num_sg;
+
+	sg_free_table(&st);
+}
+
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+	struct page *page;
+
+	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+	if (page == NULL)
+		return NULL;
+
+	if (set_pages_uc(page, 4) < 0) {
+		set_pages_wb(page, 4);
+		__free_pages(page, 2);
+		return NULL;
+	}
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+	if (page == NULL)
+		return;
+
+	set_pages_wb(page, 4);
+	__free_pages(page, 2);
+	atomic_dec(&agp_bridge->current_memory_agp);
+}
+#endif
+
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
+{
+	phys_addr_t reg_addr;
+	char *gtt_table;
+
+	/* i81x does not preallocate the gtt. It's always 64kb in size.
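+	 * (An order-4 allocation is 2^4 pages, i.e. 64KB of PTEs; at four
+	 * bytes per entry that is 16384 entries, enough to map a 64MB
+	 * aperture at 4KB per page.)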
*/ + gtt_table = alloc_gatt_pages(I810_GTT_ORDER); + if (gtt_table == NULL) + return -ENOMEM; + intel_private.i81x_gtt_table = gtt_table; + + reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); + + intel_private.registers = ioremap(reg_addr, KB(64)); + if (!intel_private.registers) + return -ENOMEM; + + writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED, + intel_private.registers+I810_PGETBL_CTL); + + intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; + + if ((readl(intel_private.registers+I810_DRAM_CTL) + & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { + dev_info(&intel_private.pcidev->dev, + "detected 4MB dedicated video ram\n"); + intel_private.num_dcache_entries = 1024; + } + + return 0; +} + +static void i810_cleanup(void) +{ + writel(0, intel_private.registers+I810_PGETBL_CTL); + free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); +} + +#if IS_ENABLED(CONFIG_AGP_INTEL) +static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, + int type) +{ + int i; + + if ((pg_start + mem->page_count) + > intel_private.num_dcache_entries) + return -EINVAL; + + if (!mem->is_flushed) + global_cache_flush(); + + for (i = pg_start; i < (pg_start + mem->page_count); i++) { + dma_addr_t addr = i << PAGE_SHIFT; + intel_private.driver->write_entry(addr, + i, type); + } + wmb(); + + return 0; +} + +/* + * The i810/i830 requires a physical address to program its mouse + * pointer into hardware. + * However the Xserver still writes to it through the agp aperture. + */ +static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) +{ + struct agp_memory *new; + struct page *page; + + switch (pg_count) { + case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); + break; + case 4: + /* kludge to get 4 physical pages for ARGB cursor */ + page = i8xx_alloc_pages(); + break; + default: + return NULL; + } + + if (page == NULL) + return NULL; + + new = agp_create_memory(pg_count); + if (new == NULL) + return NULL; + + new->pages[0] = page; + if (pg_count == 4) { + /* kludge to get 4 physical pages for ARGB cursor */ + new->pages[1] = new->pages[0] + 1; + new->pages[2] = new->pages[1] + 1; + new->pages[3] = new->pages[2] + 1; + } + new->page_count = pg_count; + new->num_scratch_pages = pg_count; + new->type = AGP_PHYS_MEMORY; + new->physical = page_to_phys(new->pages[0]); + return new; +} + +static void intel_i810_free_by_type(struct agp_memory *curr) +{ + agp_free_key(curr->key); + if (curr->type == AGP_PHYS_MEMORY) { + if (curr->page_count == 4) + i8xx_destroy_pages(curr->pages[0]); + else { + agp_bridge->driver->agp_destroy_page(curr->pages[0], + AGP_PAGE_DESTROY_UNMAP); + agp_bridge->driver->agp_destroy_page(curr->pages[0], + AGP_PAGE_DESTROY_FREE); + } + agp_free_page_array(curr); + } + kfree(curr); +} +#endif + +static int intel_gtt_setup_scratch_page(void) +{ + struct page *page; + dma_addr_t dma_addr; + + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + if (page == NULL) + return -ENOMEM; + set_pages_uc(page, 1); + + if (intel_private.needs_dmar) { + dma_addr = pci_map_page(intel_private.pcidev, page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) + return -EINVAL; + + intel_private.scratch_page_dma = dma_addr; + } else + intel_private.scratch_page_dma = page_to_phys(page); + + intel_private.scratch_page = page; + + return 0; +} + +static void i810_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + u32 pte_flags = I810_PTE_VALID; + + switch (flags) { 
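+	/*
+	 * AGP_DCACHE_MEMORY points the PTE at the i810's local (on-board)
+	 * video memory via I810_PTE_LOCAL, while AGP_USER_CACHED_MEMORY
+	 * marks the entry snooped (I830_PTE_SYSTEM_CACHED) so cached
+	 * system memory can be used.
+	 */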
+ case AGP_DCACHE_MEMORY: + pte_flags |= I810_PTE_LOCAL; + break; + case AGP_USER_CACHED_MEMORY: + pte_flags |= I830_PTE_SYSTEM_CACHED; + break; + } + + writel_relaxed(addr | pte_flags, intel_private.gtt + entry); +} + +static const struct aper_size_info_fixed intel_fake_agp_sizes[] = { + {32, 8192, 3}, + {64, 16384, 4}, + {128, 32768, 5}, + {256, 65536, 6}, + {512, 131072, 7}, +}; + +static unsigned int intel_gtt_stolen_size(void) +{ + u16 gmch_ctrl; + u8 rdct; + int local = 0; + static const int ddt[4] = { 0, 16, 32, 64 }; + unsigned int stolen_size = 0; + + if (INTEL_GTT_GEN == 1) + return 0; /* no stolen mem on i81x */ + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + + if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || + intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { + switch (gmch_ctrl & I830_GMCH_GMS_MASK) { + case I830_GMCH_GMS_STOLEN_512: + stolen_size = KB(512); + break; + case I830_GMCH_GMS_STOLEN_1024: + stolen_size = MB(1); + break; + case I830_GMCH_GMS_STOLEN_8192: + stolen_size = MB(8); + break; + case I830_GMCH_GMS_LOCAL: + rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); + stolen_size = (I830_RDRAM_ND(rdct) + 1) * + MB(ddt[I830_RDRAM_DDT(rdct)]); + local = 1; + break; + default: + stolen_size = 0; + break; + } + } else { + switch (gmch_ctrl & I855_GMCH_GMS_MASK) { + case I855_GMCH_GMS_STOLEN_1M: + stolen_size = MB(1); + break; + case I855_GMCH_GMS_STOLEN_4M: + stolen_size = MB(4); + break; + case I855_GMCH_GMS_STOLEN_8M: + stolen_size = MB(8); + break; + case I855_GMCH_GMS_STOLEN_16M: + stolen_size = MB(16); + break; + case I855_GMCH_GMS_STOLEN_32M: + stolen_size = MB(32); + break; + case I915_GMCH_GMS_STOLEN_48M: + stolen_size = MB(48); + break; + case I915_GMCH_GMS_STOLEN_64M: + stolen_size = MB(64); + break; + case G33_GMCH_GMS_STOLEN_128M: + stolen_size = MB(128); + break; + case G33_GMCH_GMS_STOLEN_256M: + stolen_size = MB(256); + break; + case INTEL_GMCH_GMS_STOLEN_96M: + stolen_size = MB(96); + break; + case INTEL_GMCH_GMS_STOLEN_160M: + stolen_size = MB(160); + break; + case INTEL_GMCH_GMS_STOLEN_224M: + stolen_size = MB(224); + break; + case INTEL_GMCH_GMS_STOLEN_352M: + stolen_size = MB(352); + break; + default: + stolen_size = 0; + break; + } + } + + if (stolen_size > 0) { + dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", + stolen_size / KB(1), local ? 
"local" : "stolen"); + } else { + dev_info(&intel_private.bridge_dev->dev, + "no pre-allocated video memory detected\n"); + stolen_size = 0; + } + + return stolen_size; +} + +static void i965_adjust_pgetbl_size(unsigned int size_flag) +{ + u32 pgetbl_ctl, pgetbl_ctl2; + + /* ensure that ppgtt is disabled */ + pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); + pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; + writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); + + /* write the new ggtt size */ + pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; + pgetbl_ctl |= size_flag; + writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); +} + +static unsigned int i965_gtt_total_entries(void) +{ + int size; + u32 pgetbl_ctl; + u16 gmch_ctl; + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctl); + + if (INTEL_GTT_GEN == 5) { + switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { + case G4x_GMCH_SIZE_1M: + case G4x_GMCH_SIZE_VT_1M: + i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); + break; + case G4x_GMCH_SIZE_VT_1_5M: + i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); + break; + case G4x_GMCH_SIZE_2M: + case G4x_GMCH_SIZE_VT_2M: + i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); + break; + } + } + + pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + + switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { + case I965_PGETBL_SIZE_128KB: + size = KB(128); + break; + case I965_PGETBL_SIZE_256KB: + size = KB(256); + break; + case I965_PGETBL_SIZE_512KB: + size = KB(512); + break; + /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ + case I965_PGETBL_SIZE_1MB: + size = KB(1024); + break; + case I965_PGETBL_SIZE_2MB: + size = KB(2048); + break; + case I965_PGETBL_SIZE_1_5MB: + size = KB(1024 + 512); + break; + default: + dev_info(&intel_private.pcidev->dev, + "unknown page table size, assuming 512KB\n"); + size = KB(512); + } + + return size/4; +} + +static unsigned int intel_gtt_total_entries(void) +{ + if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) + return i965_gtt_total_entries(); + else { + /* On previous hardware, the GTT size was just what was + * required to map the aperture. 
+		 */
+		return intel_private.gtt_mappable_entries;
+	}
+}
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+	unsigned int aperture_size;
+
+	if (INTEL_GTT_GEN == 1) {
+		u32 smram_miscc;
+
+		pci_read_config_dword(intel_private.bridge_dev,
+				      I810_SMRAM_MISCC, &smram_miscc);
+
+		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+				== I810_GFX_MEM_WIN_32M)
+			aperture_size = MB(32);
+		else
+			aperture_size = MB(64);
+	} else if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+
+		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+			aperture_size = MB(64);
+		else
+			aperture_size = MB(128);
+	} else {
+		/* 9xx supports large sizes, just look at the length */
+		aperture_size = pci_resource_len(intel_private.pcidev, 2);
+	}
+
+	return aperture_size >> PAGE_SHIFT;
+}
+
+static void intel_gtt_teardown_scratch_page(void)
+{
+	set_pages_wb(intel_private.scratch_page, 1);
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+	intel_private.driver->cleanup();
+
+	iounmap(intel_private.gtt);
+	iounmap(intel_private.registers);
+
+	intel_gtt_teardown_scratch_page();
+}
+
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	const unsigned short gpu_devid = intel_private.pcidev->device;
+
+	/* Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+	     intel_iommu_gfx_mapped)
+		return 1;
+#endif
+	return 0;
+}
+
+static bool intel_gtt_can_wc(void)
+{
+	if (INTEL_GTT_GEN <= 2)
+		return false;
+
+	if (INTEL_GTT_GEN >= 6)
+		return false;
+
+	/* Reports of major corruption with ILK vt'd enabled */
+	if (needs_ilk_vtd_wa())
+		return false;
+
+	return true;
+}
+
+static int intel_gtt_init(void)
+{
+	u32 gtt_map_size;
+	int ret, bar;
+
+	ret = intel_private.driver->setup();
+	if (ret != 0)
+		return ret;
+
+	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.gtt_total_entries = intel_gtt_total_entries();
+
+	/* save the PGETBL reg for resume */
+	intel_private.PGETBL_save =
+		readl(intel_private.registers+I810_PGETBL_CTL)
+			& ~I810_PGETBL_ENABLED;
+	/* we only ever restore the register when enabling the PGTBL... */
+	if (HAS_PGTBL_EN)
+		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
+	dev_info(&intel_private.bridge_dev->dev,
+			"detected gtt size: %dK total, %dK mappable\n",
+			intel_private.gtt_total_entries * 4,
+			intel_private.gtt_mappable_entries * 4);
+
+	gtt_map_size = intel_private.gtt_total_entries * 4;
+
+	intel_private.gtt = NULL;
+	if (intel_gtt_can_wc())
+		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
+					       gtt_map_size);
+	if (intel_private.gtt == NULL)
+		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
+					    gtt_map_size);
+	if (intel_private.gtt == NULL) {
+		intel_private.driver->cleanup();
+		iounmap(intel_private.registers);
+		return -ENOMEM;
+	}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+	global_cache_flush();	/* FIXME: ?
*/ +#endif + + intel_private.stolen_size = intel_gtt_stolen_size(); + + intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; + + ret = intel_gtt_setup_scratch_page(); + if (ret != 0) { + intel_gtt_cleanup(); + return ret; + } + + if (INTEL_GTT_GEN <= 2) + bar = I810_GMADR_BAR; + else + bar = I915_GMADR_BAR; + + intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar); + return 0; +} + +#if IS_ENABLED(CONFIG_AGP_INTEL) +static int intel_fake_agp_fetch_size(void) +{ + int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); + unsigned int aper_size; + int i; + + aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1); + + for (i = 0; i < num_sizes; i++) { + if (aper_size == intel_fake_agp_sizes[i].size) { + agp_bridge->current_size = + (void *) (intel_fake_agp_sizes + i); + return aper_size; + } + } + + return 0; +} +#endif + +static void i830_cleanup(void) +{ +} + +/* The chipset_flush interface needs to get data that has already been + * flushed out of the CPU all the way out to main memory, because the GPU + * doesn't snoop those buffers. + * + * The 8xx series doesn't have the same lovely interface for flushing the + * chipset write buffers that the later chips do. According to the 865 + * specs, it's 64 octwords, or 1KB. So, to get those previous things in + * that buffer out, we just fill 1KB and clflush it out, on the assumption + * that it'll push whatever was in there out. It appears to work. + */ +static void i830_chipset_flush(void) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + /* Forcibly evict everything from the CPU write buffers. + * clflush appears to be insufficient. + */ + wbinvd_on_all_cpus(); + + /* Now we've only seen documents for this magic bit on 855GM, + * we hope it exists for the other gen2 chipsets... + * + * Also works as advertised on my 845G. + */ + writel(readl(intel_private.registers+I830_HIC) | (1<<31), + intel_private.registers+I830_HIC); + + while (readl(intel_private.registers+I830_HIC) & (1<<31)) { + if (time_after(jiffies, timeout)) + break; + + udelay(50); + } +} + +static void i830_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + u32 pte_flags = I810_PTE_VALID; + + if (flags == AGP_USER_CACHED_MEMORY) + pte_flags |= I830_PTE_SYSTEM_CACHED; + + writel_relaxed(addr | pte_flags, intel_private.gtt + entry); +} + +bool intel_enable_gtt(void) +{ + u8 __iomem *reg; + + if (INTEL_GTT_GEN == 2) { + u16 gmch_ctrl; + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + gmch_ctrl |= I830_GMCH_ENABLED; + pci_write_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, gmch_ctrl); + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { + dev_err(&intel_private.pcidev->dev, + "failed to enable the GTT: GMCH_CTRL=%x\n", + gmch_ctrl); + return false; + } + } + + /* On the resume path we may be adjusting the PGTBL value, so + * be paranoid and flush all chipset write buffers... 
+ */ + if (INTEL_GTT_GEN >= 3) + writel(0, intel_private.registers+GFX_FLSH_CNTL); + + reg = intel_private.registers+I810_PGETBL_CTL; + writel(intel_private.PGETBL_save, reg); + if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { + dev_err(&intel_private.pcidev->dev, + "failed to enable the GTT: PGETBL=%x [expected %x]\n", + readl(reg), intel_private.PGETBL_save); + return false; + } + + if (INTEL_GTT_GEN >= 3) + writel(0, intel_private.registers+GFX_FLSH_CNTL); + + return true; +} +EXPORT_SYMBOL(intel_enable_gtt); + +static int i830_setup(void) +{ + phys_addr_t reg_addr; + + reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); + + intel_private.registers = ioremap(reg_addr, KB(64)); + if (!intel_private.registers) + return -ENOMEM; + + intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; + + return 0; +} + +#if IS_ENABLED(CONFIG_AGP_INTEL) +static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) +{ + agp_bridge->gatt_table_real = NULL; + agp_bridge->gatt_table = NULL; + agp_bridge->gatt_bus_addr = 0; + + return 0; +} + +static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) +{ + return 0; +} + +static int intel_fake_agp_configure(void) +{ + if (!intel_enable_gtt()) + return -EIO; + + intel_private.clear_fake_agp = true; + agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; + + return 0; +} +#endif + +static bool i830_check_flags(unsigned int flags) +{ + switch (flags) { + case 0: + case AGP_PHYS_MEMORY: + case AGP_USER_CACHED_MEMORY: + case AGP_USER_MEMORY: + return true; + } + + return false; +} + +void intel_gtt_insert_sg_entries(struct sg_table *st, + unsigned int pg_start, + unsigned int flags) +{ + struct scatterlist *sg; + unsigned int len, m; + int i, j; + + j = pg_start; + + /* sg may merge pages, but we have to separate + * per-page addr for GTT */ + for_each_sg(st->sgl, sg, st->nents, i) { + len = sg_dma_len(sg) >> PAGE_SHIFT; + for (m = 0; m < len; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + intel_private.driver->write_entry(addr, j, flags); + j++; + } + } + wmb(); +} +EXPORT_SYMBOL(intel_gtt_insert_sg_entries); + +#if IS_ENABLED(CONFIG_AGP_INTEL) +static void intel_gtt_insert_pages(unsigned int first_entry, + unsigned int num_entries, + struct page **pages, + unsigned int flags) +{ + int i, j; + + for (i = 0, j = first_entry; i < num_entries; i++, j++) { + dma_addr_t addr = page_to_phys(pages[i]); + intel_private.driver->write_entry(addr, + j, flags); + } + wmb(); +} + +static int intel_fake_agp_insert_entries(struct agp_memory *mem, + off_t pg_start, int type) +{ + int ret = -EINVAL; + + if (intel_private.clear_fake_agp) { + int start = intel_private.stolen_size / PAGE_SIZE; + int end = intel_private.gtt_mappable_entries; + intel_gtt_clear_range(start, end - start); + intel_private.clear_fake_agp = false; + } + + if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) + return i810_insert_dcache_entries(mem, pg_start, type); + + if (mem->page_count == 0) + goto out; + + if (pg_start + mem->page_count > intel_private.gtt_total_entries) + goto out_err; + + if (type != mem->type) + goto out_err; + + if (!intel_private.driver->check_flags(type)) + goto out_err; + + if (!mem->is_flushed) + global_cache_flush(); + + if (intel_private.needs_dmar) { + struct sg_table st; + + ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); + if (ret != 0) + return ret; + + intel_gtt_insert_sg_entries(&st, pg_start, type); + mem->sg_list = st.sgl; + mem->num_sg = st.nents; + } else + 
intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, + type); + +out: + ret = 0; +out_err: + mem->is_flushed = true; + return ret; +} +#endif + +void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) +{ + unsigned int i; + + for (i = first_entry; i < (first_entry + num_entries); i++) { + intel_private.driver->write_entry(intel_private.scratch_page_dma, + i, 0); + } + wmb(); +} +EXPORT_SYMBOL(intel_gtt_clear_range); + +#if IS_ENABLED(CONFIG_AGP_INTEL) +static int intel_fake_agp_remove_entries(struct agp_memory *mem, + off_t pg_start, int type) +{ + if (mem->page_count == 0) + return 0; + + intel_gtt_clear_range(pg_start, mem->page_count); + + if (intel_private.needs_dmar) { + intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); + mem->sg_list = NULL; + mem->num_sg = 0; + } + + return 0; +} + +static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, + int type) +{ + struct agp_memory *new; + + if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) { + if (pg_count != intel_private.num_dcache_entries) + return NULL; + + new = agp_create_memory(1); + if (new == NULL) + return NULL; + + new->type = AGP_DCACHE_MEMORY; + new->page_count = pg_count; + new->num_scratch_pages = 0; + agp_free_page_array(new); + return new; + } + if (type == AGP_PHYS_MEMORY) + return alloc_agpphysmem_i8xx(pg_count, type); + /* always return NULL for other allocation types for now */ + return NULL; +} +#endif + +static int intel_alloc_chipset_flush_resource(void) +{ + int ret; + ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE, + PAGE_SIZE, PCIBIOS_MIN_MEM, 0, + pcibios_align_resource, intel_private.bridge_dev); + + return ret; +} + +static void intel_i915_setup_chipset_flush(void) +{ + int ret; + u32 temp; + + pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp); + if (!(temp & 0x1)) { + intel_alloc_chipset_flush_resource(); + intel_private.resource_valid = 1; + pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); + } else { + temp &= ~1; + + intel_private.resource_valid = 1; + intel_private.ifp_resource.start = temp; + intel_private.ifp_resource.end = temp + PAGE_SIZE; + ret = request_resource(&iomem_resource, &intel_private.ifp_resource); + /* some BIOSes reserve this area in a pnp some don't */ + if (ret) + intel_private.resource_valid = 0; + } +} + +static void intel_i965_g33_setup_chipset_flush(void) +{ + u32 temp_hi, temp_lo; + int ret; + + pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi); + pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo); + + if (!(temp_lo & 0x1)) { + + intel_alloc_chipset_flush_resource(); + + intel_private.resource_valid = 1; + pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, + upper_32_bits(intel_private.ifp_resource.start)); + pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); + } else { + u64 l64; + + temp_lo &= ~0x1; + l64 = ((u64)temp_hi << 32) | temp_lo; + + intel_private.resource_valid = 1; + intel_private.ifp_resource.start = l64; + intel_private.ifp_resource.end = l64 + PAGE_SIZE; + ret = request_resource(&iomem_resource, &intel_private.ifp_resource); + /* some BIOSes reserve this area in a pnp some don't */ + if (ret) + intel_private.resource_valid = 0; + } +} + +static void intel_i9xx_setup_flush(void) +{ + /* return if already configured */ + if 
(intel_private.ifp_resource.start) + return; + + if (INTEL_GTT_GEN == 6) + return; + + /* setup a resource for this object */ + intel_private.ifp_resource.name = "Intel Flush Page"; + intel_private.ifp_resource.flags = IORESOURCE_MEM; + + /* Setup chipset flush for 915 */ + if (IS_G33 || INTEL_GTT_GEN >= 4) { + intel_i965_g33_setup_chipset_flush(); + } else { + intel_i915_setup_chipset_flush(); + } + + if (intel_private.ifp_resource.start) + intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); + if (!intel_private.i9xx_flush_page) + dev_err(&intel_private.pcidev->dev, + "can't ioremap flush page - no chipset flushing\n"); +} + +static void i9xx_cleanup(void) +{ + if (intel_private.i9xx_flush_page) + iounmap(intel_private.i9xx_flush_page); + if (intel_private.resource_valid) + release_resource(&intel_private.ifp_resource); + intel_private.ifp_resource.start = 0; + intel_private.resource_valid = 0; +} + +static void i9xx_chipset_flush(void) +{ + if (intel_private.i9xx_flush_page) + writel(1, intel_private.i9xx_flush_page); +} + +static void i965_write_entry(dma_addr_t addr, + unsigned int entry, + unsigned int flags) +{ + u32 pte_flags; + + pte_flags = I810_PTE_VALID; + if (flags == AGP_USER_CACHED_MEMORY) + pte_flags |= I830_PTE_SYSTEM_CACHED; + + /* Shift high bits down */ + addr |= (addr >> 28) & 0xf0; + writel_relaxed(addr | pte_flags, intel_private.gtt + entry); +} + +static int i9xx_setup(void) +{ + phys_addr_t reg_addr; + int size = KB(512); + + reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR); + + intel_private.registers = ioremap(reg_addr, size); + if (!intel_private.registers) + return -ENOMEM; + + switch (INTEL_GTT_GEN) { + case 3: + intel_private.gtt_phys_addr = + pci_resource_start(intel_private.pcidev, I915_PTE_BAR); + break; + case 5: + intel_private.gtt_phys_addr = reg_addr + MB(2); + break; + default: + intel_private.gtt_phys_addr = reg_addr + KB(512); + break; + } + + intel_i9xx_setup_flush(); + + return 0; +} + +#if IS_ENABLED(CONFIG_AGP_INTEL) +static const struct agp_bridge_driver intel_fake_agp_driver = { + .owner = THIS_MODULE, + .size_type = FIXED_APER_SIZE, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), + .configure = intel_fake_agp_configure, + .fetch_size = intel_fake_agp_fetch_size, + .cleanup = intel_gtt_cleanup, + .agp_enable = intel_fake_agp_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = intel_fake_agp_create_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, + .alloc_by_type = intel_fake_agp_alloc_by_type, + .free_by_type = intel_i810_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, +}; +#endif + +static const struct intel_gtt_driver i81x_gtt_driver = { + .gen = 1, + .has_pgtbl_enable = 1, + .dma_mask_size = 32, + .setup = i810_setup, + .cleanup = i810_cleanup, + .check_flags = i830_check_flags, + .write_entry = i810_write_entry, +}; +static const struct intel_gtt_driver i8xx_gtt_driver = { + .gen = 2, + .has_pgtbl_enable = 1, + .setup = i830_setup, + .cleanup = i830_cleanup, + .write_entry = i830_write_entry, + .dma_mask_size = 32, + .check_flags = i830_check_flags, + .chipset_flush = i830_chipset_flush, +}; +static const struct intel_gtt_driver i915_gtt_driver = { 
+ .gen = 3, + .has_pgtbl_enable = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + /* i945 is the last gpu to need phys mem (for overlay and cursors). */ + .write_entry = i830_write_entry, + .dma_mask_size = 32, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver g33_gtt_driver = { + .gen = 3, + .is_g33 = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver pineview_gtt_driver = { + .gen = 3, + .is_pineview = 1, .is_g33 = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver i965_gtt_driver = { + .gen = 4, + .has_pgtbl_enable = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver g4x_gtt_driver = { + .gen = 5, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver ironlake_gtt_driver = { + .gen = 5, + .is_ironlake = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; + +/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of + * driver and gmch_driver must be non-null, and find_gmch will determine + * which one should be used if a gmch_chip_id is present. 
+ */ +static const struct intel_gtt_driver_description { + unsigned int gmch_chip_id; + char *name; + const struct intel_gtt_driver *gtt_driver; +} intel_gtt_chipsets[] = { + { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", + &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82845G_IG, "845G", + &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82854_IG, "854", + &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", + &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82865_IG, "865", + &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", + &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", + &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", + &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", + &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", + &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", + &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", + &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", + &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", + &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", + &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", + &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", + &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G33_IG, "G33", + &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", + &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", + &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", + &pineview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", + &pineview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_B43_IG, "B43", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G41_IG, "G41", + &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, + "HD Graphics", &ironlake_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, + "HD Graphics", &ironlake_gtt_driver }, + { 0, NULL, NULL } +}; + +static int find_gmch(u16 device) +{ + struct pci_dev *gmch_device; + + gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { + gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, + device, gmch_device); + } + + if (!gmch_device) + return 0; + + intel_private.pcidev = gmch_device; + return 1; +} + +int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, + struct agp_bridge_data *bridge) +{ + int i, mask; + + /* + * Can be called from the fake agp driver but also directly from + * drm/i915.ko. Hence we need to check whether everything is set up + * already. 
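+	 * (drm/i915 passes the GPU's pci_dev with a NULL bridge, while the
+	 * fake agp driver passes only the bridge; in that case the GMCH is
+	 * looked up by device id via find_gmch() below.)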
+ */ + if (intel_private.driver) { + intel_private.refcount++; + return 1; + } + + for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { + if (gpu_pdev) { + if (gpu_pdev->device == + intel_gtt_chipsets[i].gmch_chip_id) { + intel_private.pcidev = pci_dev_get(gpu_pdev); + intel_private.driver = + intel_gtt_chipsets[i].gtt_driver; + + break; + } + } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { + intel_private.driver = + intel_gtt_chipsets[i].gtt_driver; + break; + } + } + + if (!intel_private.driver) + return 0; + + intel_private.refcount++; + +#if IS_ENABLED(CONFIG_AGP_INTEL) + if (bridge) { + bridge->driver = &intel_fake_agp_driver; + bridge->dev_private_data = &intel_private; + bridge->dev = bridge_pdev; + } +#endif + + intel_private.bridge_dev = pci_dev_get(bridge_pdev); + + dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); + + mask = intel_private.driver->dma_mask_size; + if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) + dev_err(&intel_private.pcidev->dev, + "set gfx device dma mask %d-bit failed!\n", mask); + else + pci_set_consistent_dma_mask(intel_private.pcidev, + DMA_BIT_MASK(mask)); + + if (intel_gtt_init() != 0) { + intel_gmch_remove(); + + return 0; + } + + return 1; +} +EXPORT_SYMBOL(intel_gmch_probe); + +void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, + phys_addr_t *mappable_base, unsigned long *mappable_end) +{ + *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; + *stolen_size = intel_private.stolen_size; + *mappable_base = intel_private.gma_bus_addr; + *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; +} +EXPORT_SYMBOL(intel_gtt_get); + +void intel_gtt_chipset_flush(void) +{ + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); +} +EXPORT_SYMBOL(intel_gtt_chipset_flush); + +void intel_gmch_remove(void) +{ + if (--intel_private.refcount) + return; + + if (intel_private.pcidev) + pci_dev_put(intel_private.pcidev); + if (intel_private.bridge_dev) + pci_dev_put(intel_private.bridge_dev); + intel_private.driver = NULL; +} +EXPORT_SYMBOL(intel_gmch_remove); + +MODULE_AUTHOR("Dave Jones, Various @Intel"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/isoch.c b/kernel/drivers/char/agp/isoch.c new file mode 100644 index 000000000..c73385cc4 --- /dev/null +++ b/kernel/drivers/char/agp/isoch.c @@ -0,0 +1,470 @@ +/* + * Setup routines for AGP 3.5 compliant bridges. 
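+ *
+ * The bridge ("target") advertises its isochronous limits in AGPNISTAT;
+ * the routines below budget bandwidth and request-queue slots among the
+ * AGP devices ("masters") behind it and program each master through its
+ * AGPNICMD/AGPCMD registers.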
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "agp.h"
+
+/* Generic AGP 3.5 enabling routines */
+
+struct agp_3_5_dev {
+	struct list_head list;
+	u8 capndx;
+	u32 maxbw;
+	struct pci_dev *dev;
+};
+
+static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+	struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
+	struct list_head *pos;
+
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		if (cur->maxbw > n->maxbw)
+			break;
+	}
+	list_add_tail(new, pos);
+}
+
+static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
+{
+	struct agp_3_5_dev *cur;
+	struct pci_dev *dev;
+	struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+	u32 nistat;
+
+	INIT_LIST_HEAD(head);
+
+	for (pos=start; pos!=head; ) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		dev = cur->dev;
+
+		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
+		cur->maxbw = (nistat >> 16) & 0xff;
+
+		tmp = pos;
+		pos = pos->next;
+		agp_3_5_dev_list_insert(head, tmp);
+	}
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+
+static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
+		struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+	/*
+	 * Convenience structure to make the calculations clearer
+	 * here. The field names come straight from the AGP 3.0 spec.
+	 */
+	struct isoch_data {
+		u32 maxbw;
+		u32 n;
+		u32 y;
+		u32 l;
+		u32 rq;
+		struct agp_3_5_dev *dev;
+	};
+
+	struct pci_dev *td = bridge->dev, *dev;
+	struct list_head *head = &dev_list->list, *pos;
+	struct agp_3_5_dev *cur;
+	struct isoch_data *master, target;
+	unsigned int cdev = 0;
+	u32 mnistat, tnistat, tstatus, mcmd;
+	u16 tnicmd, mnicmd;
+	u8 mcapndx;
+	u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+	u32 step, rem, rem_isoch, rem_async;
+	int ret = 0;
+
+	/*
+	 * We'll work with an array of isoch_data's (one for each
+	 * device in dev_list) throughout this function.
+	 */
+	if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto get_out;
+	}
+
+	/*
+	 * Sort the device list by maxbw. We need to do this because the
+	 * spec suggests that the devices with the smallest requirements
+	 * have their resources allocated first, with all remaining resources
+	 * falling to the device with the largest requirement.
+	 *
+	 * We don't exactly do this, we divide target resources by ndevs
+	 * and split them amongst the AGP 3.0 devices. The remainder of such
+	 * division operations are dropped on the last device, sort of like
+	 * the spec mentions it should be done.
+	 *
+	 * We can't do this sort when we initially construct the dev_list
+	 * because we don't know until this function whether isochronous
+	 * transfers are enabled and consequently whether maxbw will mean
+	 * anything.
+	 */
+	agp_3_5_dev_list_sort(dev_list, ndevs);
+
+	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+
+	/* Extract power-on defaults from the target */
+	target.maxbw = (tnistat >> 16) & 0xff;
+	target.n     = (tnistat >> 8)  & 0xff;
+	target.y     = (tnistat >> 6)  & 0x3;
+	target.l     = (tnistat >> 3)  & 0x7;
+	target.rq    = (tstatus >> 24) & 0xff;
+
+	y_max = target.y;
+
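+	/*
+	 * Worked example with an illustrative value: tnistat = 0x00200b40
+	 * decodes as maxbw = 0x20, n = 0x0b, y = 0x1, l = 0x0.
+	 */
+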
+	/*
+	 * Extract power-on defaults for each device in dev_list. Along
+	 * the way, calculate the total isochronous bandwidth required
+	 * by these devices and the largest requested payload size.
+	 */
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
+
+		master[cdev].maxbw = (mnistat >> 16) & 0xff;
+		master[cdev].n     = (mnistat >> 8)  & 0xff;
+		master[cdev].y     = (mnistat >> 6)  & 0x3;
+		master[cdev].dev   = cur;
+
+		tot_bw += master[cdev].maxbw;
+		y_max = max(y_max, master[cdev].y);
+
+		cdev++;
+	}
+
+	/* Check if this configuration has any chance of working */
+	if (tot_bw > target.maxbw) {
+		dev_err(&td->dev, "isochronous bandwidth required "
+			"by AGP 3.0 devices exceeds that which is supported by "
+			"the AGP 3.0 bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	target.y = y_max;
+
+	/*
+	 * Write the calculated payload size into the target's NICMD
+	 * register. Doing this directly affects the ISOCH_N value
+	 * in the target's NISTAT register, so we need to do this now
+	 * to get an accurate value for ISOCH_N later.
+	 */
+	pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
+	tnicmd &= ~(0x3 << 6);
+	tnicmd |= target.y << 6;
+	pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
+
+	/* Reread the target's ISOCH_N */
+	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+	target.n = (tnistat >> 8) & 0xff;
+
+	/* Calculate the minimum ISOCH_N needed by each master */
+	for (cdev=0; cdev<ndevs; cdev++) {
+		master[cdev].y = target.y;
+		master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+		tot_n += master[cdev].n;
+	}
+
+	/* Exit if the minimal ISOCH_N allocation among the masters is more
+	 * than the target can handle. */
+	if (tot_n > target.n) {
+		dev_err(&td->dev, "number of isochronous "
+			"transactions per period required by AGP 3.0 devices "
+			"exceeds that which is supported by the AGP 3.0 "
+			"bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/* Calculate left over ISOCH_N capability in the target. We'll give
+	 * this to the hungriest device (as per the spec) */
+	rem  = target.n - tot_n;
+
+	/*
+	 * Calculate the minimum isochronous RQ depth needed by each master.
+	 * Along the way, distribute the extra ISOCH_N capability calculated
+	 * above.
+	 */
+	for (cdev=0; cdev<ndevs; cdev++) {
+		/*
+		 * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
+		 * byte isochronous writes will be broken into 64B pieces.
+		 * This means we need to budget more RQ depth to account for
+		 * these kind of writes (each isochronous write is actually
+		 * many writes on the AGP bus).
+		 */
+		master[cdev].rq = master[cdev].n;
+		if (master[cdev].y > 0x1)
+			master[cdev].rq *= (1 << (master[cdev].y - 1));
+
+		tot_rq += master[cdev].rq;
+	}
+	master[ndevs-1].n += rem;
+
+	/* Figure the number of isochronous and asynchronous RQ slots the
+	 * target is providing. */
+	rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+	rq_async = target.rq - rq_isoch;
+
+	/* Exit if the minimal RQ needs of the masters exceeds what the target
+	 * can provide. */
+	if (tot_rq > rq_isoch) {
+		dev_err(&td->dev, "number of request queue slots "
+			"required by the isochronous bandwidth requested by "
+			"AGP 3.0 devices exceeds the number provided by the "
+			"AGP 3.0 bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/* Calculate asynchronous RQ capability in the target (per master) as
+	 * well as the total number of leftover isochronous RQ slots. */
+	step      = rq_async / ndevs;
+	rem_async = step + (rq_async % ndevs);
+	rem_isoch = rq_isoch - tot_rq;
+
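+	/*
+	 * Illustrative check of the RQ math above: with y = 2, each
+	 * isochronous transaction carries a 128-byte payload that the bus
+	 * splits into two 64-byte writes, so rq = n << (y - 1) doubles the
+	 * request-queue depth budgeted for such a master.
+	 */
+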
+	/* Distribute the extra RQ slots calculated above and write our
+	 * isochronous settings out to the actual devices. */
+	for (cdev=0; cdev<ndevs; cdev++) {
+		cur = master[cdev].dev;
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		master[cdev].rq += (cdev == ndevs - 1)
+			      ? (rem_async + rem_isoch) : step;
+
+		pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
+		pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
+
+		mnicmd &= ~(0xff << 8);
+		mnicmd &= ~(0x3  << 6);
+		mcmd   &= ~(0xff << 24);
+
+		mnicmd |= master[cdev].n  << 8;
+		mnicmd |= master[cdev].y  << 6;
+		mcmd   |= master[cdev].rq << 24;
+
+		pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
+		pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
+	}
+
+free_and_exit:
+	kfree(master);
+
+get_out:
+	return ret;
+}
+
+/*
+ * This function basically allocates request queue slots among the
+ * AGP 3.0 systems in nonisochronous nodes. The algorithm is
+ * pretty stupid, divide the total number of RQ slots provided by the
+ * target by ndevs. Distribute this many slots to each AGP 3.0 device,
+ * giving any left over slots to the last device in dev_list.
+ */
+static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
+		struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+	struct agp_3_5_dev *cur;
+	struct list_head *head = &dev_list->list, *pos;
+	u32 tstatus, mcmd;
+	u32 trq, mrq, rem;
+	unsigned int cdev = 0;
+
+	pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
+
+	trq = (tstatus >> 24) & 0xff;
+	mrq = trq / ndevs;
+
+	rem = mrq + (trq % ndevs);
+
+	for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
+		cur = list_entry(pos, struct agp_3_5_dev, list);
+
+		pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
+		mcmd &= ~(0xff << 24);
+		mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+		pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
+	}
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+int agp_3_5_enable(struct agp_bridge_data *bridge)
+{
+	struct pci_dev *td = bridge->dev, *dev = NULL;
+	u8 mcapndx;
+	u32 isoch, arqsz;
+	u32 tstatus, mstatus, ncapid;
+	u32 mmajor;
+	u16 mpstat;
+	struct agp_3_5_dev *dev_list, *cur;
+	struct list_head *head, *pos;
+	unsigned int ndevs = 0;
+	int ret = 0;
+
+	/* Extract some power-on defaults from the target */
+	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+	isoch     = (tstatus >> 17) & 0x1;
+	if (isoch == 0)	/* isoch xfers not available, bail out. */
+		return -ENODEV;
+
+	arqsz     = (tstatus >> 13) & 0x7;
+
+	/*
+	 * Allocate a head for our AGP 3.5 device list
+	 * (multiple AGP v3 devices are allowed behind a single bridge).
+	 */
+	if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto get_out;
+	}
+	head = &dev_list->list;
+	INIT_LIST_HEAD(head);
+
+	/* Find all AGP devices, and add them to dev_list. */
+	for_each_pci_dev(dev) {
+		mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
+		if (mcapndx == 0)
+			continue;
+
+		switch ((dev->class >>8) & 0xff00) {
+			case 0x0600:	/* Bridge */
+				/* Skip bridges. We should call this function for each one. */
+				continue;
+
+			case 0x0001:	/* Unclassified device */
+				/* Don't know what this is, but log it for investigation. */
+				if (mcapndx != 0) {
+					dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
+						 pci_name(dev),
+						 dev->vendor, dev->device);
+				}
+				continue;
+
+			case 0x0300:	/* Display controller */
+			case 0x0400:	/* Multimedia controller */
+				if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+					ret = -ENOMEM;
+					goto free_and_exit;
+				}
+				cur->dev = dev;
+
+				pos = &cur->list;
+				list_add(pos, head);
+				ndevs++;
+				continue;
+
+			default:
+				continue;
+		}
+	}
+
+	/*
+	 * Take an initial pass through the devices lying behind our host
+	 * bridge.
Make sure each one is actually an AGP 3.0 device, otherwise + * exit with an error message. Along the way store the AGP 3.0 + * cap_ptr for each device + */ + list_for_each(pos, head) { + cur = list_entry(pos, struct agp_3_5_dev, list); + dev = cur->dev; + + pci_read_config_word(dev, PCI_STATUS, &mpstat); + if ((mpstat & PCI_STATUS_CAP_LIST) == 0) + continue; + + pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx); + if (mcapndx != 0) { + do { + pci_read_config_dword(dev, mcapndx, &ncapid); + if ((ncapid & 0xff) != 2) + mcapndx = (ncapid >> 8) & 0xff; + } + while (((ncapid & 0xff) != 2) && (mcapndx != 0)); + } + + if (mcapndx == 0) { + dev_err(&td->dev, "woah! Non-AGP device %s on " + "secondary bus of AGP 3.5 bridge!\n", + pci_name(dev)); + ret = -ENODEV; + goto free_and_exit; + } + + mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; + if (mmajor < 3) { + dev_err(&td->dev, "woah! AGP 2.0 device %s on " + "secondary bus of AGP 3.5 bridge operating " + "with AGP 3.0 electricals!\n", pci_name(dev)); + ret = -ENODEV; + goto free_and_exit; + } + + cur->capndx = mcapndx; + + pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); + + if (((mstatus >> 3) & 0x1) == 0) { + dev_err(&td->dev, "woah! AGP 3.x device %s not " + "operating in AGP 3.x mode on secondary bus " + "of AGP 3.5 bridge operating with AGP 3.0 " + "electricals!\n", pci_name(dev)); + ret = -ENODEV; + goto free_and_exit; + } + } + + /* + * Call functions to divide target resources amongst the AGP 3.0 + * masters. This process is dramatically different depending on + * whether isochronous transfers are supported. + */ + if (isoch) { + ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); + if (ret) { + dev_info(&td->dev, "something bad happened setting " + "up isochronous xfers; falling back to " + "non-isochronous xfer mode\n"); + } else { + goto free_and_exit; + } + } + agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs); + +free_and_exit: + /* Be sure to free the dev_list */ + for (pos=head->next; pos!=head; ) { + cur = list_entry(pos, struct agp_3_5_dev, list); + + pos = pos->next; + kfree(cur); + } + kfree(dev_list); + +get_out: + return ret; +} diff --git a/kernel/drivers/char/agp/nvidia-agp.c b/kernel/drivers/char/agp/nvidia-agp.c new file mode 100644 index 000000000..6c8d39cb5 --- /dev/null +++ b/kernel/drivers/char/agp/nvidia-agp.c @@ -0,0 +1,476 @@ +/* + * Nvidia AGPGART routines. + * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up + * to work in 2.5 by Dave Jones. 
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include "agp.h"
+
+/* NVIDIA registers */
+#define NVIDIA_0_APSIZE		0x80
+#define NVIDIA_1_WBC		0xf0
+#define NVIDIA_2_GARTCTRL	0xd0
+#define NVIDIA_2_APBASE		0xd8
+#define NVIDIA_2_APLIMIT	0xdc
+#define NVIDIA_2_ATTBASE(i)	(0xe0 + (i) * 4)
+#define NVIDIA_3_APBASE		0x50
+#define NVIDIA_3_APLIMIT	0x54
+
+
+static struct _nvidia_private {
+	struct pci_dev *dev_1;
+	struct pci_dev *dev_2;
+	struct pci_dev *dev_3;
+	volatile u32 __iomem *aperture;
+	int num_active_entries;
+	off_t pg_offset;
+	u32 wbc_mask;
+} nvidia_private;
+
+
+static int nvidia_fetch_size(void)
+{
+	int i;
+	u8 size_value;
+	struct aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
+	size_value &= 0x0f;
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (size_value == values[i].size_value) {
+			agp_bridge->previous_size =
+				agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+#define SYSCFG          0xC0010010
+#define IORR_BASE0      0xC0010016
+#define IORR_MASK0      0xC0010017
+#define AMD_K7_NUM_IORR 2
+
+static int nvidia_init_iorr(u32 base, u32 size)
+{
+	u32 base_hi, base_lo;
+	u32 mask_hi, mask_lo;
+	u32 sys_hi, sys_lo;
+	u32 iorr_addr, free_iorr_addr;
+
+	/* Find the iorr that is already used for the base */
+	/* If not found, determine the uppermost available iorr */
+	free_iorr_addr = AMD_K7_NUM_IORR;
+	for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
+		rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+		rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+		if ((base_lo & 0xfffff000) == (base & 0xfffff000))
+			break;
+
+		if ((mask_lo & 0x00000800) == 0)
+			free_iorr_addr = iorr_addr;
+	}
+
+	if (iorr_addr >= AMD_K7_NUM_IORR) {
+		iorr_addr = free_iorr_addr;
+		if (iorr_addr >= AMD_K7_NUM_IORR)
+			return -EINVAL;
+	}
+	base_hi = 0x0;
+	base_lo = (base & ~0xfff) | 0x18;
+	mask_hi = 0xf;
+	mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
+	wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+	wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+	rdmsr(SYSCFG, sys_lo, sys_hi);
+	sys_lo |= 0x00100000;
+	wrmsr(SYSCFG, sys_lo, sys_hi);
+
+	return 0;
+}
+
+static int nvidia_configure(void)
+{
+	int i, rc, num_dirs;
+	u32 apbase, aplimit;
+	phys_addr_t apbase_phys;
+	struct aper_size_info_8 *current_size;
+	u32 temp;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+		current_size->size_value);
+
+	/* address to map to */
+	apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+	agp_bridge->gart_bus_addr = apbase;
+	aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
+	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
+	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
+	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
+	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
+	if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
+		return rc;
+
+	/* directory size is 64k */
+	num_dirs = current_size->size / 64;
+	nvidia_private.num_active_entries = current_size->num_entries;
+	nvidia_private.pg_offset = 0;
+	if (num_dirs == 0) {
+		num_dirs = 1;
+		nvidia_private.num_active_entries /= (64 / current_size->size);
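+		/*
+		 * Illustrative numbers: for the 32MB entry of
+		 * nvidia_generic_sizes below ({32, 16384, 4, 15}), this
+		 * becomes 16384 / (64 / 32) = 8192 live entries, and
+		 * pg_offset picks whichever 32MB-aligned half of the 64MB
+		 * window the aperture actually occupies:
+		 */
+		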
nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) & + ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE; + } + + /* attbase */ + for (i = 0; i < 8; i++) { + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i), + (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1); + } + + /* gtlb control */ + pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11); + + /* gart control */ + pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); + pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100); + + /* map aperture */ + apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR); + nvidia_private.aperture = + (volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE); + + if (!nvidia_private.aperture) + return -ENOMEM; + + return 0; +} + +static void nvidia_cleanup(void) +{ + struct aper_size_info_8 *previous_size; + u32 temp; + + /* gart control */ + pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); + pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100)); + + /* gtlb control */ + pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11)); + + /* unmap aperture */ + iounmap((void __iomem *) nvidia_private.aperture); + + /* restore previous aperture size */ + previous_size = A_SIZE_8(agp_bridge->previous_size); + pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, + previous_size->size_value); + + /* restore iorr for previous aperture size */ + nvidia_init_iorr(agp_bridge->gart_bus_addr, + previous_size->size * 1024 * 1024); +} + + +/* + * Note we can't use the generic routines, even though they are 99% the same. + * Aperture sizes <64M still requires a full 64k GART directory, but + * only use the portion of the TLB entries that correspond to the apertures + * alignment inside the surrounding 64M block. + */ +extern int agp_memory_reserved; + +static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + int i, j; + int mask_type; + + mask_type = agp_generic_type_to_mask_type(mem->bridge, type); + if (mask_type != 0 || type != mem->type) + return -EINVAL; + + if (mem->page_count == 0) + return 0; + + if ((pg_start + mem->page_count) > + (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE)) + return -EINVAL; + + for (j = pg_start; j < (pg_start + mem->page_count); j++) { + if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j))) + return -EBUSY; + } + + if (!mem->is_flushed) { + global_cache_flush(); + mem->is_flushed = true; + } + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + writel(agp_bridge->driver->mask_memory(agp_bridge, + page_to_phys(mem->pages[i]), mask_type), + agp_bridge->gatt_table+nvidia_private.pg_offset+j); + } + + /* PCI Posting. 
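+	 * Reading back the last PTE written flushes the posted writes out
+	 * to the chipset before the TLB flush below.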
*/ + readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1); + + agp_bridge->driver->tlb_flush(mem); + return 0; +} + + +static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + int i; + + int mask_type; + + mask_type = agp_generic_type_to_mask_type(mem->bridge, type); + if (mask_type != 0 || type != mem->type) + return -EINVAL; + + if (mem->page_count == 0) + return 0; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) + writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i); + + agp_bridge->driver->tlb_flush(mem); + return 0; +} + + +static void nvidia_tlbflush(struct agp_memory *mem) +{ + unsigned long end; + u32 wbc_reg, temp; + int i; + + /* flush chipset */ + if (nvidia_private.wbc_mask) { + pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); + wbc_reg |= nvidia_private.wbc_mask; + pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg); + + end = jiffies + 3*HZ; + do { + pci_read_config_dword(nvidia_private.dev_1, + NVIDIA_1_WBC, &wbc_reg); + if (time_before_eq(end, jiffies)) { + printk(KERN_ERR PFX + "TLB flush took more than 3 seconds.\n"); + } + } while (wbc_reg & nvidia_private.wbc_mask); + } + + /* flush TLB entries */ + for (i = 0; i < 32 + 1; i++) + temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); + for (i = 0; i < 32 + 1; i++) + temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); +} + + +static const struct aper_size_info_8 nvidia_generic_sizes[5] = +{ + {512, 131072, 7, 0}, + {256, 65536, 6, 8}, + {128, 32768, 5, 12}, + {64, 16384, 4, 14}, + /* The 32M mode still requires a 64k gatt */ + {32, 16384, 4, 15} +}; + + +static const struct gatt_mask nvidia_generic_masks[] = +{ + { .mask = 1, .type = 0} +}; + + +static const struct agp_bridge_driver nvidia_driver = { + .owner = THIS_MODULE, + .aperture_sizes = nvidia_generic_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 5, + .needs_scratch_page = true, + .configure = nvidia_configure, + .fetch_size = nvidia_fetch_size, + .cleanup = nvidia_cleanup, + .tlb_flush = nvidia_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = nvidia_generic_masks, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = nvidia_insert_memory, + .remove_memory = nvidia_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static int agp_nvidia_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr; + + nvidia_private.dev_1 = + pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); + nvidia_private.dev_2 = + pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2)); + nvidia_private.dev_3 = + pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0)); + + if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) { + printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 " + "chipset, but could not find the secondary devices.\n"); + return -ENODEV; + } + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return 
-ENODEV; + + switch (pdev->device) { + case PCI_DEVICE_ID_NVIDIA_NFORCE: + printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n"); + nvidia_private.wbc_mask = 0x00010000; + break; + case PCI_DEVICE_ID_NVIDIA_NFORCE2: + printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n"); + nvidia_private.wbc_mask = 0x80000000; + break; + default: + printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n", + pdev->device); + return -ENODEV; + } + + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->driver = &nvidia_driver; + bridge->dev_private_data = &nvidia_private, + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + /* Fill in the mode register */ + pci_read_config_dword(pdev, + bridge->capndx+PCI_AGP_STATUS, + &bridge->mode); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_nvidia_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +#ifdef CONFIG_PM +static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int agp_nvidia_resume(struct pci_dev *pdev) +{ + /* set power state 0 and restore PCI space */ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* reconfigure AGP hardware again */ + nvidia_configure(); + + return 0; +} +#endif + + +static struct pci_device_id agp_nvidia_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NFORCE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NFORCE2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); + +static struct pci_driver agp_nvidia_pci_driver = { + .name = "agpgart-nvidia", + .id_table = agp_nvidia_pci_table, + .probe = agp_nvidia_probe, + .remove = agp_nvidia_remove, +#ifdef CONFIG_PM + .suspend = agp_nvidia_suspend, + .resume = agp_nvidia_resume, +#endif +}; + +static int __init agp_nvidia_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_nvidia_pci_driver); +} + +static void __exit agp_nvidia_cleanup(void) +{ + pci_unregister_driver(&agp_nvidia_pci_driver); + pci_dev_put(nvidia_private.dev_1); + pci_dev_put(nvidia_private.dev_2); + pci_dev_put(nvidia_private.dev_3); +} + +module_init(agp_nvidia_init); +module_exit(agp_nvidia_cleanup); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("NVIDIA Corporation"); + diff --git a/kernel/drivers/char/agp/parisc-agp.c b/kernel/drivers/char/agp/parisc-agp.c new file mode 100644 index 000000000..15f2e7025 --- /dev/null +++ b/kernel/drivers/char/agp/parisc-agp.c @@ -0,0 +1,426 @@ +/* + * HP Quicksilver AGP GART routines + * + * Copyright (c) 2006, Kyle McMartin + * + * Based on drivers/char/agpgart/hp-agp.c which is + * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/klist.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/parisc-device.h>
+#include <asm/ropes.h>
+
+#include "agp.h"
+
+#define DRVNAME	"quicksilver"
+#define DRVPFX	DRVNAME ": "
+
+#define AGP8X_MODE_BIT		3
+#define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+		       int type);
+
+static struct _parisc_agp_info {
+	void __iomem *ioc_regs;
+	void __iomem *lba_regs;
+
+	int lba_cap_offset;
+
+	u64 *gatt;
+	u64 gatt_entries;
+
+	u64 gart_base;
+	u64 gart_size;
+
+	int io_page_size;
+	int io_pages_per_kpage;
+} parisc_agp_info;
+
+static struct gatt_mask parisc_agp_masks[] =
+{
+	{
+		.mask = SBA_PDIR_VALID_BIT,
+		.type = 0
+	}
+};
+
+static struct aper_size_info_fixed parisc_agp_sizes[] =
+{
+	{0, 0, 0},		/* filled in by parisc_agp_fetch_size() */
+};
+
+static int
+parisc_agp_fetch_size(void)
+{
+	int size;
+
+	size = parisc_agp_info.gart_size / MB(1);
+	parisc_agp_sizes[0].size = size;
+	agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
+
+	return size;
+}
+
+static int
+parisc_agp_configure(void)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+
+	agp_bridge->gart_bus_addr = info->gart_base;
+	agp_bridge->capndx = info->lba_cap_offset;
+	agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
+
+	return 0;
+}
+
+static void
+parisc_agp_tlbflush(struct agp_memory *mem)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+
+	writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
+	readq(info->ioc_regs+IOC_PCOM);	/* flush */
+}
+
+static int
+parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int i;
+
+	for (i = 0; i < info->gatt_entries; i++) {
+		info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
+	}
+
+	return 0;
+}
+
+static int
+parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+
+	info->gatt[0] = SBA_AGPGART_COOKIE;
+
+	return 0;
+}
+
+static int
+parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int i, k;
+	off_t j, io_pg_start;
+	int io_pg_count;
+
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+		return -EINVAL;
+	}
+
+	io_pg_start = info->io_pages_per_kpage * pg_start;
+	io_pg_count = info->io_pages_per_kpage * mem->page_count;
+	if ((io_pg_start + io_pg_count) > info->gatt_entries) {
+		return -EINVAL;
+	}
+
+	j = io_pg_start;
+	while (j < (io_pg_start + io_pg_count)) {
+		if (info->gatt[j])
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+		unsigned long paddr;
+
+		paddr = page_to_phys(mem->pages[i]);
+		for (k = 0;
+		     k < info->io_pages_per_kpage;
+		     k++, j++, paddr += info->io_page_size) {
+			info->gatt[j] =
+				parisc_agp_mask_memory(agp_bridge,
+					paddr, type);
+		}
+	}
+
+	agp_bridge->driver->tlb_flush(mem);
+
+	return 0;
+}
+
+static int
+parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int i, io_pg_start, io_pg_count;
+
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+		return -EINVAL;
+	}
+
+	io_pg_start = info->io_pages_per_kpage * pg_start;
+	io_pg_count = info->io_pages_per_kpage * mem->page_count;
+	for (i = io_pg_start; i < io_pg_count
+ io_pg_start; i++) { + info->gatt[i] = agp_bridge->scratch_page; + } + + agp_bridge->driver->tlb_flush(mem); + return 0; +} + +static unsigned long +parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, + int type) +{ + return SBA_PDIR_VALID_BIT | addr; +} + +static void +parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode) +{ + struct _parisc_agp_info *info = &parisc_agp_info; + u32 command; + + command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS); + + command = agp_collect_device_status(bridge, mode, command); + command |= 0x00000100; + + writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND); + + agp_device_command(command, (mode & AGP8X_MODE) != 0); +} + +static const struct agp_bridge_driver parisc_agp_driver = { + .owner = THIS_MODULE, + .size_type = FIXED_APER_SIZE, + .configure = parisc_agp_configure, + .fetch_size = parisc_agp_fetch_size, + .tlb_flush = parisc_agp_tlbflush, + .mask_memory = parisc_agp_mask_memory, + .masks = parisc_agp_masks, + .agp_enable = parisc_agp_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = parisc_agp_create_gatt_table, + .free_gatt_table = parisc_agp_free_gatt_table, + .insert_memory = parisc_agp_insert_memory, + .remove_memory = parisc_agp_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + .cant_use_aperture = true, +}; + +static int __init +agp_ioc_init(void __iomem *ioc_regs) +{ + struct _parisc_agp_info *info = &parisc_agp_info; + u64 iova_base, *io_pdir, io_tlb_ps; + int io_tlb_shift; + + printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n"); + + info->ioc_regs = ioc_regs; + + io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG); + switch (io_tlb_ps) { + case 0: io_tlb_shift = 12; break; + case 1: io_tlb_shift = 13; break; + case 2: io_tlb_shift = 14; break; + case 3: io_tlb_shift = 16; break; + default: + printk(KERN_ERR DRVPFX "Invalid IOTLB page size " + "configuration 0x%llx\n", io_tlb_ps); + info->gatt = NULL; + info->gatt_entries = 0; + return -ENODEV; + } + info->io_page_size = 1 << io_tlb_shift; + info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size; + + iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1; + info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE; + + info->gart_size = PLUTO_GART_SIZE; + info->gatt_entries = info->gart_size / info->io_page_size; + + io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE)); + info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT]; + + if (info->gatt[0] != SBA_AGPGART_COOKIE) { + info->gatt = NULL; + info->gatt_entries = 0; + printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; " + "GART disabled\n"); + return -ENODEV; + } + + return 0; +} + +static int +lba_find_capability(int cap) +{ + struct _parisc_agp_info *info = &parisc_agp_info; + u16 status; + u8 pos, id; + int ttl = 48; + + status = readw(info->lba_regs + PCI_STATUS); + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; + pos = readb(info->lba_regs + PCI_CAPABILITY_LIST); + while (ttl-- && pos >= 0x40) { + pos &= ~3; + id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID); + if (id == 0xff) + break; + if (id == cap) + return pos; + pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT); + } + return 0; +} + +static int __init +agp_lba_init(void 
__iomem *lba_hpa)
+{
+	struct _parisc_agp_info *info = &parisc_agp_info;
+	int cap;
+
+	info->lba_regs = lba_hpa;
+	info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
+
+	cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
+	if (cap != PCI_CAP_ID_AGP) {
+		printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
+				cap, info->lba_cap_offset);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int __init
+parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
+{
+	struct pci_dev *fake_bridge_dev = NULL;
+	struct agp_bridge_data *bridge;
+	int error = 0;
+
+	fake_bridge_dev = pci_alloc_dev(NULL);
+	if (!fake_bridge_dev) {
+		error = -ENOMEM;
+		goto fail;
+	}
+
+	error = agp_ioc_init(ioc_hpa);
+	if (error)
+		goto fail;
+
+	error = agp_lba_init(lba_hpa);
+	if (error)
+		goto fail;
+
+	bridge = agp_alloc_bridge();
+	if (!bridge) {
+		error = -ENOMEM;
+		goto fail;
+	}
+	bridge->driver = &parisc_agp_driver;
+
+	fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
+	fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
+	bridge->dev = fake_bridge_dev;
+
+	error = agp_add_bridge(bridge);
+	if (error)
+		goto fail;
+	return 0;
+
+fail:
+	kfree(fake_bridge_dev);
+	return error;
+}
+
+static int
+find_quicksilver(struct device *dev, void *data)
+{
+	struct parisc_device **lba = data;
+	struct parisc_device *padev = to_parisc_device(dev);
+
+	if (IS_QUICKSILVER(padev))
+		*lba = padev;
+
+	return 0;
+}
+
+static int
+parisc_agp_init(void)
+{
+	extern struct sba_device *sba_list;
+
+	int err = -1;
+	struct parisc_device *sba = NULL, *lba = NULL;
+	struct lba_device *lbadev = NULL;
+
+	if (!sba_list)
+		goto out;
+
+	/* Find our parent Pluto */
+	sba = sba_list->dev;
+	if (!IS_PLUTO(sba)) {
+		printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
+		goto out;
+	}
+
+	/* Now search our Pluto for our precious AGP device... */
+	device_for_each_child(&sba->dev, &lba, find_quicksilver);
+
+	if (!lba) {
+		printk(KERN_INFO DRVPFX "No AGP devices found.\n");
+		goto out;
+	}
+
+	lbadev = parisc_get_drvdata(lba);
+
+	/* w00t, let's go find our cookies... */
+	parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
+
+	return 0;
+
+out:
+	return err;
+}
+
+module_init(parisc_agp_init);
+
+MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/char/agp/sgi-agp.c b/kernel/drivers/char/agp/sgi-agp.c
new file mode 100644
index 000000000..3051c73bc
--- /dev/null
+++ b/kernel/drivers/char/agp/sgi-agp.c
@@ -0,0 +1,338 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * SGI TIOCA AGPGART routines.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioca_provider.h>
+#include "agp.h"
+
+extern int agp_memory_reserved;
+extern uint32_t tioca_gart_found;
+extern struct list_head tioca_list;
+static struct agp_bridge_data **sgi_tioca_agp_bridges;
+
+/*
+ * The aperture size and related information is set up at TIOCA init time.
+ * Values for this table will be extracted and filled in at
+ * sgi_tioca_fetch_size() time.
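+ * (The table below has a single fixed entry; sgi_tioca_fetch_size() fills
+ * in only its .size and .num_entries fields.)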
+ */
+
+static struct aper_size_info_fixed sgi_tioca_sizes[] = {
+	{0, 0, 0},
+};
+
+static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
+{
+	struct page *page;
+	int nid;
+	struct tioca_kernel *info =
+		(struct tioca_kernel *)bridge->dev_private_data;
+
+	nid = info->ca_closest_node;
+	page = alloc_pages_node(nid, GFP_KERNEL, 0);
+	if (!page)
+		return NULL;
+
+	get_page(page);
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page;
+}
+
+/*
+ * Flush GART tlb's. Cannot selectively flush based on memory so the mem
+ * arg is ignored.
+ */
+
+static void sgi_tioca_tlbflush(struct agp_memory *mem)
+{
+	tioca_tlbflush(mem->bridge->dev_private_data);
+}
+
+/*
+ * Given an address of a host physical page, turn it into a valid gart
+ * entry.
+ */
+static unsigned long
+sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+		      int type)
+{
+	return tioca_physpage_to_gart(addr);
+}
+
+static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	tioca_fastwrite_enable(bridge->dev_private_data);
+}
+
+/*
+ * sgi_tioca_configure() doesn't have anything to do since the base CA driver
+ * has already set up the GART.
+ */
+
+static int sgi_tioca_configure(void)
+{
+	return 0;
+}
+
+/*
+ * Determine gfx aperture size. This has already been determined by the
+ * CA driver init, so just need to set agp_bridge values accordingly.
+ */
+
+static int sgi_tioca_fetch_size(void)
+{
+	struct tioca_kernel *info =
+		(struct tioca_kernel *)agp_bridge->dev_private_data;
+
+	sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
+	sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
+
+	return sgi_tioca_sizes[0].size;
+}
+
+static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	struct tioca_kernel *info =
+		(struct tioca_kernel *)bridge->dev_private_data;
+
+	bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
+	bridge->gatt_table = bridge->gatt_table_real;
+	bridge->gatt_bus_addr = info->ca_gfxgart_base;
+
+	return 0;
+}
+
+static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	return 0;
+}
+
+static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
+				   int type)
+{
+	int num_entries;
+	size_t i;
+	off_t j;
+	void *temp;
+	struct agp_bridge_data *bridge;
+	u64 *table;
+
+	bridge = mem->bridge;
+	if (!bridge)
+		return -EINVAL;
+
+	table = (u64 *)bridge->gatt_table;
+
+	temp = bridge->current_size;
+
+	switch (bridge->driver->size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	case LVL2_APER_SIZE:
+		return -EINVAL;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	num_entries -= agp_memory_reserved / PAGE_SIZE;
+	if (num_entries < 0)
+		num_entries = 0;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+
+	while (j < (pg_start + mem->page_count)) {
+		if (table[j])
+			return -EBUSY;
+		j++;
+	}
+
+	if (!mem->is_flushed) {
+		bridge->driver->cache_flush();
+		mem->is_flushed = true;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		table[j] =
+		    bridge->driver->mask_memory(bridge,
+						page_to_phys(mem->pages[i]),
+						mem->type);
+	}
+
+	bridge->driver->tlb_flush(mem);
+	return 0;
+}
+
+static int sgi_tioca_remove_memory(struct
agp_memory *mem, off_t pg_start, + int type) +{ + size_t i; + struct agp_bridge_data *bridge; + u64 *table; + + bridge = mem->bridge; + if (!bridge) + return -EINVAL; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + table = (u64 *)bridge->gatt_table; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + table[i] = 0; + } + + bridge->driver->tlb_flush(mem); + return 0; +} + +static void sgi_tioca_cache_flush(void) +{ +} + +/* + * Cleanup. Nothing to do as the CA driver owns the GART. + */ + +static void sgi_tioca_cleanup(void) +{ +} + +static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge; + + list_for_each_entry(bridge, &agp_bridges, list) { + if (bridge->dev->bus == pdev->bus) + break; + } + return bridge; +} + +const struct agp_bridge_driver sgi_tioca_driver = { + .owner = THIS_MODULE, + .size_type = U16_APER_SIZE, + .configure = sgi_tioca_configure, + .fetch_size = sgi_tioca_fetch_size, + .cleanup = sgi_tioca_cleanup, + .tlb_flush = sgi_tioca_tlbflush, + .mask_memory = sgi_tioca_mask_memory, + .agp_enable = sgi_tioca_agp_enable, + .cache_flush = sgi_tioca_cache_flush, + .create_gatt_table = sgi_tioca_create_gatt_table, + .free_gatt_table = sgi_tioca_free_gatt_table, + .insert_memory = sgi_tioca_insert_memory, + .remove_memory = sgi_tioca_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = sgi_tioca_alloc_page, + .agp_destroy_page = agp_generic_destroy_page, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + .cant_use_aperture = true, + .needs_scratch_page = false, + .num_aperture_sizes = 1, +}; + +static int agp_sgi_init(void) +{ + unsigned int j; + struct tioca_kernel *info; + struct pci_dev *pdev = NULL; + + if (tioca_gart_found) + printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n"); + else + return 0; + + sgi_tioca_agp_bridges = kmalloc(tioca_gart_found * + sizeof(struct agp_bridge_data *), + GFP_KERNEL); + if (!sgi_tioca_agp_bridges) + return -ENOMEM; + + j = 0; + list_for_each_entry(info, &tioca_list, ca_list) { + if (list_empty(info->ca_devices)) + continue; + list_for_each_entry(pdev, info->ca_devices, bus_list) { + u8 cap_ptr; + + if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) + continue; + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + continue; + } + sgi_tioca_agp_bridges[j] = agp_alloc_bridge(); + printk(KERN_INFO PFX "bridge %d = 0x%p\n", j, + sgi_tioca_agp_bridges[j]); + if (sgi_tioca_agp_bridges[j]) { + sgi_tioca_agp_bridges[j]->dev = pdev; + sgi_tioca_agp_bridges[j]->dev_private_data = info; + sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver; + sgi_tioca_agp_bridges[j]->gart_bus_addr = + info->ca_gfxap_base; + sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) | /* 126 requests */ + (0x1 << 9) | /* SBA supported */ + (0x1 << 5) | /* 64-bit addresses supported */ + (0x1 << 4) | /* FW supported */ + (0x1 << 3) | /* AGP 3.0 mode */ + 0x2; /* 8x transfer only */ + sgi_tioca_agp_bridges[j]->current_size = + sgi_tioca_agp_bridges[j]->previous_size = + (void *)&sgi_tioca_sizes[0]; + agp_add_bridge(sgi_tioca_agp_bridges[j]); + } + j++; + } + + agp_find_bridge = &sgi_tioca_find_bridge; + return 0; +} + +static void agp_sgi_cleanup(void) +{ + kfree(sgi_tioca_agp_bridges); + sgi_tioca_agp_bridges = NULL; +} + +module_init(agp_sgi_init); +module_exit(agp_sgi_cleanup); + +MODULE_LICENSE("GPL and additional rights"); diff --git a/kernel/drivers/char/agp/sis-agp.c 
b/kernel/drivers/char/agp/sis-agp.c
new file mode 100644
index 000000000..2c74038da
--- /dev/null
+++ b/kernel/drivers/char/agp/sis-agp.c
@@ -0,0 +1,452 @@
+/*
+ * SiS AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include "agp.h"
+
+#define SIS_ATTBASE	0x90
+#define SIS_APSIZE	0x94
+#define SIS_TLBCNTRL	0x97
+#define SIS_TLBFLUSH	0x98
+
+#define PCI_DEVICE_ID_SI_662	0x0662
+#define PCI_DEVICE_ID_SI_671	0x0671
+
+static bool agp_sis_force_delay = false;
+static int agp_sis_agp_spec = -1;
+
+static int sis_fetch_size(void)
+{
+	u8 temp_size;
+	int i;
+	struct aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if ((temp_size == values[i].size_value) ||
+		    ((temp_size & ~(0x07)) ==
+		     (values[i].size_value & ~(0x07)))) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void sis_tlbflush(struct agp_memory *mem)
+{
+	pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
+}
+
+static int sis_configure(void)
+{
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+	pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+	pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
+			       agp_bridge->gatt_bus_addr);
+	pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+			      current_size->size_value);
+	return 0;
+}
+
+static void sis_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+			      (previous_size->size_value & ~(0x03)));
+}
+
+static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	struct pci_dev *device = NULL;
+	u32 command;
+	int rate;
+
+	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+		 agp_bridge->major_version, agp_bridge->minor_version);
+
+	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
+	command = agp_collect_device_status(bridge, mode, command);
+	command |= AGPSTAT_AGP_ENABLE;
+	rate = (command & 0x7) << 2;
+
+	for_each_pci_dev(device) {
+		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (!agp)
+			continue;
+
+		dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
+			 pci_name(device), rate);
+
+		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
+
+		/*
+		 * Weird: on some sis chipsets any rate change in the target
+		 * command register triggers a 5ms screwup during which the master
+		 * cannot be configured
+		 */
+		if (device->device == bridge->dev->device) {
+			dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
+			msleep(10);
+		}
+	}
+}
+
+static const struct aper_size_info_8 sis_generic_sizes[7] =
+{
+	{256, 65536, 6, 99},
+	{128, 32768, 5, 83},
+	{64, 16384, 4, 67},
+	{32, 8192, 3, 51},
+	{16, 4096, 2, 35},
+	{8, 2048, 1, 19},
+	{4, 1024, 0, 3}
+};
+
+static struct agp_bridge_driver sis_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= sis_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
+	.configure		= sis_configure,
+	.fetch_size		= sis_fetch_size,
+	.cleanup		= sis_cleanup,
+	.tlb_flush		= sis_tlbflush,
+	.mask_memory		=
agp_generic_mask_memory, + .masks = NULL, + .agp_enable = agp_generic_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = agp_generic_create_gatt_table, + .free_gatt_table = agp_generic_free_gatt_table, + .insert_memory = agp_generic_insert_memory, + .remove_memory = agp_generic_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +// chipsets that require the 'delay hack' +static int sis_broken_chipsets[] = { + PCI_DEVICE_ID_SI_648, + PCI_DEVICE_ID_SI_746, + 0 // terminator +}; + +static void sis_get_driver(struct agp_bridge_data *bridge) +{ + int i; + + for (i=0; sis_broken_chipsets[i]!=0; ++i) + if (bridge->dev->device==sis_broken_chipsets[i]) + break; + + if (sis_broken_chipsets[i] || agp_sis_force_delay) + sis_driver.agp_enable=sis_delayed_enable; + + // sis chipsets that indicate less than agp3.5 + // are not actually fully agp3 compliant + if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5 + && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) { + sis_driver.aperture_sizes = agp3_generic_sizes; + sis_driver.size_type = U16_APER_SIZE; + sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES; + sis_driver.configure = agp3_generic_configure; + sis_driver.fetch_size = agp3_generic_fetch_size; + sis_driver.cleanup = agp3_generic_cleanup; + sis_driver.tlb_flush = agp3_generic_tlbflush; + } +} + + +static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + u8 cap_ptr; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; + + + dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n", + pdev->vendor, pdev->device); + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->driver = &sis_driver; + bridge->dev = pdev; + bridge->capndx = cap_ptr; + + get_agp_version(bridge); + + /* Fill in the mode register */ + pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); + sis_get_driver(bridge); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_sis_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +#ifdef CONFIG_PM + +static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int agp_sis_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + return sis_driver.configure(); +} + +#endif /* CONFIG_PM */ + +static struct pci_device_id agp_sis_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_5591, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_530, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_540, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = 
(PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_550, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_620, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_630, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_635, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_645, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_646, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_648, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_650, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_651, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_655, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_661, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_662, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_671, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_730, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_735, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_740, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_741, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_745, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_746, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + 
},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+
+static struct pci_driver agp_sis_pci_driver = {
+	.name		= "agpgart-sis",
+	.id_table	= agp_sis_pci_table,
+	.probe		= agp_sis_probe,
+	.remove		= agp_sis_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_sis_suspend,
+	.resume		= agp_sis_resume,
+#endif
+};
+
+static int __init agp_sis_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_sis_pci_driver);
+}
+
+static void __exit agp_sis_cleanup(void)
+{
+	pci_unregister_driver(&agp_sis_pci_driver);
+}
+
+module_init(agp_sis_init);
+module_exit(agp_sis_cleanup);
+
+module_param(agp_sis_force_delay, bool, 0);
+MODULE_PARM_DESC(agp_sis_force_delay, "forces sis delay hack");
+module_param(agp_sis_agp_spec, int, 0);
+MODULE_PARM_DESC(agp_sis_agp_spec, "0=force sis init, 1=force generic agp3 init, default: autodetect");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/kernel/drivers/char/agp/sworks-agp.c b/kernel/drivers/char/agp/sworks-agp.c
new file mode 100644
index 000000000..9b163b49d
--- /dev/null
+++ b/kernel/drivers/char/agp/sworks-agp.c
@@ -0,0 +1,569 @@
+/*
+ * Serverworks AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+#define SVWRKS_COMMAND		0x04
+#define SVWRKS_APSIZE		0x10
+#define SVWRKS_MMBASE		0x14
+#define SVWRKS_CACHING		0x4b
+#define SVWRKS_AGP_ENABLE	0x60
+#define SVWRKS_FEATURE		0x68
+
+#define SVWRKS_SIZE_MASK	0xfe000000
+
+/* Memory mapped registers */
+#define SVWRKS_GART_CACHE	0x02
+#define SVWRKS_GATTBASE		0x04
+#define SVWRKS_TLBFLUSH		0x10
+#define SVWRKS_POSTFLUSH	0x14
+#define SVWRKS_DIRFLUSH		0x0c
+
+
+struct serverworks_page_map {
+	unsigned long *real;
+	unsigned long __iomem *remapped;
+};
+
+static struct _serverworks_private {
+	struct pci_dev *svrwrks_dev;	/* device one */
+	volatile u8 __iomem *registers;
+	struct serverworks_page_map **gatt_pages;
+	int num_tables;
+	struct serverworks_page_map scratch_dir;
+
+	int gart_addr_ofs;
+	int mm_addr_ofs;
+} serverworks_private;
+
+static int serverworks_create_page_map(struct serverworks_page_map *page_map)
+{
+	int i;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL) {
+		return -ENOMEM;
+	}
+
+	set_memory_uc((unsigned long)page_map->real, 1);
+	page_map->remapped = page_map->real;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+		writel(agp_bridge->scratch_page, page_map->remapped+i);
+		/* Red Pen: Everyone else does pci posting flush here */
+
+	return 0;
+}
+
+static void serverworks_free_page_map(struct serverworks_page_map *page_map)
+{
+	set_memory_wb((unsigned long)page_map->real, 1);
+	free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+	int i;
+	struct serverworks_page_map **tables;
+	struct serverworks_page_map *entry;
+
+	tables = serverworks_private.gatt_pages;
+	for (i = 0; i < serverworks_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL) {
+				serverworks_free_page_map(entry);
+			}
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+	struct serverworks_page_map **tables;
+	struct serverworks_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
+			 GFP_KERNEL);
+	if (tables == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_tables; i++) {
+		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+		if (entry == NULL) {
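+			/* out of memory: record the error and stop; the
+			 * partially built directory is freed below */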
retval = -ENOMEM; + break; + } + tables[i] = entry; + retval = serverworks_create_page_map(entry); + if (retval != 0) break; + } + serverworks_private.num_tables = nr_tables; + serverworks_private.gatt_pages = tables; + + if (retval != 0) serverworks_free_gatt_pages(); + + return retval; +} + +#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\ + GET_PAGE_DIR_IDX(addr)]->remapped) + +#ifndef GET_PAGE_DIR_OFF +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#endif + +#ifndef GET_PAGE_DIR_IDX +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ + GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) +#endif + +#ifndef GET_GATT_OFF +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#endif + +static int serverworks_create_gatt_table(struct agp_bridge_data *bridge) +{ + struct aper_size_info_lvl2 *value; + struct serverworks_page_map page_dir; + int retval; + u32 temp; + int i; + + value = A_SIZE_LVL2(agp_bridge->current_size); + retval = serverworks_create_page_map(&page_dir); + if (retval != 0) { + return retval; + } + retval = serverworks_create_page_map(&serverworks_private.scratch_dir); + if (retval != 0) { + serverworks_free_page_map(&page_dir); + return retval; + } + /* Create a fake scratch directory */ + for (i = 0; i < 1024; i++) { + writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); + writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); + } + + retval = serverworks_create_gatt_pages(value->num_entries / 1024); + if (retval != 0) { + serverworks_free_page_map(&page_dir); + serverworks_free_page_map(&serverworks_private.scratch_dir); + return retval; + } + + agp_bridge->gatt_table_real = (u32 *)page_dir.real; + agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; + agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); + + /* Get the address for the gart region. 
+ * This is a bus address even on the alpha, b/c its + * used to program the agp master not the cpu + */ + + pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* Calculate the agp offset */ + for (i = 0; i < value->num_entries / 1024; i++) + writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); + + return 0; +} + +static int serverworks_free_gatt_table(struct agp_bridge_data *bridge) +{ + struct serverworks_page_map page_dir; + + page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; + page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; + + serverworks_free_gatt_pages(); + serverworks_free_page_map(&page_dir); + serverworks_free_page_map(&serverworks_private.scratch_dir); + return 0; +} + +static int serverworks_fetch_size(void) +{ + int i; + u32 temp; + u32 temp2; + struct aper_size_info_lvl2 *values; + + values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); + pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); + pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs, + SVWRKS_SIZE_MASK); + pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2); + pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp); + temp2 &= SVWRKS_SIZE_MASK; + + for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { + if (temp2 == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* + * This routine could be implemented by taking the addresses + * written to the GATT, and flushing them individually. However + * currently it just flushes the whole table. Which is probably + * more efficient, since agp_memory blocks can be a large number of + * entries. + */ +static void serverworks_tlbflush(struct agp_memory *temp) +{ + unsigned long timeout; + + writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH); + timeout = jiffies + 3*HZ; + while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { + cpu_relax(); + if (time_after(jiffies, timeout)) { + dev_err(&serverworks_private.svrwrks_dev->dev, + "TLB post flush took more than 3 seconds\n"); + break; + } + } + + writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH); + timeout = jiffies + 3*HZ; + while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { + cpu_relax(); + if (time_after(jiffies, timeout)) { + dev_err(&serverworks_private.svrwrks_dev->dev, + "TLB Dir flush took more than 3 seconds\n"); + break; + } + } +} + +static int serverworks_configure(void) +{ + struct aper_size_info_lvl2 *current_size; + u32 temp; + u8 enable_reg; + u16 cap_reg; + + current_size = A_SIZE_LVL2(agp_bridge->current_size); + + /* Get the memory mapped registers */ + pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); + if (!serverworks_private.registers) { + dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp); + return -ENOMEM; + } + + writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE); + readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. 
*/ + + writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE); + readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */ + + cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND); + cap_reg &= ~0x0007; + cap_reg |= 0x4; + writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND); + readw(serverworks_private.registers+SVWRKS_COMMAND); + + pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg); + enable_reg |= 0x1; /* Agp Enable bit */ + pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg); + serverworks_tlbflush(NULL); + + agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP); + + /* Fill in the mode register */ + pci_read_config_dword(serverworks_private.svrwrks_dev, + agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); + + pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg); + enable_reg &= ~0x3; + pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg); + + pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg); + enable_reg |= (1<<6); + pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg); + + return 0; +} + +static void serverworks_cleanup(void) +{ + iounmap((void __iomem *) serverworks_private.registers); +} + +static int serverworks_insert_memory(struct agp_memory *mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + unsigned long __iomem *cur_gatt; + unsigned long addr; + + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + mem->page_count)) { + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = SVRWRKS_GET_GATT(addr); + if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) + return -EBUSY; + j++; + } + + if (!mem->is_flushed) { + global_cache_flush(); + mem->is_flushed = true; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = SVRWRKS_GET_GATT(addr); + writel(agp_bridge->driver->mask_memory(agp_bridge, + page_to_phys(mem->pages[i]), mem->type), + cur_gatt+GET_GATT_OFF(addr)); + } + serverworks_tlbflush(mem); + return 0; +} + +static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start, + int type) +{ + int i; + unsigned long __iomem *cur_gatt; + unsigned long addr; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + global_cache_flush(); + serverworks_tlbflush(mem); + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; + cur_gatt = SVRWRKS_GET_GATT(addr); + writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); + } + + serverworks_tlbflush(mem); + return 0; +} + +static const struct gatt_mask serverworks_masks[] = +{ + {.mask = 1, .type = 0} +}; + +static const struct aper_size_info_lvl2 serverworks_sizes[7] = +{ + {2048, 524288, 0x80000000}, + {1024, 262144, 0xc0000000}, + {512, 131072, 0xe0000000}, + {256, 65536, 0xf0000000}, + {128, 32768, 0xf8000000}, + {64, 16384, 0xfc000000}, + {32, 8192, 0xfe000000} +}; + +static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode) +{ + u32 command; + + pci_read_config_dword(serverworks_private.svrwrks_dev, + bridge->capndx + PCI_AGP_STATUS, + &command); + + command = agp_collect_device_status(bridge, mode, command); + 
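+	/* Strip the fast-write bit (0x10) and the reserved rate bit (0x08)
+	 * from the collected status, then set the AGP enable bit (0x100)
+	 * before programming the bridge's AGP command register. */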
+ command &= ~0x10; /* disable FW */ + command &= ~0x08; + + command |= 0x100; + + pci_write_config_dword(serverworks_private.svrwrks_dev, + bridge->capndx + PCI_AGP_COMMAND, + command); + + agp_device_command(command, false); +} + +static const struct agp_bridge_driver sworks_driver = { + .owner = THIS_MODULE, + .aperture_sizes = serverworks_sizes, + .size_type = LVL2_APER_SIZE, + .num_aperture_sizes = 7, + .configure = serverworks_configure, + .fetch_size = serverworks_fetch_size, + .cleanup = serverworks_cleanup, + .tlb_flush = serverworks_tlbflush, + .mask_memory = agp_generic_mask_memory, + .masks = serverworks_masks, + .agp_enable = serverworks_agp_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = serverworks_create_gatt_table, + .free_gatt_table = serverworks_free_gatt_table, + .insert_memory = serverworks_insert_memory, + .remove_memory = serverworks_remove_memory, + .alloc_by_type = agp_generic_alloc_by_type, + .free_by_type = agp_generic_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, +}; + +static int agp_serverworks_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_bridge_data *bridge; + struct pci_dev *bridge_dev; + u32 temp, temp2; + u8 cap_ptr = 0; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + + switch (pdev->device) { + case 0x0006: + dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n"); + return -ENODEV; + + case PCI_DEVICE_ID_SERVERWORKS_HE: + case PCI_DEVICE_ID_SERVERWORKS_LE: + case 0x0007: + break; + + default: + if (cap_ptr) + dev_err(&pdev->dev, "unsupported Serverworks chipset " + "[%04x/%04x]\n", pdev->vendor, pdev->device); + return -ENODEV; + } + + /* Everything is on func 1 here so we are hardcoding function one */ + bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, + PCI_DEVFN(0, 1)); + if (!bridge_dev) { + dev_info(&pdev->dev, "can't find secondary device\n"); + return -ENODEV; + } + + serverworks_private.svrwrks_dev = bridge_dev; + serverworks_private.gart_addr_ofs = 0x10; + + pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp); + if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); + if (temp2 != 0) { + dev_info(&pdev->dev, "64 bit aperture address, " + "but top bits are not zero; disabling AGP\n"); + return -ENODEV; + } + serverworks_private.mm_addr_ofs = 0x18; + } else + serverworks_private.mm_addr_ofs = 0x14; + + pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp); + if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_read_config_dword(pdev, + serverworks_private.mm_addr_ofs + 4, &temp2); + if (temp2 != 0) { + dev_info(&pdev->dev, "64 bit MMIO address, but top " + "bits are not zero; disabling AGP\n"); + return -ENODEV; + } + } + + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + bridge->driver = &sworks_driver; + bridge->dev_private_data = &serverworks_private, + bridge->dev = pci_dev_get(pdev); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_serverworks_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + + pci_dev_put(bridge->dev); + agp_remove_bridge(bridge); + agp_put_bridge(bridge); + pci_dev_put(serverworks_private.svrwrks_dev); + serverworks_private.svrwrks_dev = NULL; +} + 
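The two-level lookup used by serverworks_insert_memory() above (SVRWRKS_GET_GATT plus GET_GATT_OFF) splits an aperture bus address into a directory slot and an entry within the selected 4 KB GATT page: the top bits (addr >> 22) pick a directory page, each of which maps 4 MB of aperture (1024 entries of 4 KB), and bits 21..12 pick the entry. A minimal standalone sketch of that decomposition, assuming a hypothetical GART base of 0xe0000000 (any 4 MB-aligned base behaves the same; the macros are copied from the driver):

#include <stdio.h>

#define GET_PAGE_DIR_OFF(addr)	((addr) >> 22)
#define GET_GATT_OFF(addr)	(((addr) & 0x003ff000) >> 12)

int main(void)
{
	unsigned long gart_bus_addr = 0xe0000000UL;	/* assumed aperture base */
	unsigned long addr = gart_bus_addr + 5*(1UL << 20) + 3*4096;

	/* directory index relative to the aperture base: 5 MB in -> page 1 */
	printf("dir idx  = %lu\n",
	       GET_PAGE_DIR_OFF(addr) - GET_PAGE_DIR_OFF(gart_bus_addr));
	/* entry within that directory page: (1 MB + 12 KB) / 4 KB = 259 */
	printf("gatt off = %lu\n", GET_GATT_OFF(addr));
	return 0;
}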
+static struct pci_device_id agp_serverworks_pci_table[] = {
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
+	.device		= PCI_ANY_ID,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
+
+static struct pci_driver agp_serverworks_pci_driver = {
+	.name		= "agpgart-serverworks",
+	.id_table	= agp_serverworks_pci_table,
+	.probe		= agp_serverworks_probe,
+	.remove		= agp_serverworks_remove,
+};
+
+static int __init agp_serverworks_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_serverworks_pci_driver);
+}
+
+static void __exit agp_serverworks_cleanup(void)
+{
+	pci_unregister_driver(&agp_serverworks_pci_driver);
+}
+
+module_init(agp_serverworks_init);
+module_exit(agp_serverworks_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/kernel/drivers/char/agp/uninorth-agp.c b/kernel/drivers/char/agp/uninorth-agp.c
new file mode 100644
index 000000000..a56ee9bed
--- /dev/null
+++ b/kernel/drivers/char/agp/uninorth-agp.c
@@ -0,0 +1,722 @@
+/*
+ * UniNorth AGPGART routines.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/uninorth.h>
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/pmac_feature.h>
+#include "agp.h"
+
+/*
+ * Notes on uninorth3 (G5 AGP) support:
+ *
+ * It may also be possible to use a bigger cache line size for
+ * AGP (see pmac_pci.c and look for cache line); this needs to be
+ * investigated by someone.
+ *
+ * PAGE sizes are hardcoded, but this may change; see asm/page.h.
+ *
+ * Jerome Glisse
+ */
+static int uninorth_rev;
+static int is_u3;
+static u32 scratch_value;
+
+#define DEFAULT_APERTURE_SIZE 256
+#define DEFAULT_APERTURE_STRING "256"
+static char *aperture = NULL;
+
+static int uninorth_fetch_size(void)
+{
+	int i, size = 0;
+	struct aper_size_info_32 *values =
+	    A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+	if (aperture) {
+		char *save = aperture;
+
+		size = memparse(aperture, &aperture) >> 20;
+		aperture = save;
+
+		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+			if (size == values[i].size)
+				break;
+
+		if (i == agp_bridge->driver->num_aperture_sizes) {
+			dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
+				"using default\n");
+			size = 0;
+			aperture = NULL;
+		}
+	}
+
+	if (!size) {
+		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+			if (values[i].size == DEFAULT_APERTURE_SIZE)
+				break;
+	}
+
+	agp_bridge->previous_size =
+	    agp_bridge->current_size = (void *)(values + i);
+	agp_bridge->aperture_size_idx = i;
+	return values[i].size;
+}
+
+static void uninorth_tlbflush(struct agp_memory *mem)
+{
+	u32 ctrl = UNI_N_CFG_GART_ENABLE;
+
+	if (is_u3)
+		ctrl |= U3_N_CFG_GART_PERFRD;
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+			       ctrl | UNI_N_CFG_GART_INVAL);
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
+
+	if (!mem && uninorth_rev <= 0x30) {
+		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+				       ctrl | UNI_N_CFG_GART_2xRESET);
+		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+				       ctrl);
+	}
+}
+
+static void uninorth_cleanup(void)
+{
+	u32 tmp;
+
+	pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
+	if (!(tmp & UNI_N_CFG_GART_ENABLE))
+		return;
+	tmp |= UNI_N_CFG_GART_INVAL;
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
+	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);
+
+	if (uninorth_rev <= 0x30) {
+		pci_write_config_dword(agp_bridge->dev,
UNI_N_CFG_GART_CTRL, + UNI_N_CFG_GART_2xRESET); + pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, + 0); + } +} + +static int uninorth_configure(void) +{ + struct aper_size_info_32 *current_size; + + current_size = A_SIZE_32(agp_bridge->current_size); + + dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n", + current_size->size_value); + + /* aperture size and gatt addr */ + pci_write_config_dword(agp_bridge->dev, + UNI_N_CFG_GART_BASE, + (agp_bridge->gatt_bus_addr & 0xfffff000) + | current_size->size_value); + + /* HACK ALERT + * UniNorth seem to be buggy enough not to handle properly when + * the AGP aperture isn't mapped at bus physical address 0 + */ + agp_bridge->gart_bus_addr = 0; +#ifdef CONFIG_PPC64 + /* Assume U3 or later on PPC64 systems */ + /* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */ + pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE, + (agp_bridge->gatt_bus_addr >> 32) & 0xf); +#else + pci_write_config_dword(agp_bridge->dev, + UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr); +#endif + + if (is_u3) { + pci_write_config_dword(agp_bridge->dev, + UNI_N_CFG_GART_DUMMY_PAGE, + page_to_phys(agp_bridge->scratch_page_page) >> 12); + } + + return 0; +} + +static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + int i, num_entries; + void *temp; + u32 *gp; + int mask_type; + + if (type != mem->type) + return -EINVAL; + + mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); + if (mask_type != 0) { + /* We know nothing of memory types */ + return -EINVAL; + } + + if (mem->page_count == 0) + return 0; + + temp = agp_bridge->current_size; + num_entries = A_SIZE_32(temp)->num_entries; + + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + + gp = (u32 *) &agp_bridge->gatt_table[pg_start]; + for (i = 0; i < mem->page_count; ++i) { + if (gp[i] != scratch_value) { + dev_info(&agp_bridge->dev->dev, + "uninorth_insert_memory: entry 0x%x occupied (%x)\n", + i, gp[i]); + return -EBUSY; + } + } + + for (i = 0; i < mem->page_count; i++) { + if (is_u3) + gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; + else + gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | + 0x1UL); + flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), + (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); + } + mb(); + uninorth_tlbflush(mem); + + return 0; +} + +int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +{ + size_t i; + u32 *gp; + int mask_type; + + if (type != mem->type) + return -EINVAL; + + mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); + if (mask_type != 0) { + /* We know nothing of memory types */ + return -EINVAL; + } + + if (mem->page_count == 0) + return 0; + + gp = (u32 *) &agp_bridge->gatt_table[pg_start]; + for (i = 0; i < mem->page_count; ++i) { + gp[i] = scratch_value; + } + mb(); + uninorth_tlbflush(mem); + + return 0; +} + +static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode) +{ + u32 command, scratch, status; + int timeout; + + pci_read_config_dword(bridge->dev, + bridge->capndx + PCI_AGP_STATUS, + &status); + + command = agp_collect_device_status(bridge, mode, status); + command |= PCI_AGP_COMMAND_AGP; + + if (uninorth_rev == 0x21) { + /* + * Darwin disable AGP 4x on this revision, thus we + * may assume it's broken. This is an AGP2 controller. 
+ */ + command &= ~AGPSTAT2_4X; + } + + if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) { + /* + * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1, + * 2.2 and 2.3, Darwin do so. + */ + if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7) + command = (command & ~AGPSTAT_RQ_DEPTH) + | (7 << AGPSTAT_RQ_DEPTH_SHIFT); + } + + uninorth_tlbflush(NULL); + + timeout = 0; + do { + pci_write_config_dword(bridge->dev, + bridge->capndx + PCI_AGP_COMMAND, + command); + pci_read_config_dword(bridge->dev, + bridge->capndx + PCI_AGP_COMMAND, + &scratch); + } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000); + if ((scratch & PCI_AGP_COMMAND_AGP) == 0) + dev_err(&bridge->dev->dev, "can't write UniNorth AGP " + "command register\n"); + + if (uninorth_rev >= 0x30) { + /* This is an AGP V3 */ + agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0); + } else { + /* AGP V2 */ + agp_device_command(command, false); + } + + uninorth_tlbflush(NULL); +} + +#ifdef CONFIG_PM +/* + * These Power Management routines are _not_ called by the normal PCI PM layer, + * but directly by the video driver through function pointers in the device + * tree. + */ +static int agp_uninorth_suspend(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge; + u32 cmd; + u8 agp; + struct pci_dev *device = NULL; + + bridge = agp_find_bridge(pdev); + if (bridge == NULL) + return -ENODEV; + + /* Only one suspend supported */ + if (bridge->dev_private_data) + return 0; + + /* turn off AGP on the video chip, if it was enabled */ + for_each_pci_dev(device) { + /* Don't touch the bridge yet, device first */ + if (device == pdev) + continue; + /* Only deal with devices on the same bus here, no Mac has a P2P + * bridge on the AGP port, and mucking around the entire PCI + * tree is source of problems on some machines because of a bug + * in some versions of pci_find_capability() when hitting a dead + * device + */ + if (device->bus != pdev->bus) + continue; + agp = pci_find_capability(device, PCI_CAP_ID_AGP); + if (!agp) + continue; + pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd); + if (!(cmd & PCI_AGP_COMMAND_AGP)) + continue; + dev_info(&pdev->dev, "disabling AGP on device %s\n", + pci_name(device)); + cmd &= ~PCI_AGP_COMMAND_AGP; + pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd); + } + + /* turn off AGP on the bridge */ + agp = pci_find_capability(pdev, PCI_CAP_ID_AGP); + pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd); + bridge->dev_private_data = (void *)(long)cmd; + if (cmd & PCI_AGP_COMMAND_AGP) { + dev_info(&pdev->dev, "disabling AGP on bridge\n"); + cmd &= ~PCI_AGP_COMMAND_AGP; + pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd); + } + /* turn off the GART */ + uninorth_cleanup(); + + return 0; +} + +static int agp_uninorth_resume(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge; + u32 command; + + bridge = agp_find_bridge(pdev); + if (bridge == NULL) + return -ENODEV; + + command = (long)bridge->dev_private_data; + bridge->dev_private_data = NULL; + if (!(command & PCI_AGP_COMMAND_AGP)) + return 0; + + uninorth_agp_enable(bridge, command); + + return 0; +} +#endif /* CONFIG_PM */ + +static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) +{ + char *table; + char *table_end; + int size; + int page_order; + int num_entries; + int i; + void *temp; + struct page *page; + struct page **pages; + + /* We can't handle 2 level gatt's */ + if (bridge->driver->size_type == LVL2_APER_SIZE) + return -EINVAL; + + table = NULL; + i = bridge->aperture_size_idx; + 
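+	/* Starting from the configured size index, the loop below tries to
+	 * allocate a GATT and falls back to the next smaller aperture size
+	 * whenever __get_free_pages() fails. */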
temp = bridge->current_size; + size = page_order = num_entries = 0; + + do { + size = A_SIZE_32(temp)->size; + page_order = A_SIZE_32(temp)->page_order; + num_entries = A_SIZE_32(temp)->num_entries; + + table = (char *) __get_free_pages(GFP_KERNEL, page_order); + + if (table == NULL) { + i++; + bridge->current_size = A_IDX32(bridge); + } else { + bridge->aperture_size_idx = i; + } + } while (!table && (i < bridge->driver->num_aperture_sizes)); + + if (table == NULL) + return -ENOMEM; + + pages = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL); + if (pages == NULL) + goto enomem; + + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end); + page++, i++) { + SetPageReserved(page); + pages[i] = page; + } + + bridge->gatt_table_real = (u32 *) table; + /* Need to clear out any dirty data still sitting in caches */ + flush_dcache_range((unsigned long)table, + (unsigned long)table_end + 1); + bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG); + + if (bridge->gatt_table == NULL) + goto enomem; + + bridge->gatt_bus_addr = virt_to_phys(table); + + if (is_u3) + scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL; + else + scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) | + 0x1UL); + for (i = 0; i < num_entries; i++) + bridge->gatt_table[i] = scratch_value; + + return 0; + +enomem: + kfree(pages); + if (table) + free_pages((unsigned long)table, page_order); + return -ENOMEM; +} + +static int uninorth_free_gatt_table(struct agp_bridge_data *bridge) +{ + int page_order; + char *table, *table_end; + void *temp; + struct page *page; + + temp = bridge->current_size; + page_order = A_SIZE_32(temp)->page_order; + + /* Do not worry about freeing memory, because if this is + * called, then all agp memory is deallocated and removed + * from the table. 
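+	 * All that is left to do here is undo the vmap(), drop the
+	 * page reservations and give the GATT pages back.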
+ */
+
+	vunmap(bridge->gatt_table);
+	table = (char *) bridge->gatt_table_real;
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+		ClearPageReserved(page);
+
+	free_pages((unsigned long) bridge->gatt_table_real, page_order);
+
+	return 0;
+}
+
+void null_cache_flush(void)
+{
+	mb();
+}
+
+/* Setup function */
+
+static const struct aper_size_info_32 uninorth_sizes[] =
+{
+	{256, 65536, 6, 64},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 16},
+	{32, 8192, 3, 8},
+	{16, 4096, 2, 4},
+	{8, 2048, 1, 2},
+	{4, 1024, 0, 1}
+};
+
+/*
+ * Not sure that u3 supports such high aperture sizes, but it
+ * would be strange if it did not :)
+ */
+static const struct aper_size_info_32 u3_sizes[] =
+{
+	{512, 131072, 7, 128},
+	{256, 65536, 6, 64},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 16},
+	{32, 8192, 3, 8},
+	{16, 4096, 2, 4},
+	{8, 2048, 1, 2},
+	{4, 1024, 0, 1}
+};
+
+const struct agp_bridge_driver uninorth_agp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= (void *)uninorth_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= ARRAY_SIZE(uninorth_sizes),
+	.configure		= uninorth_configure,
+	.fetch_size		= uninorth_fetch_size,
+	.cleanup		= uninorth_cleanup,
+	.tlb_flush		= uninorth_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.cache_flush		= null_cache_flush,
+	.agp_enable		= uninorth_agp_enable,
+	.create_gatt_table	= uninorth_create_gatt_table,
+	.free_gatt_table	= uninorth_free_gatt_table,
+	.insert_memory		= uninorth_insert_memory,
+	.remove_memory		= uninorth_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+	.needs_scratch_page	= true,
+};
+
+const struct agp_bridge_driver u3_agp_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= (void *)u3_sizes,
+	.size_type		= U32_APER_SIZE,
+	.num_aperture_sizes	= ARRAY_SIZE(u3_sizes),
+	.configure		= uninorth_configure,
+	.fetch_size		= uninorth_fetch_size,
+	.cleanup		= uninorth_cleanup,
+	.tlb_flush		= uninorth_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.cache_flush		= null_cache_flush,
+	.agp_enable		= uninorth_agp_enable,
+	.create_gatt_table	= uninorth_create_gatt_table,
+	.free_gatt_table	= uninorth_free_gatt_table,
+	.insert_memory		= uninorth_insert_memory,
+	.remove_memory		= uninorth_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
+	.cant_use_aperture	= true,
+	.needs_scratch_page	= true,
+};
+
+static struct agp_device_ids uninorth_agp_device_ids[] = {
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP,
+		.chipset_name	= "UniNorth",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
+		.chipset_name	= "UniNorth/Pangea",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
+		.chipset_name	= "UniNorth 1.5",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
+		.chipset_name	= "UniNorth 2",
+	},
+	{
+		.device_id	= PCI_DEVICE_ID_APPLE_U3_AGP,
+		.chipset_name	= "U3",
+	},
+	{
+
.device_id = PCI_DEVICE_ID_APPLE_U3L_AGP, + .chipset_name = "U3L", + }, + { + .device_id = PCI_DEVICE_ID_APPLE_U3H_AGP, + .chipset_name = "U3H", + }, + { + .device_id = PCI_DEVICE_ID_APPLE_IPID2_AGP, + .chipset_name = "UniNorth/Intrepid2", + }, +}; + +static int agp_uninorth_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct agp_device_ids *devs = uninorth_agp_device_ids; + struct agp_bridge_data *bridge; + struct device_node *uninorth_node; + u8 cap_ptr; + int j; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (cap_ptr == 0) + return -ENODEV; + + /* probe for known chipsets */ + for (j = 0; devs[j].chipset_name != NULL; ++j) { + if (pdev->device == devs[j].device_id) { + dev_info(&pdev->dev, "Apple %s chipset\n", + devs[j].chipset_name); + goto found; + } + } + + dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n", + pdev->vendor, pdev->device); + return -ENODEV; + + found: + /* Set revision to 0 if we could not read it. */ + uninorth_rev = 0; + is_u3 = 0; + /* Locate core99 Uni-N */ + uninorth_node = of_find_node_by_name(NULL, "uni-n"); + /* Locate G5 u3 */ + if (uninorth_node == NULL) { + is_u3 = 1; + uninorth_node = of_find_node_by_name(NULL, "u3"); + } + if (uninorth_node) { + const int *revprop = of_get_property(uninorth_node, + "device-rev", NULL); + if (revprop != NULL) + uninorth_rev = *revprop & 0x3f; + of_node_put(uninorth_node); + } + +#ifdef CONFIG_PM + /* Inform platform of our suspend/resume caps */ + pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume); +#endif + + /* Allocate & setup our driver */ + bridge = agp_alloc_bridge(); + if (!bridge) + return -ENOMEM; + + if (is_u3) + bridge->driver = &u3_agp_driver; + else + bridge->driver = &uninorth_agp_driver; + + bridge->dev = pdev; + bridge->capndx = cap_ptr; + bridge->flags = AGP_ERRATA_FASTWRITES; + + /* Fill in the mode register */ + pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode); + + pci_set_drvdata(pdev, bridge); + return agp_add_bridge(bridge); +} + +static void agp_uninorth_remove(struct pci_dev *pdev) +{ + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + +#ifdef CONFIG_PM + /* Inform platform of our suspend/resume caps */ + pmac_register_agp_pm(pdev, NULL, NULL); +#endif + + agp_remove_bridge(bridge); + agp_put_bridge(bridge); +} + +static struct pci_device_id agp_uninorth_pci_table[] = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_APPLE, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table); + +static struct pci_driver agp_uninorth_pci_driver = { + .name = "agpgart-uninorth", + .id_table = agp_uninorth_pci_table, + .probe = agp_uninorth_probe, + .remove = agp_uninorth_remove, +}; + +static int __init agp_uninorth_init(void) +{ + if (agp_off) + return -EINVAL; + return pci_register_driver(&agp_uninorth_pci_driver); +} + +static void __exit agp_uninorth_cleanup(void) +{ + pci_unregister_driver(&agp_uninorth_pci_driver); +} + +module_init(agp_uninorth_init); +module_exit(agp_uninorth_cleanup); + +module_param(aperture, charp, 0); +MODULE_PARM_DESC(aperture, + "Aperture size, must be power of two between 4MB and an\n" + "\t\tupper limit specific to the UniNorth revision.\n" + "\t\tDefault: " DEFAULT_APERTURE_STRING "M"); + +MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/agp/via-agp.c b/kernel/drivers/char/agp/via-agp.c new file 
mode 100644
index 000000000..a4961d35e
--- /dev/null
+++ b/kernel/drivers/char/agp/via-agp.c
@@ -0,0 +1,598 @@
+/*
+ * VIA AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static const struct pci_device_id agp_via_pci_table[];
+
+#define VIA_GARTCTRL	0x80
+#define VIA_APSIZE	0x84
+#define VIA_ATTBASE	0x88
+
+#define VIA_AGP3_GARTCTRL	0x90
+#define VIA_AGP3_APSIZE		0x94
+#define VIA_AGP3_ATTBASE	0x98
+#define VIA_AGPSEL		0xfd
+
+static int via_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	struct aper_size_info_8 *values;
+
+	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+	pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
+	return 0;
+}
+
+
+static int via_configure(void)
+{
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge->current_size);
+	/* aperture size */
+	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+			      current_size->size_value);
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* GART control register */
+	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
+			       (agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
+	return 0;
+}
+
+
+static void via_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+			      previous_size->size_value);
+	/* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
+	 * during reinitialization.
+	 */
+}
+
+
+static void via_tlbflush(struct agp_memory *mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
+	temp |= (1<<7);
+	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+	temp &= ~(1<<7);
+	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+}
+
+
+static const struct aper_size_info_8 via_generic_sizes[9] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 128},
+	{64, 16384, 4, 192},
+	{32, 8192, 3, 224},
+	{16, 4096, 2, 240},
+	{8, 2048, 1, 248},
+	{4, 1024, 0, 252},
+	{2, 512, 0, 254},
+	{1, 256, 0, 255}
+};
+
+
+static int via_fetch_size_agp3(void)
+{
+	int i;
+	u16 temp;
+	struct aper_size_info_16 *values;
+
+	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+	pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
+	temp &= 0xfff;
+
+	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge->previous_size =
+			    agp_bridge->current_size = (void *) (values + i);
+			agp_bridge->aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+
+static int via_configure_agp3(void)
+{
+	u32 temp;
+	struct aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge->current_size);
+
+	/* address to map to */
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
+			       agp_bridge->gatt_bus_addr & 0xfffff000);
+
+	/* 1.
Enable GTLB in RX90<7>, all AGP aperture access needs to fetch
+	 *    translation table first.
+	 * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
+	 *    graphics AGP aperture for the AGP3.0 port.
+	 */
+	pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
+	return 0;
+}
+
+
+static void via_cleanup_agp3(void)
+{
+	struct aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge->previous_size);
+	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
+}
+
+
+static void via_tlbflush_agp3(struct agp_memory *mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
+	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
+}
+
+
+static const struct agp_bridge_driver via_agp3_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= agp3_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 10,
+	.needs_scratch_page	= true,
+	.configure		= via_configure_agp3,
+	.fetch_size		= via_fetch_size_agp3,
+	.cleanup		= via_cleanup_agp3,
+	.tlb_flush		= via_tlbflush_agp3,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver via_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= via_generic_sizes,
+	.size_type		= U8_APER_SIZE,
+	.num_aperture_sizes	= 9,
+	.needs_scratch_page	= true,
+	.configure		= via_configure,
+	.fetch_size		= via_fetch_size,
+	.cleanup		= via_cleanup,
+	.tlb_flush		= via_tlbflush,
+	.mask_memory		= agp_generic_mask_memory,
+	.masks			= NULL,
+	.agp_enable		= agp_generic_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= agp_generic_insert_memory,
+	.remove_memory		= agp_generic_remove_memory,
+	.alloc_by_type		= agp_generic_alloc_by_type,
+	.free_by_type		= agp_generic_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages	= agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages	= agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids via_agp_device_ids[] =
+{
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_82C597_0,
+		.chipset_name	= "Apollo VP3",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_82C598_0,
+		.chipset_name	= "Apollo MVP3",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8501_0,
+		.chipset_name	= "Apollo MVP4",
+	},
+
+	/* VT8601 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8601_0,
+		.chipset_name	= "Apollo ProMedia/PLE133Ta",
+	},
+
+	/* VT82C693A / VT82C694T */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_82C691_0,
+		.chipset_name	= "Apollo Pro 133",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8371_0,
+		.chipset_name	= "KX133",
+	},
+
+	/* VT8633 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8633_0,
+		.chipset_name	= "Pro 266",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_XN266,
+		.chipset_name	= "Apollo Pro266",
+	},
+
+	/* VT8361 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8361,
+		.chipset_name	= "KLE133",
+	},
+
+	/* VT8365 / VT8362 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8363_0,
+		.chipset_name	= "Twister-K/KT133x/KM133",
+	},
+
+	/* VT8753A */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8753_0,
+		.chipset_name	= "P4X266",
+	},
+
+	/* VT8366 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8367_0,
+		.chipset_name	= "KT266/KY266x/KT333",
+	},
+
+	/* VT8633 (for CuMine/Celeron) */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8653_0,
+		.chipset_name	= "Pro266T",
+	},
+
+	/* KM266 / PM266 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_XM266,
+		.chipset_name	= "PM266/KM266",
+	},
+
+	/* CLE266 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_862X_0,
+		.chipset_name	= "CLE266",
+	},
+
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8377_0,
+		.chipset_name	= "KT400/KT400A/KT600",
+	},
+
+	/* VT8604 / VT8605 / VT8603
+	 * (Apollo Pro133A chipset with S3 Savage4) */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8605_0,
+		.chipset_name	= "ProSavage PM133/PL133/PN133"
+	},
+
+	/* P4M266x/P4N266 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8703_51_0,
+		.chipset_name	= "P4M266x/P4N266",
+	},
+
+	/* VT8754 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8754C_0,
+		.chipset_name	= "PT800",
+	},
+
+	/* P4X600 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8763_0,
+		.chipset_name	= "P4X600"
+	},
+
+	/* KM400 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8378_0,
+		.chipset_name	= "KM400/KM400A",
+	},
+
+	/* PT880 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_PT880,
+		.chipset_name	= "PT880",
+	},
+
+	/* PT880 Ultra */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_PT880ULTRA,
+		.chipset_name	= "PT880 Ultra",
+	},
+
+	/* PT890 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_8783_0,
+		.chipset_name	= "PT890",
+	},
+
+	/* PM800/PN800/PM880/PN880 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_PX8X0_0,
+		.chipset_name	= "PM800/PN800/PM880/PN880",
+	},
+	/* KT880 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_3269_0,
+		.chipset_name	= "KT880",
+	},
+	/* KTxxx/Px8xx */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_83_87XX_1,
+		.chipset_name	= "VT83xx/VT87xx/KTxxx/Px8xx",
+	},
+	/* P4M800 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_3296_0,
+		.chipset_name	= "P4M800",
+	},
+	/* P4M800CE */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_P4M800CE,
+		.chipset_name	= "VT3314",
+	},
+	/* VT3324 / CX700 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_VT3324,
+		.chipset_name	= "CX700",
+	},
+	/* VT3336 - this is a chipset for AMD Athlon/K8 CPUs. Due to K8's unique
+	 * architecture, the AGP resource and behavior are different from
+	 * the traditional AGP which resides only in the chipset. AGP is used
+	 * by the 3D driver, which wasn't available for the VT3336 and VT3364
+	 * generation until now. Unfortunately, by testing, VT3364 works
+	 * but VT3336 doesn't. - explanation from VIA; just leave this as
+	 * a placeholder to avoid future patches adding it back in.
+	 */
+#if 0
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_VT3336,
+		.chipset_name	= "VT3336",
+	},
+#endif
+	/* P4M890 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_P4M890,
+		.chipset_name	= "P4M890",
+	},
+	/* P4M900 */
+	{
+		.device_id	= PCI_DEVICE_ID_VIA_VT3364,
+		.chipset_name	= "P4M900",
+	},
+	{ }, /* dummy final entry, always present */
+};
+
+
+/*
+ * VIA's AGP3 chipsets do magic to make the AGP bridge compliant
+ * with the same standards version as the graphics card.
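+ * If AGPSEL says the bridge is strapped into AGP 2.0 compatibility
+ * mode we keep the plain v2 driver; otherwise the AGP3 routines
+ * above take over.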
+ */
+static void check_via_agp3 (struct agp_bridge_data *bridge)
+{
+	u8 reg;
+
+	pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
+	/* Check AGP 2.0 compatibility mode. */
+	if ((reg & (1<<1))==0)
+		bridge->driver = &via_agp3_driver;
+}
+
+
+static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct agp_device_ids *devs = via_agp_device_ids;
+	struct agp_bridge_data *bridge;
+	int j = 0;
+	u8 cap_ptr;
+
+	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+	if (!cap_ptr)
+		return -ENODEV;
+
+	j = ent - agp_via_pci_table;
+	printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);
+
+	bridge = agp_alloc_bridge();
+	if (!bridge)
+		return -ENOMEM;
+
+	bridge->dev = pdev;
+	bridge->capndx = cap_ptr;
+	bridge->driver = &via_driver;
+
+	/*
+	 * Garg, there are KT400s with KT266 IDs.
+	 */
+	if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
+		/* Is there a KT400 subsystem? */
+		if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
+			printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
+			check_via_agp3(bridge);
+		}
+	}
+
+	/* If this is an AGP3 bridge, check which mode it's in and adjust. */
+	get_agp_version(bridge);
+	if (bridge->major_version >= 3)
+		check_via_agp3(bridge);
+
+	/* Fill in the mode register */
+	pci_read_config_dword(pdev,
+			      bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+	pci_set_drvdata(pdev, bridge);
+	return agp_add_bridge(bridge);
+}
+
+static void agp_via_remove(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	agp_remove_bridge(bridge);
+	agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state (pdev);
+	pci_set_power_state (pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int agp_via_resume(struct pci_dev *pdev)
+{
+	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+	pci_set_power_state (pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (bridge->driver == &via_agp3_driver)
+		return via_configure_agp3();
+	else if (bridge->driver == &via_driver)
+		return via_configure();
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* must be the same order as name table above */
+static const struct pci_device_id agp_via_pci_table[] = {
+#define ID(x) \
+	{						\
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),	\
+	.class_mask	= ~0,				\
+	.vendor		= PCI_VENDOR_ID_VIA,		\
+	.device		= x,				\
+	.subvendor	= PCI_ANY_ID,			\
+	.subdevice	= PCI_ANY_ID,			\
+	}
+	ID(PCI_DEVICE_ID_VIA_82C597_0),
+	ID(PCI_DEVICE_ID_VIA_82C598_0),
+	ID(PCI_DEVICE_ID_VIA_8501_0),
+	ID(PCI_DEVICE_ID_VIA_8601_0),
+	ID(PCI_DEVICE_ID_VIA_82C691_0),
+	ID(PCI_DEVICE_ID_VIA_8371_0),
+	ID(PCI_DEVICE_ID_VIA_8633_0),
+	ID(PCI_DEVICE_ID_VIA_XN266),
+	ID(PCI_DEVICE_ID_VIA_8361),
+	ID(PCI_DEVICE_ID_VIA_8363_0),
+	ID(PCI_DEVICE_ID_VIA_8753_0),
+	ID(PCI_DEVICE_ID_VIA_8367_0),
+	ID(PCI_DEVICE_ID_VIA_8653_0),
+	ID(PCI_DEVICE_ID_VIA_XM266),
+	ID(PCI_DEVICE_ID_VIA_862X_0),
+	ID(PCI_DEVICE_ID_VIA_8377_0),
+	ID(PCI_DEVICE_ID_VIA_8605_0),
+	ID(PCI_DEVICE_ID_VIA_8703_51_0),
+	ID(PCI_DEVICE_ID_VIA_8754C_0),
+	ID(PCI_DEVICE_ID_VIA_8763_0),
+	ID(PCI_DEVICE_ID_VIA_8378_0),
+	ID(PCI_DEVICE_ID_VIA_PT880),
+	ID(PCI_DEVICE_ID_VIA_PT880ULTRA),
+	ID(PCI_DEVICE_ID_VIA_8783_0),
+	ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+	ID(PCI_DEVICE_ID_VIA_3269_0),
+	ID(PCI_DEVICE_ID_VIA_83_87XX_1),
+	ID(PCI_DEVICE_ID_VIA_3296_0),
+	ID(PCI_DEVICE_ID_VIA_P4M800CE),
+	ID(PCI_DEVICE_ID_VIA_VT3324),
+	ID(PCI_DEVICE_ID_VIA_P4M890),
+	ID(PCI_DEVICE_ID_VIA_VT3364),
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci,
agp_via_pci_table);
+
+
+static struct pci_driver agp_via_pci_driver = {
+	.name		= "agpgart-via",
+	.id_table	= agp_via_pci_table,
+	.probe		= agp_via_probe,
+	.remove		= agp_via_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_via_suspend,
+	.resume		= agp_via_resume,
+#endif
+};
+
+
+static int __init agp_via_init(void)
+{
+	if (agp_off)
+		return -EINVAL;
+	return pci_register_driver(&agp_via_pci_driver);
+}
+
+static void __exit agp_via_cleanup(void)
+{
+	pci_unregister_driver(&agp_via_pci_driver);
+}
+
+module_init(agp_via_init);
+module_exit(agp_via_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dave Jones");
diff --git a/kernel/drivers/char/apm-emulation.c b/kernel/drivers/char/apm-emulation.c
new file mode 100644
index 000000000..dd9dfa15e
--- /dev/null
+++ b/kernel/drivers/char/apm-emulation.c
@@ -0,0 +1,745 @@
+/*
+ * bios-less APM driver for ARM Linux
+ * Jamey Hicks
+ * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ *   Intel Corporation, Microsoft Corporation. Advanced Power Management
+ *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * This document is available from Microsoft at:
+ *    http://www.microsoft.com/whdc/archive/amp_12.mspx
+ */
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/apm-emulation.h>
+#include <linux/freezer.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+
+/*
+ * The apm_bios device is one of the misc char devices.
+ * This is its minor number.
+ */
+#define APM_MINOR_DEV	134
+
+/*
+ * One option can be changed at boot time as follows:
+ *	apm=on/off			enable/disable APM
+ */
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS	16
+
+struct apm_queue {
+	unsigned int		event_head;
+	unsigned int		event_tail;
+	apm_event_t		events[APM_MAX_EVENTS];
+};
+
+/*
+ * thread states (for threads using a writable /dev/apm_bios fd):
+ *
+ * SUSPEND_NONE:	nothing happening
+ * SUSPEND_PENDING:	suspend event queued for thread and pending to be read
+ * SUSPEND_READ:	suspend event read, pending acknowledgement
+ * SUSPEND_ACKED:	acknowledgement received from thread (via ioctl),
+ *			waiting for resume
+ * SUSPEND_ACKTO:	acknowledgement timeout
+ * SUSPEND_DONE:	thread had acked suspend and is now notified of
+ *			resume
+ *
+ * SUSPEND_WAIT:	this thread invoked suspend and is waiting for resume
+ *
+ * A thread migrates in one of three paths:
+ *	NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
+ *				    -6-> ACKTO -7-> NONE
+ *	NONE -8-> WAIT -9-> NONE
+ *
+ * While in PENDING or READ, the thread is accounted for in the
+ * suspend_acks_pending counter.
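+ * The PM notifier (apm_suspend_notifier below) waits for this counter
+ * to drop to zero before it lets a suspend proceed.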
+ *
+ * The transitions are invoked as follows:
+ *	1: suspend event is signalled from the core PM code
+ *	2: the suspend event is read from the fd by the userspace thread
+ *	3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
+ *	4: core PM code signals that we have resumed
+ *	5: APM_IOC_SUSPEND ioctl returns
+ *
+ *	6: the notifier invoked from the core PM code timed out waiting
+ *	   for all relevant threads to enter ACKED state and puts those
+ *	   that haven't into ACKTO
+ *	7: those threads issue APM_IOC_SUSPEND ioctl too late,
+ *	   get an error
+ *
+ *	8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
+ *	   ioctl code invokes pm_suspend()
+ *	9: pm_suspend() returns indicating resume
+ */
+enum apm_suspend_state {
+	SUSPEND_NONE,
+	SUSPEND_PENDING,
+	SUSPEND_READ,
+	SUSPEND_ACKED,
+	SUSPEND_ACKTO,
+	SUSPEND_WAIT,
+	SUSPEND_DONE,
+};
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+	struct list_head	list;
+
+	unsigned int		suser: 1;
+	unsigned int		writer: 1;
+	unsigned int		reader: 1;
+
+	int			suspend_result;
+	enum apm_suspend_state	suspend_state;
+
+	struct apm_queue	queue;
+};
+
+/*
+ * Local variables
+ */
+static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
+static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
+static int apm_disabled;
+static struct task_struct *kapmd_tsk;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info.  kapmd provides us a process context to handle
+ * "APM" events within - specifically necessary if we're going
+ * to be suspending the system.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+static DEFINE_MUTEX(state_lock);
+
+static const char driver_version[] = "1.13";	/* no spaces */
+
+
+
+/*
+ * Compatibility cruft until the IPAQ people move over to the new
+ * interface.
+ */
+static void __apm_get_power_status(struct apm_power_info *info)
+{
+}
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+EXPORT_SYMBOL(apm_get_power_status);
+
+
+/*
+ * APM event queue management.
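+ * Each queue is a fixed ring of APM_MAX_EVENTS entries: head and tail
+ * wrap modulo APM_MAX_EVENTS, and on overflow the oldest event is
+ * dropped to make room for the new one.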
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+	return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+	return q->events[q->event_tail];
+}
+
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+	if (q->event_head == q->event_tail) {
+		static int notified;
+
+		if (notified++ == 0)
+			printk(KERN_ERR "apm: an event queue overflowed\n");
+		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+	}
+	q->events[q->event_head] = event;
+}
+
+static void queue_event(apm_event_t event)
+{
+	struct apm_user *as;
+
+	down_read(&user_list_lock);
+	list_for_each_entry(as, &apm_user_list, list) {
+		if (as->reader)
+			queue_add_event(&as->queue, event);
+	}
+	up_read(&user_list_lock);
+	wake_up_interruptible(&apm_waitqueue);
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct apm_user *as = fp->private_data;
+	apm_event_t event;
+	int i = count, ret = 0;
+
+	if (count < sizeof(apm_event_t))
+		return -EINVAL;
+
+	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+		return -EAGAIN;
+
+	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+		event = queue_get_event(&as->queue);
+
+		ret = -EFAULT;
+		if (copy_to_user(buf, &event, sizeof(event)))
+			break;
+
+		mutex_lock(&state_lock);
+		if (as->suspend_state == SUSPEND_PENDING &&
+		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
+			as->suspend_state = SUSPEND_READ;
+		mutex_unlock(&state_lock);
+
+		buf += sizeof(event);
+		i -= sizeof(event);
+	}
+
+	if (i < count)
+		ret = count - i;
+
+	return ret;
+}
+
+static unsigned int apm_poll(struct file *fp, poll_table * wait)
+{
+	struct apm_user *as = fp->private_data;
+
+	poll_wait(fp, &apm_waitqueue, wait);
+	return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ * This IOCTL is overloaded, and performs two functions.  It is used to:
+ *   - initiate a suspend
+ *   - acknowledge a suspend read from /dev/apm_bios.
+ * Only when everyone who has opened /dev/apm_bios with write permission
+ * has acknowledged does the actual suspend happen.
+ */
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
+{
+	struct apm_user *as = filp->private_data;
+	int err = -EINVAL;
+
+	if (!as->suser || !as->writer)
+		return -EPERM;
+
+	switch (cmd) {
+	case APM_IOC_SUSPEND:
+		mutex_lock(&state_lock);
+
+		as->suspend_result = -EINTR;
+
+		switch (as->suspend_state) {
+		case SUSPEND_READ:
+			/*
+			 * If we read a suspend command from /dev/apm_bios,
+			 * then the corresponding APM_IOC_SUSPEND ioctl is
+			 * interpreted as an acknowledge.
+			 */
+			as->suspend_state = SUSPEND_ACKED;
+			atomic_dec(&suspend_acks_pending);
+			mutex_unlock(&state_lock);
+
+			/*
+			 * suspend_acks_pending changed, the notifier needs to
+			 * be woken up for this
+			 */
+			wake_up(&apm_suspend_waitqueue);
+
+			/*
+			 * Wait for the suspend/resume to complete.  If there
+			 * are pending acknowledges, we wait here for them.
+			 * wait_event_freezable() is interruptible and pending
+			 * signal can cause busy looping.  We aren't doing
+			 * anything critical, chill a bit on each iteration.
+ */ + while (wait_event_freezable(apm_suspend_waitqueue, + as->suspend_state != SUSPEND_ACKED)) + msleep(10); + break; + case SUSPEND_ACKTO: + as->suspend_result = -ETIMEDOUT; + mutex_unlock(&state_lock); + break; + default: + as->suspend_state = SUSPEND_WAIT; + mutex_unlock(&state_lock); + + /* + * Otherwise it is a request to suspend the system. + * Just invoke pm_suspend(), we'll handle it from + * there via the notifier. + */ + as->suspend_result = pm_suspend(PM_SUSPEND_MEM); + } + + mutex_lock(&state_lock); + err = as->suspend_result; + as->suspend_state = SUSPEND_NONE; + mutex_unlock(&state_lock); + break; + } + + return err; +} + +static int apm_release(struct inode * inode, struct file * filp) +{ + struct apm_user *as = filp->private_data; + + filp->private_data = NULL; + + down_write(&user_list_lock); + list_del(&as->list); + up_write(&user_list_lock); + + /* + * We are now unhooked from the chain. As far as new + * events are concerned, we no longer exist. + */ + mutex_lock(&state_lock); + if (as->suspend_state == SUSPEND_PENDING || + as->suspend_state == SUSPEND_READ) + atomic_dec(&suspend_acks_pending); + mutex_unlock(&state_lock); + + wake_up(&apm_suspend_waitqueue); + + kfree(as); + return 0; +} + +static int apm_open(struct inode * inode, struct file * filp) +{ + struct apm_user *as; + + as = kzalloc(sizeof(*as), GFP_KERNEL); + if (as) { + /* + * XXX - this is a tiny bit broken, when we consider BSD + * process accounting. If the device is opened by root, we + * instantly flag that we used superuser privs. Who knows, + * we might close the device immediately without doing a + * privileged operation -- cevans + */ + as->suser = capable(CAP_SYS_ADMIN); + as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; + as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; + + down_write(&user_list_lock); + list_add(&as->list, &apm_user_list); + up_write(&user_list_lock); + + filp->private_data = as; + } + + return as ? 0 : -ENOMEM; +} + +static const struct file_operations apm_bios_fops = { + .owner = THIS_MODULE, + .read = apm_read, + .poll = apm_poll, + .unlocked_ioctl = apm_ioctl, + .open = apm_open, + .release = apm_release, + .llseek = noop_llseek, +}; + +static struct miscdevice apm_device = { + .minor = APM_MINOR_DEV, + .name = "apm_bios", + .fops = &apm_bios_fops +}; + + +#ifdef CONFIG_PROC_FS +/* + * Arguments, with symbols from linux/apm_bios.h. + * + * 0) Linux driver version (this will change if format changes) + * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. 
+ * 2) APM flags from APM Installation Check (0x00): + * bit 0: APM_16_BIT_SUPPORT + * bit 1: APM_32_BIT_SUPPORT + * bit 2: APM_IDLE_SLOWS_CLOCK + * bit 3: APM_BIOS_DISABLED + * bit 4: APM_BIOS_DISENGAGED + * 3) AC line status + * 0x00: Off-line + * 0x01: On-line + * 0x02: On backup power (BIOS >= 1.1 only) + * 0xff: Unknown + * 4) Battery status + * 0x00: High + * 0x01: Low + * 0x02: Critical + * 0x03: Charging + * 0x04: Selected battery not present (BIOS >= 1.2 only) + * 0xff: Unknown + * 5) Battery flag + * bit 0: High + * bit 1: Low + * bit 2: Critical + * bit 3: Charging + * bit 7: No system battery + * 0xff: Unknown + * 6) Remaining battery life (percentage of charge): + * 0-100: valid + * -1: Unknown + * 7) Remaining battery life (time units): + * Number of remaining minutes or seconds + * -1: Unknown + * 8) min = minutes; sec = seconds + */ +static int proc_apm_show(struct seq_file *m, void *v) +{ + struct apm_power_info info; + char *units; + + info.ac_line_status = 0xff; + info.battery_status = 0xff; + info.battery_flag = 0xff; + info.battery_life = -1; + info.time = -1; + info.units = -1; + + if (apm_get_power_status) + apm_get_power_status(&info); + + switch (info.units) { + default: units = "?"; break; + case 0: units = "min"; break; + case 1: units = "sec"; break; + } + + seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", + driver_version, APM_32_BIT_SUPPORT, + info.ac_line_status, info.battery_status, + info.battery_flag, info.battery_life, + info.time, units); + + return 0; +} + +static int proc_apm_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_apm_show, NULL); +} + +static const struct file_operations apm_proc_fops = { + .owner = THIS_MODULE, + .open = proc_apm_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static int kapmd(void *arg) +{ + do { + apm_event_t event; + + wait_event_interruptible(kapmd_wait, + !queue_empty(&kapmd_queue) || kthread_should_stop()); + + if (kthread_should_stop()) + break; + + spin_lock_irq(&kapmd_queue_lock); + event = 0; + if (!queue_empty(&kapmd_queue)) + event = queue_get_event(&kapmd_queue); + spin_unlock_irq(&kapmd_queue_lock); + + switch (event) { + case 0: + break; + + case APM_LOW_BATTERY: + case APM_POWER_STATUS_CHANGE: + queue_event(event); + break; + + case APM_USER_SUSPEND: + case APM_SYS_SUSPEND: + pm_suspend(PM_SUSPEND_MEM); + break; + + case APM_CRITICAL_SUSPEND: + atomic_inc(&userspace_notification_inhibit); + pm_suspend(PM_SUSPEND_MEM); + atomic_dec(&userspace_notification_inhibit); + break; + } + } while (1); + + return 0; +} + +static int apm_suspend_notifier(struct notifier_block *nb, + unsigned long event, + void *dummy) +{ + struct apm_user *as; + int err; + unsigned long apm_event; + + /* short-cut emergency suspends */ + if (atomic_read(&userspace_notification_inhibit)) + return NOTIFY_DONE; + + switch (event) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + apm_event = (event == PM_SUSPEND_PREPARE) ? + APM_USER_SUSPEND : APM_USER_HIBERNATION; + /* + * Queue an event to all "writer" users that we want + * to suspend and need their ack. 
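+		 * Only descriptors opened for both reading and writing by a
+		 * capable user (and not currently in SUSPEND_WAIT themselves)
+		 * take part in the handshake; see the checks below.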
+		 */
+		mutex_lock(&state_lock);
+		down_read(&user_list_lock);
+
+		list_for_each_entry(as, &apm_user_list, list) {
+			if (as->suspend_state != SUSPEND_WAIT && as->reader &&
+			    as->writer && as->suser) {
+				as->suspend_state = SUSPEND_PENDING;
+				atomic_inc(&suspend_acks_pending);
+				queue_add_event(&as->queue, apm_event);
+			}
+		}
+
+		up_read(&user_list_lock);
+		mutex_unlock(&state_lock);
+		wake_up_interruptible(&apm_waitqueue);
+
+		/*
+		 * Wait for the suspend_acks_pending variable to drop to
+		 * zero, meaning everybody acked the suspend event (or the
+		 * process was killed.)
+		 *
+		 * If the app won't answer within a short while, we assume it
+		 * locked up and ignore it.
+		 */
+		err = wait_event_interruptible_timeout(
+			apm_suspend_waitqueue,
+			atomic_read(&suspend_acks_pending) == 0,
+			5*HZ);
+
+		/* timed out */
+		if (err == 0) {
+			/*
+			 * Move anybody who timed out to "ack timeout" state.
+			 *
+			 * We could time out and the userspace does the ACK
+			 * right after we time out but before we enter the
+			 * locked section here, but that's fine.
+			 */
+			mutex_lock(&state_lock);
+			down_read(&user_list_lock);
+			list_for_each_entry(as, &apm_user_list, list) {
+				if (as->suspend_state == SUSPEND_PENDING ||
+				    as->suspend_state == SUSPEND_READ) {
+					as->suspend_state = SUSPEND_ACKTO;
+					atomic_dec(&suspend_acks_pending);
+				}
+			}
+			up_read(&user_list_lock);
+			mutex_unlock(&state_lock);
+		}
+
+		/* let suspend proceed */
+		if (err >= 0)
+			return NOTIFY_OK;
+
+		/* interrupted by signal */
+		return notifier_from_errno(err);
+
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+		apm_event = (event == PM_POST_SUSPEND) ?
+			APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
+		/*
+		 * Anyone on the APM queues will think we're still suspended.
+		 * Send a message so everyone knows we're now awake again.
+		 */
+		queue_event(apm_event);
+
+		/*
+		 * Finally, wake up anyone who is sleeping on the suspend.
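+		 * Their APM_IOC_SUSPEND ioctl returns once they have been
+		 * moved to SUSPEND_DONE below and a result code was stored.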
+		 */
+		mutex_lock(&state_lock);
+		down_read(&user_list_lock);
+		list_for_each_entry(as, &apm_user_list, list) {
+			if (as->suspend_state == SUSPEND_ACKED) {
+				/*
+				 * TODO: maybe grab error code, needs core
+				 * changes to push the error to the notifier
+				 * chain (could use the second parameter if
+				 * implemented)
+				 */
+				as->suspend_result = 0;
+				as->suspend_state = SUSPEND_DONE;
+			}
+		}
+		up_read(&user_list_lock);
+		mutex_unlock(&state_lock);
+
+		wake_up(&apm_suspend_waitqueue);
+		return NOTIFY_OK;
+
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block apm_notif_block = {
+	.notifier_call = apm_suspend_notifier,
+};
+
+static int __init apm_init(void)
+{
+	int ret;
+
+	if (apm_disabled) {
+		printk(KERN_NOTICE "apm: disabled on user request.\n");
+		return -ENODEV;
+	}
+
+	kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
+	if (IS_ERR(kapmd_tsk)) {
+		ret = PTR_ERR(kapmd_tsk);
+		kapmd_tsk = NULL;
+		goto out;
+	}
+	wake_up_process(kapmd_tsk);
+
+#ifdef CONFIG_PROC_FS
+	proc_create("apm", 0, NULL, &apm_proc_fops);
+#endif
+
+	ret = misc_register(&apm_device);
+	if (ret)
+		goto out_stop;
+
+	ret = register_pm_notifier(&apm_notif_block);
+	if (ret)
+		goto out_unregister;
+
+	return 0;
+
+ out_unregister:
+	misc_deregister(&apm_device);
+ out_stop:
+	remove_proc_entry("apm", NULL);
+	kthread_stop(kapmd_tsk);
+ out:
+	return ret;
+}
+
+static void __exit apm_exit(void)
+{
+	unregister_pm_notifier(&apm_notif_block);
+	misc_deregister(&apm_device);
+	remove_proc_entry("apm", NULL);
+
+	kthread_stop(kapmd_tsk);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init apm_setup(char *str)
+{
+	while ((str != NULL) && (*str != '\0')) {
+		if (strncmp(str, "off", 3) == 0)
+			apm_disabled = 1;
+		if (strncmp(str, "on", 2) == 0)
+			apm_disabled = 0;
+		str = strchr(str, ',');
+		if (str != NULL)
+			str += strspn(str, ", \t");
+	}
+	return 1;
+}
+
+__setup("apm=", apm_setup);
+#endif
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action.  Only a subset of events are handled:
+ *   %APM_LOW_BATTERY
+ *   %APM_POWER_STATUS_CHANGE
+ *   %APM_USER_SUSPEND
+ *   %APM_SYS_SUSPEND
+ *   %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kapmd_queue_lock, flags);
+	queue_add_event(&kapmd_queue, event);
+	spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+	wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
diff --git a/kernel/drivers/char/applicom.c b/kernel/drivers/char/applicom.c
new file mode 100644
index 000000000..14790304b
--- /dev/null
+++ b/kernel/drivers/char/applicom.c
@@ -0,0 +1,842 @@
+/* Derived from Applicom driver ac.c for SCO Unix                            */
+/* Ported by David Woodhouse, Axiom (Cambridge) Ltd.                         */
+/* dwmw2@infradead.org 30/8/98                                               */
+/* $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $                          */
+/* This module is for Linux 2.1 and 2.2 series kernels.                      */
+/*****************************************************************************/
+/* J PAGET 18/02/94 move to V2.4.2: ioctl code 2 resets the interrupts,      */
+/* so that we reset correctly after an uncontrolled exit                     */
+/* J PAGET 02/05/94 move to V2.4.3: in the interrupt handling,               */
+/* LoopCount was not initialised to 0.
*/
+/* F LAFORSE 04/07/95 version V2.6.0: dummy read after accessing a card      */
+/* in order to release the bus                                               */
+/* J.PAGET 19/11/95 version V2.6.1: the number, address and irq of the       */
+/* boards are no longer configured and passed as arguments to acinit, but    */
+/* are probed on the bus to adapt to the number of cards present on it.      */
+/* IOCTL code 6 used to display V2.4.3                                       */
+/* F.LAFORSE 28/11/95 created acXX.o files with the various base addresses   */
+/* of the cards; IOCTL 6 made more complete                                  */
+/* J.PAGET 19/08/96 copied version V2.6 to V2.8.0 with no code change other  */
+/* than the text V2.6.1 becoming V2.8.0                                      */
+/*****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "applicom.h"
+
+
+/* NOTE: We use for loops with {write,read}b() instead of
+   memcpy_{from,to}io throughout this driver. This is because
+   the board doesn't correctly handle word accesses - only
+   bytes.
+*/
+
+
+#undef DEBUG
+
+#define MAX_BOARD	8	/* maximum number of boards possible */
+#define MAX_ISA_BOARD	4
+#define LEN_RAM_IO	0x800
+#define AC_MINOR	157
+
+#ifndef PCI_VENDOR_ID_APPLICOM
+#define PCI_VENDOR_ID_APPLICOM			0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC	0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN	0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB	0x0003
+#endif
+
+static DEFINE_MUTEX(ac_mutex);
+static char *applicom_pci_devnames[] = {
+	"PCI board",
+	"PCI2000IBS / PCI2000CAN",
+	"PCI2000PFB"
+};
+
+static struct pci_device_id applicom_pci_tbl[] = {
+	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
+	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
+	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, applicom_pci_tbl);
+
+MODULE_AUTHOR("David Woodhouse & Applicom International");
+MODULE_DESCRIPTION("Driver for Applicom Profibus card");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(AC_MINOR);
+
+MODULE_SUPPORTED_DEVICE("ac");
+
+
+static struct applicom_board {
+	unsigned long PhysIO;
+	void __iomem *RamIO;
+	wait_queue_head_t FlagSleepSend;
+	long irq;
+	spinlock_t mutex;
+} apbs[MAX_BOARD];
+
+static unsigned int irq = 0;	/* interrupt number IRQ       */
+static unsigned long mem = 0;	/* physical segment of board  */
+
+module_param(irq, uint, 0);
+MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
+module_param(mem, ulong, 0);
+MODULE_PARM_DESC(mem, "Shared Memory Address of Applicom board");
+
+static unsigned int numboards;	/* number of installed boards */
+static volatile unsigned char Dummy;
+static DECLARE_WAIT_QUEUE_HEAD(FlagSleepRec);
+static unsigned int WriteErrorCount;	/* number of write errors  */
+static unsigned int ReadErrorCount;	/* number of read errors   */
+static unsigned int DeviceErrorCount;	/* number of device errors */
+
+static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
+static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
+static irqreturn_t ac_interrupt(int, void *);
+
+static const struct file_operations ac_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read = ac_read,
+	.write = ac_write,
+	.unlocked_ioctl = ac_ioctl,
+};
+
+static struct miscdevice ac_miscdev = {
+	AC_MINOR,
+	"ac",
+	&ac_fops
+};
+
+static int dummy;	/* dev_id for request_irq() */
+
+static int ac_register_board(unsigned long physloc,
void __iomem *loc, + unsigned char boardno) +{ + volatile unsigned char byte_reset_it; + + if((readb(loc + CONF_END_TEST) != 0x00) || + (readb(loc + CONF_END_TEST + 1) != 0x55) || + (readb(loc + CONF_END_TEST + 2) != 0xAA) || + (readb(loc + CONF_END_TEST + 3) != 0xFF)) + return 0; + + if (!boardno) + boardno = readb(loc + NUMCARD_OWNER_TO_PC); + + if (!boardno || boardno > MAX_BOARD) { + printk(KERN_WARNING "Board #%d (at 0x%lx) is out of range (1 <= x <= %d).\n", + boardno, physloc, MAX_BOARD); + return 0; + } + + if (apbs[boardno - 1].RamIO) { + printk(KERN_WARNING "Board #%d (at 0x%lx) conflicts with previous board #%d (at 0x%lx)\n", + boardno, physloc, boardno, apbs[boardno-1].PhysIO); + return 0; + } + + boardno--; + + apbs[boardno].PhysIO = physloc; + apbs[boardno].RamIO = loc; + init_waitqueue_head(&apbs[boardno].FlagSleepSend); + spin_lock_init(&apbs[boardno].mutex); + byte_reset_it = readb(loc + RAM_IT_TO_PC); + + numboards++; + return boardno + 1; +} + +static void __exit applicom_exit(void) +{ + unsigned int i; + + misc_deregister(&ac_miscdev); + + for (i = 0; i < MAX_BOARD; i++) { + + if (!apbs[i].RamIO) + continue; + + if (apbs[i].irq) + free_irq(apbs[i].irq, &dummy); + + iounmap(apbs[i].RamIO); + } +} + +static int __init applicom_init(void) +{ + int i, numisa = 0; + struct pci_dev *dev = NULL; + void __iomem *RamIO; + int boardno, ret; + + printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n"); + + /* No mem and irq given - check for a PCI card */ + + while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) { + + if (!pci_match_id(applicom_pci_tbl, dev)) + continue; + + if (pci_enable_device(dev)) + return -EIO; + + RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO); + + if (!RamIO) { + printk(KERN_INFO "ac.o: Failed to ioremap PCI memory " + "space at 0x%llx\n", + (unsigned long long)pci_resource_start(dev, 0)); + pci_disable_device(dev); + return -EIO; + } + + printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n", + applicom_pci_devnames[dev->device-1], + (unsigned long long)pci_resource_start(dev, 0), + dev->irq); + + boardno = ac_register_board(pci_resource_start(dev, 0), + RamIO, 0); + if (!boardno) { + printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n"); + iounmap(RamIO); + pci_disable_device(dev); + continue; + } + + if (request_irq(dev->irq, &ac_interrupt, IRQF_SHARED, "Applicom PCI", &dummy)) { + printk(KERN_INFO "Could not allocate IRQ %d for PCI Applicom device.\n", dev->irq); + iounmap(RamIO); + pci_disable_device(dev); + apbs[boardno - 1].RamIO = NULL; + continue; + } + + /* Enable interrupts. */ + + writeb(0x40, apbs[boardno - 1].RamIO + RAM_IT_FROM_PC); + + apbs[boardno - 1].irq = dev->irq; + } + + /* Finished with PCI cards. 
If none registered, + * and there was no mem/irq specified, exit */ + + if (!mem || !irq) { + if (numboards) + goto fin; + else { + printk(KERN_INFO "ac.o: No PCI boards found.\n"); + printk(KERN_INFO "ac.o: For an ISA board you must supply memory and irq parameters.\n"); + return -ENXIO; + } + } + + /* Now try the specified ISA cards */ + + for (i = 0; i < MAX_ISA_BOARD; i++) { + RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO); + + if (!RamIO) { + printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1); + continue; + } + + if (!(boardno = ac_register_board((unsigned long)mem+ (LEN_RAM_IO*i), + RamIO,i+1))) { + iounmap(RamIO); + continue; + } + + printk(KERN_NOTICE "Applicom ISA card found at mem 0x%lx, irq %d\n", mem + (LEN_RAM_IO*i), irq); + + if (!numisa) { + if (request_irq(irq, &ac_interrupt, IRQF_SHARED, "Applicom ISA", &dummy)) { + printk(KERN_WARNING "Could not allocate IRQ %d for ISA Applicom device.\n", irq); + iounmap(RamIO); + apbs[boardno - 1].RamIO = NULL; + } + else + apbs[boardno - 1].irq = irq; + } + else + apbs[boardno - 1].irq = 0; + + numisa++; + } + + if (!numisa) + printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found " + "at mem 0x%lx\n", mem); + + fin: + init_waitqueue_head(&FlagSleepRec); + + WriteErrorCount = 0; + ReadErrorCount = 0; + DeviceErrorCount = 0; + + if (numboards) { + ret = misc_register(&ac_miscdev); + if (ret) { + printk(KERN_WARNING "ac.o: Unable to register misc device\n"); + goto out; + } + for (i = 0; i < MAX_BOARD; i++) { + int serial; + char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1]; + + if (!apbs[i].RamIO) + continue; + + for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++) + boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial); + + boardname[serial] = 0; + + + printk(KERN_INFO "Applicom board %d: %s, PROM V%d.%d", + i+1, boardname, + (int)(readb(apbs[i].RamIO + VERS) >> 4), + (int)(readb(apbs[i].RamIO + VERS) & 0xF)); + + serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + + (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + + (readb(apbs[i].RamIO + SERIAL_NUMBER + 2) ); + + if (serial != 0) + printk(" S/N %d\n", serial); + else + printk("\n"); + } + return 0; + } + + else + return -ENXIO; + +out: + for (i = 0; i < MAX_BOARD; i++) { + if (!apbs[i].RamIO) + continue; + if (apbs[i].irq) + free_irq(apbs[i].irq, &dummy); + iounmap(apbs[i].RamIO); + } + return ret; +} + +module_init(applicom_init); +module_exit(applicom_exit); + + +static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos) +{ + unsigned int NumCard; /* Board number 1 -> 8 */ + unsigned int IndexCard; /* Index board number 0 -> 7 */ + unsigned char TicCard; /* Board TIC to send */ + unsigned long flags; /* Current priority */ + struct st_ram_io st_loc; + struct mailbox tmpmailbox; +#ifdef DEBUG + int c; +#endif + DECLARE_WAITQUEUE(wait, current); + + if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) { + static int warncount = 5; + if (warncount) { + printk(KERN_INFO "Hmmm. 
write() of Applicom card, length %zd != expected %zd\n", + count, sizeof(struct st_ram_io) + sizeof(struct mailbox)); + warncount--; + } + return -EINVAL; + } + + if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io))) + return -EFAULT; + + if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)], + sizeof(struct mailbox))) + return -EFAULT; + + NumCard = st_loc.num_card; /* board number to send */ + TicCard = st_loc.tic_des_from_pc; /* tic number to send */ + IndexCard = NumCard - 1; + + if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO) + return -EINVAL; + +#ifdef DEBUG + printk("Write to applicom card #%d. struct st_ram_io follows:", + IndexCard+1); + + for (c = 0; c < sizeof(struct st_ram_io);) { + + printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]); + + for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) { + printk(" %2.2X", ((unsigned char *) &st_loc)[c]); + } + } + + printk("\nstruct mailbox follows:"); + + for (c = 0; c < sizeof(struct mailbox);) { + printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]); + + for (c++; c % 8 && c < sizeof(struct mailbox); c++) { + printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]); + } + } + + printk("\n"); +#endif + + spin_lock_irqsave(&apbs[IndexCard].mutex, flags); + + /* Test octet ready correct */ + if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) { + Dummy = readb(apbs[IndexCard].RamIO + VERS); + spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags); + printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n", + IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY)); + DeviceErrorCount++; + return -EIO; + } + + /* Place ourselves on the wait queue */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait); + + /* Check whether the card is ready for us */ + while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) { + Dummy = readb(apbs[IndexCard].RamIO + VERS); + /* It's busy. Sleep. */ + + spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags); + schedule(); + if (signal_pending(current)) { + remove_wait_queue(&apbs[IndexCard].FlagSleepSend, + &wait); + return -EINTR; + } + spin_lock_irqsave(&apbs[IndexCard].mutex, flags); + set_current_state(TASK_INTERRUPTIBLE); + } + + /* We may not have actually slept */ + set_current_state(TASK_RUNNING); + remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait); + + writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY); + + /* Which is best - lock down the pages with rawio and then + copy directly, or use bounce buffers? 
For now we do the latter + because it works with 2.2 still */ + { + unsigned char *from = (unsigned char *) &tmpmailbox; + void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC; + int c; + + for (c = 0; c < sizeof(struct mailbox); c++) + writeb(*(from++), to++); + } + + writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC); + writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC); + writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC); + writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC); + writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY); + writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC); + Dummy = readb(apbs[IndexCard].RamIO + VERS); + spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags); + return 0; +} + +static int do_ac_read(int IndexCard, char __user *buf, + struct st_ram_io *st_loc, struct mailbox *mailbox) +{ + void __iomem *from = apbs[IndexCard].RamIO + RAM_TO_PC; + unsigned char *to = (unsigned char *)mailbox; +#ifdef DEBUG + int c; +#endif + + st_loc->tic_owner_to_pc = readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC); + st_loc->numcard_owner_to_pc = readb(apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC); + + + { + int c; + + for (c = 0; c < sizeof(struct mailbox); c++) + *(to++) = readb(from++); + } + writeb(1, apbs[IndexCard].RamIO + ACK_FROM_PC_READY); + writeb(1, apbs[IndexCard].RamIO + TYP_ACK_FROM_PC); + writeb(IndexCard+1, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC); + writeb(readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC), + apbs[IndexCard].RamIO + TIC_ACK_FROM_PC); + writeb(2, apbs[IndexCard].RamIO + ACK_FROM_PC_READY); + writeb(0, apbs[IndexCard].RamIO + DATA_TO_PC_READY); + writeb(2, apbs[IndexCard].RamIO + RAM_IT_FROM_PC); + Dummy = readb(apbs[IndexCard].RamIO + VERS); + +#ifdef DEBUG + printk("Read from applicom card #%d. struct st_ram_io follows:", NumCard); + + for (c = 0; c < sizeof(struct st_ram_io);) { + printk("\n%5.5X: %2.2X", c, ((unsigned char *)st_loc)[c]); + + for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) { + printk(" %2.2X", ((unsigned char *)st_loc)[c]); + } + } + + printk("\nstruct mailbox follows:"); + + for (c = 0; c < sizeof(struct mailbox);) { + printk("\n%5.5X: %2.2X", c, ((unsigned char *)mailbox)[c]); + + for (c++; c % 8 && c < sizeof(struct mailbox); c++) { + printk(" %2.2X", ((unsigned char *)mailbox)[c]); + } + } + printk("\n"); +#endif + return (sizeof(struct st_ram_io) + sizeof(struct mailbox)); +} + +static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr) +{ + unsigned long flags; + unsigned int i; + unsigned char tmp; + int ret = 0; + DECLARE_WAITQUEUE(wait, current); +#ifdef DEBUG + int loopcount=0; +#endif + /* No need to ratelimit this. Only root can trigger it anyway */ + if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) { + printk( KERN_WARNING "Hmmm. 
read() of Applicom card, length %zd != expected %zd\n", + count,sizeof(struct st_ram_io) + sizeof(struct mailbox)); + return -EINVAL; + } + + while(1) { + /* Stick ourself on the wait queue */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&FlagSleepRec, &wait); + + /* Scan each board, looking for one which has a packet for us */ + for (i=0; i < MAX_BOARD; i++) { + if (!apbs[i].RamIO) + continue; + spin_lock_irqsave(&apbs[i].mutex, flags); + + tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY); + + if (tmp == 2) { + struct st_ram_io st_loc; + struct mailbox mailbox; + + /* Got a packet for us */ + memset(&st_loc, 0, sizeof(st_loc)); + ret = do_ac_read(i, buf, &st_loc, &mailbox); + spin_unlock_irqrestore(&apbs[i].mutex, flags); + set_current_state(TASK_RUNNING); + remove_wait_queue(&FlagSleepRec, &wait); + + if (copy_to_user(buf, &st_loc, sizeof(st_loc))) + return -EFAULT; + if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox))) + return -EFAULT; + return tmp; + } + + if (tmp > 2) { + /* Got an error */ + Dummy = readb(apbs[i].RamIO + VERS); + + spin_unlock_irqrestore(&apbs[i].mutex, flags); + set_current_state(TASK_RUNNING); + remove_wait_queue(&FlagSleepRec, &wait); + + printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n", + i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY)); + DeviceErrorCount++; + return -EIO; + } + + /* Nothing for us. Try the next board */ + Dummy = readb(apbs[i].RamIO + VERS); + spin_unlock_irqrestore(&apbs[i].mutex, flags); + + } /* per board */ + + /* OK - No boards had data for us. Sleep now */ + + schedule(); + remove_wait_queue(&FlagSleepRec, &wait); + + if (signal_pending(current)) + return -EINTR; + +#ifdef DEBUG + if (loopcount++ > 2) { + printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount); + } +#endif + } +} + +static irqreturn_t ac_interrupt(int vec, void *dev_instance) +{ + unsigned int i; + unsigned int FlagInt; + unsigned int LoopCount; + int handled = 0; + + // printk("Applicom interrupt on IRQ %d occurred\n", vec); + + LoopCount = 0; + + do { + FlagInt = 0; + for (i = 0; i < MAX_BOARD; i++) { + + /* Skip if this board doesn't exist */ + if (!apbs[i].RamIO) + continue; + + spin_lock(&apbs[i].mutex); + + /* Skip if this board doesn't want attention */ + if(readb(apbs[i].RamIO + RAM_IT_TO_PC) == 0) { + spin_unlock(&apbs[i].mutex); + continue; + } + + handled = 1; + FlagInt = 1; + writeb(0, apbs[i].RamIO + RAM_IT_TO_PC); + + if (readb(apbs[i].RamIO + DATA_TO_PC_READY) > 2) { + printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataToPcReady = %d\n", + i+1,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY)); + DeviceErrorCount++; + } + + if((readb(apbs[i].RamIO + DATA_FROM_PC_READY) > 2) && + (readb(apbs[i].RamIO + DATA_FROM_PC_READY) != 6)) { + + printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataFromPcReady = %d\n", + i+1,(int)readb(apbs[i].RamIO + DATA_FROM_PC_READY)); + DeviceErrorCount++; + } + + if (readb(apbs[i].RamIO + DATA_TO_PC_READY) == 2) { /* mailbox sent by the card ? */ + if (waitqueue_active(&FlagSleepRec)) { + wake_up_interruptible(&FlagSleepRec); + } + } + + if (readb(apbs[i].RamIO + DATA_FROM_PC_READY) == 0) { /* ram i/o free for write by pc ? */ + if (waitqueue_active(&apbs[i].FlagSleepSend)) { /* process sleep during read ? 
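+								  (i.e. a writer blocked in ac_write()
+								  until DATA_FROM_PC_READY drops to 0)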
*/ + wake_up_interruptible(&apbs[i].FlagSleepSend); + } + } + Dummy = readb(apbs[i].RamIO + VERS); + + if(readb(apbs[i].RamIO + RAM_IT_TO_PC)) { + /* There's another int waiting on this card */ + spin_unlock(&apbs[i].mutex); + i--; + } else { + spin_unlock(&apbs[i].mutex); + } + } + if (FlagInt) + LoopCount = 0; + else + LoopCount++; + } while(LoopCount < 2); + return IRQ_RETVAL(handled); +} + + + +static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + +{ /* @ ADG ou ATO selon le cas */ + int i; + unsigned char IndexCard; + void __iomem *pmem; + int ret = 0; + volatile unsigned char byte_reset_it; + struct st_ram_io *adgl; + void __user *argp = (void __user *)arg; + + /* In general, the device is only openable by root anyway, so we're not + particularly concerned that bogus ioctls can flood the console. */ + + adgl = memdup_user(argp, sizeof(struct st_ram_io)); + if (IS_ERR(adgl)) + return PTR_ERR(adgl); + + mutex_lock(&ac_mutex); + IndexCard = adgl->num_card-1; + + if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { + static int warncount = 10; + if (warncount) { + printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1); + warncount--; + } + kfree(adgl); + mutex_unlock(&ac_mutex); + return -EINVAL; + } + + switch (cmd) { + + case 0: + pmem = apbs[IndexCard].RamIO; + for (i = 0; i < sizeof(struct st_ram_io); i++) + ((unsigned char *)adgl)[i]=readb(pmem++); + if (copy_to_user(argp, adgl, sizeof(struct st_ram_io))) + ret = -EFAULT; + break; + case 1: + pmem = apbs[IndexCard].RamIO + CONF_END_TEST; + for (i = 0; i < 4; i++) + adgl->conf_end_test[i] = readb(pmem++); + for (i = 0; i < 2; i++) + adgl->error_code[i] = readb(pmem++); + for (i = 0; i < 4; i++) + adgl->parameter_error[i] = readb(pmem++); + pmem = apbs[IndexCard].RamIO + VERS; + adgl->vers = readb(pmem); + pmem = apbs[IndexCard].RamIO + TYPE_CARD; + for (i = 0; i < 20; i++) + adgl->reserv1[i] = readb(pmem++); + *(int *)&adgl->reserv1[20] = + (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) + + (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) + + (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) ); + + if (copy_to_user(argp, adgl, sizeof(struct st_ram_io))) + ret = -EFAULT; + break; + case 2: + pmem = apbs[IndexCard].RamIO + CONF_END_TEST; + for (i = 0; i < 10; i++) + writeb(0xff, pmem++); + writeb(adgl->data_from_pc_ready, + apbs[IndexCard].RamIO + DATA_FROM_PC_READY); + + writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC); + + for (i = 0; i < MAX_BOARD; i++) { + if (apbs[i].RamIO) { + byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC); + } + } + break; + case 3: + pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC; + writeb(adgl->tic_des_from_pc, pmem); + break; + case 4: + pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC; + adgl->tic_owner_to_pc = readb(pmem++); + adgl->numcard_owner_to_pc = readb(pmem); + if (copy_to_user(argp, adgl,sizeof(struct st_ram_io))) + ret = -EFAULT; + break; + case 5: + writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC); + writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC); + writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC); + writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY); + writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC); + break; + case 6: + printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.30 $)\n"); + printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards); + printk(KERN_INFO "Segment of board ........... 
%X\n", (int) mem); + printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq); + for (i = 0; i < MAX_BOARD; i++) { + int serial; + char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1]; + + if (!apbs[i].RamIO) + continue; + + for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++) + boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial); + boardname[serial] = 0; + + printk(KERN_INFO "Prom version board %d ....... V%d.%d %s", + i+1, + (int)(readb(apbs[i].RamIO + VERS) >> 4), + (int)(readb(apbs[i].RamIO + VERS) & 0xF), + boardname); + + + serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + + (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + + (readb(apbs[i].RamIO + SERIAL_NUMBER + 2) ); + + if (serial != 0) + printk(" S/N %d\n", serial); + else + printk("\n"); + } + if (DeviceErrorCount != 0) + printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount); + if (ReadErrorCount != 0) + printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount); + if (WriteErrorCount != 0) + printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount); + if (waitqueue_active(&FlagSleepRec)) + printk(KERN_INFO "Process in read pending\n"); + for (i = 0; i < MAX_BOARD; i++) { + if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend)) + printk(KERN_INFO "Process in write pending board %d\n",i+1); + } + break; + default: + ret = -ENOTTY; + break; + } + Dummy = readb(apbs[IndexCard].RamIO + VERS); + kfree(adgl); + mutex_unlock(&ac_mutex); + return 0; +} + diff --git a/kernel/drivers/char/applicom.h b/kernel/drivers/char/applicom.h new file mode 100644 index 000000000..35530b3d9 --- /dev/null +++ b/kernel/drivers/char/applicom.h @@ -0,0 +1,85 @@ +/* $Id: applicom.h,v 1.2 1999/08/28 15:09:49 dwmw2 Exp $ */ + + +#ifndef __LINUX_APPLICOM_H__ +#define __LINUX_APPLICOM_H__ + + +#define DATA_TO_PC_READY 0x00 +#define TIC_OWNER_TO_PC 0x01 +#define NUMCARD_OWNER_TO_PC 0x02 +#define TIC_DES_TO_PC 0x03 +#define NUMCARD_DES_TO_PC 0x04 +#define DATA_FROM_PC_READY 0x05 +#define TIC_OWNER_FROM_PC 0x06 +#define NUMCARD_OWNER_FROM_PC 0x07 +#define TIC_DES_FROM_PC 0x08 +#define NUMCARD_DES_FROM_PC 0x09 +#define ACK_FROM_PC_READY 0x0E +#define TIC_ACK_FROM_PC 0x0F +#define NUMCARD_ACK_FROM_PC 0x010 +#define TYP_ACK_FROM_PC 0x011 +#define CONF_END_TEST 0x012 +#define ERROR_CODE 0x016 +#define PARAMETER_ERROR 0x018 +#define VERS 0x01E +#define RAM_TO_PC 0x040 +#define RAM_FROM_PC 0x0170 +#define TYPE_CARD 0x03C0 +#define SERIAL_NUMBER 0x03DA +#define RAM_IT_FROM_PC 0x03FE +#define RAM_IT_TO_PC 0x03FF + +struct mailbox{ + u16 stjb_codef; /* offset 00 */ + s16 stjb_status; /* offset 02 */ + u16 stjb_ticuser_root; /* offset 04 */ + u8 stjb_piduser[4]; /* offset 06 */ + u16 stjb_mode; /* offset 0A */ + u16 stjb_time; /* offset 0C */ + u16 stjb_stop; /* offset 0E */ + u16 stjb_nfonc; /* offset 10 */ + u16 stjb_ncard; /* offset 12 */ + u16 stjb_nchan; /* offset 14 */ + u16 stjb_nes; /* offset 16 */ + u16 stjb_nb; /* offset 18 */ + u16 stjb_typvar; /* offset 1A */ + u32 stjb_adr; /* offset 1C */ + u16 stjb_ticuser_dispcyc; /* offset 20 */ + u16 stjb_ticuser_protocol; /* offset 22 */ + u8 stjb_filler[12]; /* offset 24 */ + u8 stjb_data[256]; /* offset 30 */ + }; + +struct st_ram_io +{ + unsigned char data_to_pc_ready; + unsigned char tic_owner_to_pc; + unsigned char numcard_owner_to_pc; + unsigned char tic_des_to_pc; + unsigned char numcard_des_to_pc; + unsigned char data_from_pc_ready; + unsigned char tic_owner_from_pc; + unsigned char numcard_owner_from_pc; + unsigned char 
tic_des_from_pc; + unsigned char numcard_des_from_pc; + unsigned char ack_to_pc_ready; + unsigned char tic_ack_to_pc; + unsigned char numcard_ack_to_pc; + unsigned char typ_ack_to_pc; + unsigned char ack_from_pc_ready; + unsigned char tic_ack_from_pc; + unsigned char numcard_ack_from_pc; + unsigned char typ_ack_from_pc; + unsigned char conf_end_test[4]; + unsigned char error_code[2]; + unsigned char parameter_error[4]; + unsigned char time_base; + unsigned char nul_inc; + unsigned char vers; + unsigned char num_card; + unsigned char reserv1[32]; +}; + + +#endif /* __LINUX_APPLICOM_H__ */ diff --git a/kernel/drivers/char/bfin-otp.c b/kernel/drivers/char/bfin-otp.c new file mode 100644 index 000000000..44660f1c4 --- /dev/null +++ b/kernel/drivers/char/bfin-otp.c @@ -0,0 +1,275 @@ +/* + * Blackfin On-Chip OTP Memory Interface + * + * Copyright 2007-2009 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) +#define stampit() stamp("here i am") +#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); }) + +#define DRIVER_NAME "bfin-otp" +#define PFX DRIVER_NAME ": " + +static DEFINE_MUTEX(bfin_otp_lock); + +/** + * bfin_otp_read - Read OTP pages + * + * All reads must be in half page chunks (half page == 64 bits). + */ +static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count, loff_t *pos) +{ + ssize_t bytes_done; + u32 page, flags, ret; + u64 content; + + stampit(); + + if (count % sizeof(u64)) + return -EMSGSIZE; + + if (mutex_lock_interruptible(&bfin_otp_lock)) + return -ERESTARTSYS; + + bytes_done = 0; + page = *pos / (sizeof(u64) * 2); + while (bytes_done < count) { + flags = (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF); + stamp("processing page %i (0x%x:%s)", page, flags, + (flags & OTP_UPPER_HALF ? "upper" : "lower")); + ret = bfrom_OtpRead(page, flags, &content); + if (ret & OTP_MASTER_ERROR) { + stamp("error from otp: 0x%x", ret); + bytes_done = -EIO; + break; + } + if (copy_to_user(buff + bytes_done, &content, sizeof(content))) { + bytes_done = -EFAULT; + break; + } + if (flags & OTP_UPPER_HALF) + ++page; + bytes_done += sizeof(content); + *pos += sizeof(content); + } + + mutex_unlock(&bfin_otp_lock); + + return bytes_done; +} + +#ifdef CONFIG_BFIN_OTP_WRITE_ENABLE +static bool allow_writes; + +/** + * bfin_otp_init_timing - setup OTP timing parameters + * + * Required before doing any write operation. Algorithms from HRM. + */ +static u32 bfin_otp_init_timing(void) +{ + u32 tp1, tp2, tp3, timing; + + tp1 = get_sclk() / 1000000; + tp2 = (2 * get_sclk() / 10000000) << 8; + tp3 = (0x1401) << 15; + timing = tp1 | tp2 | tp3; + if (bfrom_OtpCommand(OTP_INIT, timing)) + return 0; + + return timing; +} + +/** + * bfin_otp_deinit_timing - set timings to only allow reads + * + * Should be called after all writes are done. + */ +static void bfin_otp_deinit_timing(u32 timing) +{ + /* mask bits [31:15] so that any attempts to write fail */ + bfrom_OtpCommand(OTP_CLOSE, 0); + bfrom_OtpCommand(OTP_INIT, timing & ~(-1 << 15)); + bfrom_OtpCommand(OTP_CLOSE, 0); +} + +/** + * bfin_otp_write - write OTP pages + * + * All writes must be in half page chunks (half page == 64 bits). 
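+ *
+ * The file-position mapping mirrors bfin_otp_read() above: the page is
+ * pos / 16 and an offset of 8 within a page selects OTP_UPPER_HALF, so
+ * e.g. a write at pos 0x18 lands in the upper half of page 1.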
+ */ +static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t count, loff_t *pos) +{ + ssize_t bytes_done; + u32 timing, page, base_flags, flags, ret; + u64 content; + + if (!allow_writes) + return -EACCES; + + if (count % sizeof(u64)) + return -EMSGSIZE; + + if (mutex_lock_interruptible(&bfin_otp_lock)) + return -ERESTARTSYS; + + stampit(); + + timing = bfin_otp_init_timing(); + if (timing == 0) { + mutex_unlock(&bfin_otp_lock); + return -EIO; + } + + base_flags = OTP_CHECK_FOR_PREV_WRITE; + + bytes_done = 0; + page = *pos / (sizeof(u64) * 2); + while (bytes_done < count) { + flags = base_flags | (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF); + stamp("processing page %i (0x%x:%s) from %p", page, flags, + (flags & OTP_UPPER_HALF ? "upper" : "lower"), buff + bytes_done); + if (copy_from_user(&content, buff + bytes_done, sizeof(content))) { + bytes_done = -EFAULT; + break; + } + ret = bfrom_OtpWrite(page, flags, &content); + if (ret & OTP_MASTER_ERROR) { + stamp("error from otp: 0x%x", ret); + bytes_done = -EIO; + break; + } + if (flags & OTP_UPPER_HALF) + ++page; + bytes_done += sizeof(content); + *pos += sizeof(content); + } + + bfin_otp_deinit_timing(timing); + + mutex_unlock(&bfin_otp_lock); + + return bytes_done; +} + +static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg) +{ + stampit(); + + switch (cmd) { + case OTPLOCK: { + u32 timing; + int ret = -EIO; + + if (!allow_writes) + return -EACCES; + + if (mutex_lock_interruptible(&bfin_otp_lock)) + return -ERESTARTSYS; + + timing = bfin_otp_init_timing(); + if (timing) { + u32 otp_result = bfrom_OtpWrite(arg, OTP_LOCK, NULL); + stamp("locking page %lu resulted in 0x%x", arg, otp_result); + if (!(otp_result & OTP_MASTER_ERROR)) + ret = 0; + + bfin_otp_deinit_timing(timing); + } + + mutex_unlock(&bfin_otp_lock); + + return ret; + } + + case MEMLOCK: + allow_writes = false; + return 0; + + case MEMUNLOCK: + allow_writes = true; + return 0; + } + + return -EINVAL; +} +#else +# define bfin_otp_write NULL +# define bfin_otp_ioctl NULL +#endif + +static const struct file_operations bfin_otp_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = bfin_otp_ioctl, + .read = bfin_otp_read, + .write = bfin_otp_write, + .llseek = default_llseek, +}; + +static struct miscdevice bfin_otp_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &bfin_otp_fops, +}; + +/** + * bfin_otp_init - Initialize module + * + * Registers the device and notifier handler. Actual device + * initialization is handled by bfin_otp_open(). + */ +static int __init bfin_otp_init(void) +{ + int ret; + + stampit(); + + ret = misc_register(&bfin_otp_misc_device); + if (ret) { + pr_init(KERN_ERR PFX "unable to register a misc device\n"); + return ret; + } + + pr_init(KERN_INFO PFX "initialized\n"); + + return 0; +} + +/** + * bfin_otp_exit - Deinitialize module + * + * Unregisters the device and notifier handler. Actual device + * deinitialization is handled by bfin_otp_close(). 
+ */ +static void __exit bfin_otp_exit(void) +{ + stampit(); + + misc_deregister(&bfin_otp_misc_device); +} + +module_init(bfin_otp_init); +module_exit(bfin_otp_exit); + +MODULE_AUTHOR("Mike Frysinger "); +MODULE_DESCRIPTION("Blackfin OTP Memory Interface"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/bsr.c b/kernel/drivers/char/bsr.c new file mode 100644 index 000000000..a6cef548e --- /dev/null +++ b/kernel/drivers/char/bsr.c @@ -0,0 +1,361 @@ +/* IBM POWER Barrier Synchronization Register Driver + * + * Copyright IBM Corporation 2008 + * + * Author: Sonny Rao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + This driver exposes a special register which can be used for fast + synchronization across a large SMP machine. The hardware is exposed + as an array of bytes where each process will write to one of the bytes to + indicate it has finished the current stage and this update is broadcast to + all processors without having to bounce a cacheline between them. In + POWER5 and POWER6 there is one of these registers per SMP, but it is + presented in two forms; first, it is given as a whole and then as a number + of smaller registers which alias to parts of the single whole register. + This can potentially allow multiple groups of processes to each have their + own private synchronization device. + + Note that this hardware *must* be written to using *only* single byte writes. 
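+ As an illustrative barrier round: each participant issues a single
+ byte store of the new stage number to its own byte, then spins on an
+ aligned 8-byte load of the register until every byte shows that stage.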
+ It may be read using 1, 2, 4, or 8 byte loads which must be aligned since + this region is treated as cache-inhibited processes should also use a + full sync before and after writing to the BSR to ensure all stores and + the BSR update have made it to all chips in the system +*/ + +/* This is arbitrary number, up to Power6 it's been 17 or fewer */ +#define BSR_MAX_DEVS (32) + +struct bsr_dev { + u64 bsr_addr; /* Real address */ + u64 bsr_len; /* length of mem region we can map */ + unsigned bsr_bytes; /* size of the BSR reg itself */ + unsigned bsr_stride; /* interval at which BSR repeats in the page */ + unsigned bsr_type; /* maps to enum below */ + unsigned bsr_num; /* bsr id number for its type */ + int bsr_minor; + + struct list_head bsr_list; + + dev_t bsr_dev; + struct cdev bsr_cdev; + struct device *bsr_device; + char bsr_name[32]; + +}; + +static unsigned total_bsr_devs; +static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs); +static struct class *bsr_class; +static int bsr_major; + +enum { + BSR_8 = 0, + BSR_16 = 1, + BSR_64 = 2, + BSR_128 = 3, + BSR_4096 = 4, + BSR_UNKNOWN = 5, + BSR_MAX = 6, +}; + +static unsigned bsr_types[BSR_MAX]; + +static ssize_t +bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct bsr_dev *bsr_dev = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", bsr_dev->bsr_bytes); +} +static DEVICE_ATTR_RO(bsr_size); + +static ssize_t +bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct bsr_dev *bsr_dev = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", bsr_dev->bsr_stride); +} +static DEVICE_ATTR_RO(bsr_stride); + +static ssize_t +bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct bsr_dev *bsr_dev = dev_get_drvdata(dev); + return sprintf(buf, "%llu\n", bsr_dev->bsr_len); +} +static DEVICE_ATTR_RO(bsr_length); + +static struct attribute *bsr_dev_attrs[] = { + &dev_attr_bsr_size.attr, + &dev_attr_bsr_stride.attr, + &dev_attr_bsr_length.attr, + NULL, +}; +ATTRIBUTE_GROUPS(bsr_dev); + +static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long size = vma->vm_end - vma->vm_start; + struct bsr_dev *dev = filp->private_data; + int ret; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* check for the case of a small BSR device and map one 4k page for it*/ + if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE) + ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12, + vma->vm_page_prot); + else if (size <= dev->bsr_len) + ret = io_remap_pfn_range(vma, vma->vm_start, + dev->bsr_addr >> PAGE_SHIFT, + size, vma->vm_page_prot); + else + return -EINVAL; + + if (ret) + return -EAGAIN; + + return 0; +} + +static int bsr_open(struct inode * inode, struct file * filp) +{ + struct cdev *cdev = inode->i_cdev; + struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev); + + filp->private_data = dev; + return 0; +} + +static const struct file_operations bsr_fops = { + .owner = THIS_MODULE, + .mmap = bsr_mmap, + .open = bsr_open, + .llseek = noop_llseek, +}; + +static void bsr_cleanup_devs(void) +{ + struct bsr_dev *cur, *n; + + list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) { + if (cur->bsr_device) { + cdev_del(&cur->bsr_cdev); + device_del(cur->bsr_device); + } + list_del(&cur->bsr_list); + kfree(cur); + } +} + +static int bsr_add_node(struct device_node *bn) +{ + int bsr_stride_len, bsr_bytes_len, num_bsr_devs; + const u32 *bsr_stride; + const u32 *bsr_bytes; + unsigned i; + int ret = -ENODEV; + + 
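+	/* A hypothetical firmware node, for illustration only (the values
+	 * are invented; only the names come from the lookups below):
+	 *
+	 *	bsr@3fff80000000 {
+	 *		compatible = "ibm,bsr";
+	 *		reg = <...>;
+	 *		ibm,lock-stride = <0x1000>;
+	 *		ibm,#lock-bytes = <8>;
+	 *	};
+	 *
+	 * Both property arrays must carry one u32 per BSR device, hence the
+	 * length check below.
+	 */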
bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len); + bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len); + + if (!bsr_stride || !bsr_bytes || + (bsr_stride_len != bsr_bytes_len)) { + printk(KERN_ERR "bsr of-node has missing/incorrect property\n"); + return ret; + } + + num_bsr_devs = bsr_bytes_len / sizeof(u32); + + for (i = 0 ; i < num_bsr_devs; i++) { + struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev), + GFP_KERNEL); + struct resource res; + int result; + + if (!cur) { + printk(KERN_ERR "Unable to alloc bsr dev\n"); + ret = -ENOMEM; + goto out_err; + } + + result = of_address_to_resource(bn, i, &res); + if (result < 0) { + printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n"); + kfree(cur); + continue; + } + + cur->bsr_minor = i + total_bsr_devs; + cur->bsr_addr = res.start; + cur->bsr_len = resource_size(&res); + cur->bsr_bytes = bsr_bytes[i]; + cur->bsr_stride = bsr_stride[i]; + cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); + + /* if we have a bsr_len of > 4k and less then PAGE_SIZE (64k pages) */ + /* we can only map 4k of it, so only advertise the 4k in sysfs */ + if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE) + cur->bsr_len = 4096; + + switch(cur->bsr_bytes) { + case 8: + cur->bsr_type = BSR_8; + break; + case 16: + cur->bsr_type = BSR_16; + break; + case 64: + cur->bsr_type = BSR_64; + break; + case 128: + cur->bsr_type = BSR_128; + break; + case 4096: + cur->bsr_type = BSR_4096; + break; + default: + cur->bsr_type = BSR_UNKNOWN; + } + + cur->bsr_num = bsr_types[cur->bsr_type]; + snprintf(cur->bsr_name, 32, "bsr%d_%d", + cur->bsr_bytes, cur->bsr_num); + + cdev_init(&cur->bsr_cdev, &bsr_fops); + result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1); + if (result) { + kfree(cur); + goto out_err; + } + + cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev, + cur, "%s", cur->bsr_name); + if (IS_ERR(cur->bsr_device)) { + printk(KERN_ERR "device_create failed for %s\n", + cur->bsr_name); + cdev_del(&cur->bsr_cdev); + kfree(cur); + goto out_err; + } + + bsr_types[cur->bsr_type] = cur->bsr_num + 1; + list_add_tail(&cur->bsr_list, &bsr_devs); + } + + total_bsr_devs += num_bsr_devs; + + return 0; + + out_err: + + bsr_cleanup_devs(); + return ret; +} + +static int bsr_create_devs(struct device_node *bn) +{ + int ret; + + while (bn) { + ret = bsr_add_node(bn); + if (ret) { + of_node_put(bn); + return ret; + } + bn = of_find_compatible_node(bn, NULL, "ibm,bsr"); + } + return 0; +} + +static int __init bsr_init(void) +{ + struct device_node *np; + dev_t bsr_dev; + int ret = -ENODEV; + + np = of_find_compatible_node(NULL, NULL, "ibm,bsr"); + if (!np) + goto out_err; + + bsr_class = class_create(THIS_MODULE, "bsr"); + if (IS_ERR(bsr_class)) { + printk(KERN_ERR "class_create() failed for bsr_class\n"); + ret = PTR_ERR(bsr_class); + goto out_err_1; + } + bsr_class->dev_groups = bsr_dev_groups; + + ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr"); + bsr_major = MAJOR(bsr_dev); + if (ret < 0) { + printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n"); + goto out_err_2; + } + + if ((ret = bsr_create_devs(np)) < 0) { + np = NULL; + goto out_err_3; + } + + return 0; + + out_err_3: + unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS); + + out_err_2: + class_destroy(bsr_class); + + out_err_1: + of_node_put(np); + + out_err: + + return ret; +} + +static void __exit bsr_exit(void) +{ + + bsr_cleanup_devs(); + + if (bsr_class) + class_destroy(bsr_class); + + if (bsr_major) + unregister_chrdev_region(MKDEV(bsr_major, 0), 
BSR_MAX_DEVS); +} + +module_init(bsr_init); +module_exit(bsr_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sonny Rao "); diff --git a/kernel/drivers/char/ds1302.c b/kernel/drivers/char/ds1302.c new file mode 100644 index 000000000..7d34b2037 --- /dev/null +++ b/kernel/drivers/char/ds1302.c @@ -0,0 +1,357 @@ +/*!*************************************************************************** +*! +*! FILE NAME : ds1302.c +*! +*! DESCRIPTION: Implements an interface for the DS1302 RTC +*! +*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init, get_rtc_status +*! +*! --------------------------------------------------------------------------- +*! +*! (C) Copyright 1999, 2000, 2001 Axis Communications AB, LUND, SWEDEN +*! +*!***************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#if defined(CONFIG_M32R) +#include +#endif + +#define RTC_MAJOR_NR 121 /* local major, change later */ + +static DEFINE_MUTEX(rtc_mutex); +static const char ds1302_name[] = "ds1302"; + +/* Send 8 bits. */ +static void +out_byte_rtc(unsigned int reg_addr, unsigned char x) +{ + //RST H + outw(0x0001,(unsigned long)PLD_RTCRSTODT); + //write data + outw(((x<<8)|(reg_addr&0xff)),(unsigned long)PLD_RTCWRDATA); + //WE + outw(0x0002,(unsigned long)PLD_RTCCR); + //wait + while(inw((unsigned long)PLD_RTCCR)); + + //RST L + outw(0x0000,(unsigned long)PLD_RTCRSTODT); + +} + +static unsigned char +in_byte_rtc(unsigned int reg_addr) +{ + unsigned char retval; + + //RST H + outw(0x0001,(unsigned long)PLD_RTCRSTODT); + //write data + outw((reg_addr&0xff),(unsigned long)PLD_RTCRDDATA); + //RE + outw(0x0001,(unsigned long)PLD_RTCCR); + //wait + while(inw((unsigned long)PLD_RTCCR)); + + //read data + retval=(inw((unsigned long)PLD_RTCRDDATA) & 0xff00)>>8; + + //RST L + outw(0x0000,(unsigned long)PLD_RTCRSTODT); + + return retval; +} + +/* Enable writing. */ + +static void +ds1302_wenable(void) +{ + out_byte_rtc(0x8e,0x00); +} + +/* Disable writing. */ + +static void +ds1302_wdisable(void) +{ + out_byte_rtc(0x8e,0x80); +} + + + +/* Read a byte from the selected register in the DS1302. */ + +unsigned char +ds1302_readreg(int reg) +{ + unsigned char x; + + x=in_byte_rtc((0x81 | (reg << 1))); /* read register */ + + return x; +} + +/* Write a byte to the selected register. 
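+   The DS1302 command byte carries the register number in bits 5..1 with
+   bit 0 as the read flag, so 0x80 | (reg << 1) selects a clock register
+   for write and 0x81 | (reg << 1) the same register for read, as in
+   ds1302_readreg() above; the probe uses the RAM variants 0xc0/0xc1.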
*/ + +void +ds1302_writereg(int reg, unsigned char val) +{ + ds1302_wenable(); + out_byte_rtc((0x80 | (reg << 1)),val); + ds1302_wdisable(); +} + +void +get_rtc_time(struct rtc_time *rtc_tm) +{ + unsigned long flags; + + local_irq_save(flags); + + rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); + rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); + rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); + rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); + rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); + rtc_tm->tm_year = CMOS_READ(RTC_YEAR); + + local_irq_restore(flags); + + rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); + rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); + rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); + rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); + rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); + rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); + + /* + * Account for differences between how the RTC uses the values + * and how they are defined in a struct rtc_time; + */ + + if (rtc_tm->tm_year <= 69) + rtc_tm->tm_year += 100; + + rtc_tm->tm_mon--; +} + +static unsigned char days_in_mo[] = + {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */ + +static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned long flags; + + switch(cmd) { + case RTC_RD_TIME: /* read the time/date from RTC */ + { + struct rtc_time rtc_tm; + + memset(&rtc_tm, 0, sizeof (struct rtc_time)); + mutex_lock(&rtc_mutex); + get_rtc_time(&rtc_tm); + mutex_unlock(&rtc_mutex); + if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time))) + return -EFAULT; + return 0; + } + + case RTC_SET_TIME: /* set the RTC */ + { + struct rtc_time rtc_tm; + unsigned char mon, day, hrs, min, sec, leap_yr; + unsigned int yrs; + + if (!capable(CAP_SYS_TIME)) + return -EPERM; + + if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time))) + return -EFAULT; + + yrs = rtc_tm.tm_year + 1900; + mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ + day = rtc_tm.tm_mday; + hrs = rtc_tm.tm_hour; + min = rtc_tm.tm_min; + sec = rtc_tm.tm_sec; + + + if ((yrs < 1970) || (yrs > 2069)) + return -EINVAL; + + leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); + + if ((mon > 12) || (day == 0)) + return -EINVAL; + + if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) + return -EINVAL; + + if ((hrs >= 24) || (min >= 60) || (sec >= 60)) + return -EINVAL; + + if (yrs >= 2000) + yrs -= 2000; /* RTC (0, 1, ... 69) */ + else + yrs -= 1900; /* RTC (70, 71, ... 99) */ + + sec = bin2bcd(sec); + min = bin2bcd(min); + hrs = bin2bcd(hrs); + day = bin2bcd(day); + mon = bin2bcd(mon); + yrs = bin2bcd(yrs); + + mutex_lock(&rtc_mutex); + local_irq_save(flags); + CMOS_WRITE(yrs, RTC_YEAR); + CMOS_WRITE(mon, RTC_MONTH); + CMOS_WRITE(day, RTC_DAY_OF_MONTH); + CMOS_WRITE(hrs, RTC_HOURS); + CMOS_WRITE(min, RTC_MINUTES); + CMOS_WRITE(sec, RTC_SECONDS); + local_irq_restore(flags); + mutex_unlock(&rtc_mutex); + + /* Notice that at this point, the RTC is updated but + * the kernel is still running with the old time. + * You need to set that separately with settimeofday + * or adjtimex. 
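+				 * A caller would typically pair the two, e.g.
+				 * ioctl(fd, RTC_SET_TIME, &tm) followed by
+				 * settimeofday(), so the system clock and the
+				 * RTC agree.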
+ */ + return 0; + } + + case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */ + { + int tcs_val; + + if (!capable(CAP_SYS_TIME)) + return -EPERM; + + if(copy_from_user(&tcs_val, (int*)arg, sizeof(int))) + return -EFAULT; + + mutex_lock(&rtc_mutex); + tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F); + ds1302_writereg(RTC_TRICKLECHARGER, tcs_val); + mutex_unlock(&rtc_mutex); + return 0; + } + default: + return -EINVAL; + } +} + +int +get_rtc_status(char *buf) +{ + char *p; + struct rtc_time tm; + + p = buf; + + get_rtc_time(&tm); + + /* + * There is no way to tell if the luser has the RTC set for local + * time or for Universal Standard Time (GMT). Probably local though. + */ + + p += sprintf(p, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); + + return p - buf; +} + + +/* The various file operations we support. */ + +static const struct file_operations rtc_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = rtc_ioctl, + .llseek = noop_llseek, +}; + +/* Probe for the chip by writing something to its RAM and try reading it back. */ + +#define MAGIC_PATTERN 0x42 + +static int __init +ds1302_probe(void) +{ + int retval, res, baur; + + baur=(boot_cpu_data.bus_clock/(2*1000*1000)); + + printk("%s: Set PLD_RTCBAUR = %d\n", ds1302_name,baur); + + outw(0x0000,(unsigned long)PLD_RTCCR); + outw(0x0000,(unsigned long)PLD_RTCRSTODT); + outw(baur,(unsigned long)PLD_RTCBAUR); + + /* Try to talk to timekeeper. */ + + ds1302_wenable(); + /* write RAM byte 0 */ + /* write something magic */ + out_byte_rtc(0xc0,MAGIC_PATTERN); + + /* read RAM byte 0 */ + if((res = in_byte_rtc(0xc1)) == MAGIC_PATTERN) { + char buf[100]; + ds1302_wdisable(); + printk("%s: RTC found.\n", ds1302_name); + get_rtc_status(buf); + printk(buf); + retval = 1; + } else { + printk("%s: RTC not found.\n", ds1302_name); + retval = 0; + } + + return retval; +} + + +/* Just probe for the RTC and register the device to handle the ioctl needed. */ + +int __init +ds1302_init(void) +{ + if (!ds1302_probe()) { + return -1; + } + return 0; +} + +static int __init ds1302_register(void) +{ + ds1302_init(); + if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) { + printk(KERN_INFO "%s: unable to get major %d for rtc\n", + ds1302_name, RTC_MAJOR_NR); + return -1; + } + return 0; +} + +module_init(ds1302_register); diff --git a/kernel/drivers/char/ds1620.c b/kernel/drivers/char/ds1620.c new file mode 100644 index 000000000..0fae5296e --- /dev/null +++ b/kernel/drivers/char/ds1620.c @@ -0,0 +1,435 @@ +/* + * linux/drivers/char/ds1620.c: Dallas Semiconductors DS1620 + * thermometer driver (as used in the Rebel.com NetWinder) + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_PROC_FS +/* define for /proc interface */ +#define THERM_USE_PROC +#endif + +/* Definitions for DS1620 chip */ +#define THERM_START_CONVERT 0xee +#define THERM_RESET 0xaf +#define THERM_READ_CONFIG 0xac +#define THERM_READ_TEMP 0xaa +#define THERM_READ_TL 0xa2 +#define THERM_READ_TH 0xa1 +#define THERM_WRITE_CONFIG 0x0c +#define THERM_WRITE_TL 0x02 +#define THERM_WRITE_TH 0x01 + +#define CFG_CPU 2 +#define CFG_1SHOT 1 + +static DEFINE_MUTEX(ds1620_mutex); +static const char *fan_state[] = { "off", "on", "on (hardwired)" }; + +/* + * Start of NetWinder specifics + * Note! 
We have to hold the gpio lock with IRQs disabled over the + * whole of our transaction to the Dallas chip, since there is a + * chance that the WaveArtist driver could touch these bits to + * enable or disable the speaker. + */ +extern unsigned int system_rev; + +static inline void netwinder_ds1620_set_clk(int clk) +{ + nw_gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0); +} + +static inline void netwinder_ds1620_set_data(int dat) +{ + nw_gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0); +} + +static inline int netwinder_ds1620_get_data(void) +{ + return nw_gpio_read() & GPIO_DATA; +} + +static inline void netwinder_ds1620_set_data_dir(int dir) +{ + nw_gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0); +} + +static inline void netwinder_ds1620_reset(void) +{ + nw_cpld_modify(CPLD_DS_ENABLE, 0); + nw_cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE); +} + +static inline void netwinder_lock(unsigned long *flags) +{ + raw_spin_lock_irqsave(&nw_gpio_lock, *flags); +} + +static inline void netwinder_unlock(unsigned long *flags) +{ + raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags); +} + +static inline void netwinder_set_fan(int i) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&nw_gpio_lock, flags); + nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0); + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); +} + +static inline int netwinder_get_fan(void) +{ + if ((system_rev & 0xf000) == 0x4000) + return FAN_ALWAYS_ON; + + return (nw_gpio_read() & GPIO_FAN) ? FAN_ON : FAN_OFF; +} + +/* + * End of NetWinder specifics + */ + +static void ds1620_send_bits(int nr, int value) +{ + int i; + + for (i = 0; i < nr; i++) { + netwinder_ds1620_set_data(value & 1); + netwinder_ds1620_set_clk(0); + udelay(1); + netwinder_ds1620_set_clk(1); + udelay(1); + + value >>= 1; + } +} + +static unsigned int ds1620_recv_bits(int nr) +{ + unsigned int value = 0, mask = 1; + int i; + + netwinder_ds1620_set_data(0); + + for (i = 0; i < nr; i++) { + netwinder_ds1620_set_clk(0); + udelay(1); + + if (netwinder_ds1620_get_data()) + value |= mask; + + mask <<= 1; + + netwinder_ds1620_set_clk(1); + udelay(1); + } + + return value; +} + +static void ds1620_out(int cmd, int bits, int value) +{ + unsigned long flags; + + netwinder_lock(&flags); + netwinder_ds1620_set_clk(1); + netwinder_ds1620_set_data_dir(0); + netwinder_ds1620_reset(); + + udelay(1); + + ds1620_send_bits(8, cmd); + if (bits) + ds1620_send_bits(bits, value); + + udelay(1); + + netwinder_ds1620_reset(); + netwinder_unlock(&flags); + + msleep(20); +} + +static unsigned int ds1620_in(int cmd, int bits) +{ + unsigned long flags; + unsigned int value; + + netwinder_lock(&flags); + netwinder_ds1620_set_clk(1); + netwinder_ds1620_set_data_dir(0); + netwinder_ds1620_reset(); + + udelay(1); + + ds1620_send_bits(8, cmd); + + netwinder_ds1620_set_data_dir(1); + value = ds1620_recv_bits(bits); + + netwinder_ds1620_reset(); + netwinder_unlock(&flags); + + return value; +} + +static int cvt_9_to_int(unsigned int val) +{ + if (val & 0x100) + val |= 0xfffffe00; + + return val; +} + +static void ds1620_write_state(struct therm *therm) +{ + ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU); + ds1620_out(THERM_WRITE_TL, 9, therm->lo); + ds1620_out(THERM_WRITE_TH, 9, therm->hi); + ds1620_out(THERM_START_CONVERT, 0, 0); +} + +static void ds1620_read_state(struct therm *therm) +{ + therm->lo = cvt_9_to_int(ds1620_in(THERM_READ_TL, 9)); + therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9)); +} + +static int ds1620_open(struct inode *inode, struct file *file) +{ + return nonseekable_open(inode, file); +} 
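+
+/*
+ * Temperatures cross this interface as 9-bit two's complement values in
+ * 0.5 degree C steps; cvt_9_to_int() above supplies the sign extension.
+ * E.g. raw 0x1ff sign-extends to -1, i.e. -0.5 C, and the ">> 1" /
+ * "& 1" pairs below split a value into whole degrees and the half.
+ */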
+ +static ssize_t +ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) +{ + signed int cur_temp; + signed char cur_temp_degF; + + cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1; + + /* convert to Fahrenheit, as per wdt.c */ + cur_temp_degF = (cur_temp * 9) / 5 + 32; + + if (copy_to_user(buf, &cur_temp_degF, 1)) + return -EFAULT; + + return 1; +} + +static int +ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct therm therm; + union { + struct therm __user *therm; + int __user *i; + } uarg; + int i; + + uarg.i = (int __user *)arg; + + switch(cmd) { + case CMD_SET_THERMOSTATE: + case CMD_SET_THERMOSTATE2: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (cmd == CMD_SET_THERMOSTATE) { + if (get_user(therm.hi, uarg.i)) + return -EFAULT; + therm.lo = therm.hi - 3; + } else { + if (copy_from_user(&therm, uarg.therm, sizeof(therm))) + return -EFAULT; + } + + therm.lo <<= 1; + therm.hi <<= 1; + + ds1620_write_state(&therm); + break; + + case CMD_GET_THERMOSTATE: + case CMD_GET_THERMOSTATE2: + ds1620_read_state(&therm); + + therm.lo >>= 1; + therm.hi >>= 1; + + if (cmd == CMD_GET_THERMOSTATE) { + if (put_user(therm.hi, uarg.i)) + return -EFAULT; + } else { + if (copy_to_user(uarg.therm, &therm, sizeof(therm))) + return -EFAULT; + } + break; + + case CMD_GET_TEMPERATURE: + case CMD_GET_TEMPERATURE2: + i = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)); + + if (cmd == CMD_GET_TEMPERATURE) + i >>= 1; + + return put_user(i, uarg.i) ? -EFAULT : 0; + + case CMD_GET_STATUS: + i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3; + + return put_user(i, uarg.i) ? -EFAULT : 0; + + case CMD_GET_FAN: + i = netwinder_get_fan(); + + return put_user(i, uarg.i) ? -EFAULT : 0; + + case CMD_SET_FAN: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(i, uarg.i)) + return -EFAULT; + + netwinder_set_fan(i); + break; + + default: + return -ENOIOCTLCMD; + } + + return 0; +} + +static long +ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + mutex_lock(&ds1620_mutex); + ret = ds1620_ioctl(file, cmd, arg); + mutex_unlock(&ds1620_mutex); + + return ret; +} + +#ifdef THERM_USE_PROC +static int ds1620_proc_therm_show(struct seq_file *m, void *v) +{ + struct therm th; + int temp; + + ds1620_read_state(&th); + temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)); + + seq_printf(m, "Thermostat: HI %i.%i, LOW %i.%i; temperature: %i.%i C, fan %s\n", + th.hi >> 1, th.hi & 1 ? 5 : 0, + th.lo >> 1, th.lo & 1 ? 5 : 0, + temp >> 1, temp & 1 ? 
5 : 0, + fan_state[netwinder_get_fan()]); + return 0; +} + +static int ds1620_proc_therm_open(struct inode *inode, struct file *file) +{ + return single_open(file, ds1620_proc_therm_show, NULL); +} + +static const struct file_operations ds1620_proc_therm_fops = { + .open = ds1620_proc_therm_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static const struct file_operations ds1620_fops = { + .owner = THIS_MODULE, + .open = ds1620_open, + .read = ds1620_read, + .unlocked_ioctl = ds1620_unlocked_ioctl, + .llseek = no_llseek, +}; + +static struct miscdevice ds1620_miscdev = { + TEMP_MINOR, + "temp", + &ds1620_fops +}; + +static int __init ds1620_init(void) +{ + int ret; + struct therm th, th_start; + + if (!machine_is_netwinder()) + return -ENODEV; + + ds1620_out(THERM_RESET, 0, 0); + ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU); + ds1620_out(THERM_START_CONVERT, 0, 0); + + /* + * Trigger the fan to start by setting + * temperature high point low. This kicks + * the fan into action. + */ + ds1620_read_state(&th); + th_start.lo = 0; + th_start.hi = 1; + ds1620_write_state(&th_start); + + msleep(2000); + + ds1620_write_state(&th); + + ret = misc_register(&ds1620_miscdev); + if (ret < 0) + return ret; + +#ifdef THERM_USE_PROC + if (!proc_create("therm", 0, NULL, &ds1620_proc_therm_fops)) + printk(KERN_ERR "therm: unable to register /proc/therm\n"); +#endif + + ds1620_read_state(&th); + ret = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)); + + printk(KERN_INFO "Thermostat: high %i.%i, low %i.%i, " + "current %i.%i C, fan %s.\n", + th.hi >> 1, th.hi & 1 ? 5 : 0, + th.lo >> 1, th.lo & 1 ? 5 : 0, + ret >> 1, ret & 1 ? 5 : 0, + fan_state[netwinder_get_fan()]); + + return 0; +} + +static void __exit ds1620_exit(void) +{ +#ifdef THERM_USE_PROC + remove_proc_entry("therm", NULL); +#endif + misc_deregister(&ds1620_miscdev); +} + +module_init(ds1620_init); +module_exit(ds1620_exit); + +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/dsp56k.c b/kernel/drivers/char/dsp56k.c new file mode 100644 index 000000000..8bf70e8c3 --- /dev/null +++ b/kernel/drivers/char/dsp56k.c @@ -0,0 +1,534 @@ +/* + * The DSP56001 Device Driver, saviour of the Free World(tm) + * + * Authors: Fredrik Noring + * lars brinkhoff + * Tomas Berndtsson + * + * First version May 1996 + * + * History: + * 97-01-29 Tomas Berndtsson, + * Integrated with Linux 2.1.21 kernel sources. + * 97-02-15 Tomas Berndtsson, + * Fixed for kernel 2.1.26 + * + * BUGS: + * Hmm... there must be something here :) + * + * Copyright (C) 1996,1997 Fredrik Noring, lars brinkhoff & Tomas Berndtsson + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#include +#include +#include +#include +#include /* guess what */ +#include +#include +#include +#include +#include +#include +#include +#include /* For put_user and get_user */ + +#include +#include + +#include + +/* minor devices */ +#define DSP56K_DEV_56001 0 /* The only device so far */ + +#define TIMEOUT 10 /* Host port timeout in number of tries */ +#define MAXIO 2048 /* Maximum number of words before sleep */ +#define DSP56K_MAX_BINARY_LENGTH (3*64*1024) + +#define DSP56K_TX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_TREQ +#define DSP56K_RX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_RREQ +#define DSP56K_TX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_TREQ +#define DSP56K_RX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_RREQ + +#define DSP56K_TRANSMIT (dsp56k_host_interface.isr & DSP56K_ISR_TXDE) +#define DSP56K_RECEIVE (dsp56k_host_interface.isr & DSP56K_ISR_RXDF) + +#define handshake(count, maxio, timeout, ENABLE, f) \ +{ \ + long i, t, m; \ + while (count > 0) { \ + m = min_t(unsigned long, count, maxio); \ + for (i = 0; i < m; i++) { \ + for (t = 0; t < timeout && !ENABLE; t++) \ + msleep(20); \ + if(!ENABLE) \ + return -EIO; \ + f; \ + } \ + count -= m; \ + if (m == maxio) msleep(20); \ + } \ +} + +#define tx_wait(n) \ +{ \ + int t; \ + for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \ + msleep(10); \ + if(!DSP56K_TRANSMIT) { \ + return -EIO; \ + } \ +} + +#define rx_wait(n) \ +{ \ + int t; \ + for(t = 0; t < n && !DSP56K_RECEIVE; t++) \ + msleep(10); \ + if(!DSP56K_RECEIVE) { \ + return -EIO; \ + } \ +} + +static DEFINE_MUTEX(dsp56k_mutex); +static struct dsp56k_device { + unsigned long in_use; + long maxio, timeout; + int tx_wsize, rx_wsize; +} dsp56k; + +static struct class *dsp56k_class; + +static int dsp56k_reset(void) +{ + u_char status; + + /* Power down the DSP */ + sound_ym.rd_data_reg_sel = 14; + status = sound_ym.rd_data_reg_sel & 0xef; + sound_ym.wd_data = status; + sound_ym.wd_data = status | 0x10; + + udelay(10); + + /* Power up the DSP */ + sound_ym.rd_data_reg_sel = 14; + sound_ym.wd_data = sound_ym.rd_data_reg_sel & 0xef; + + return 0; +} + +static int dsp56k_upload(u_char __user *bin, int len) +{ + struct platform_device *pdev; + const struct firmware *fw; + const char fw_name[] = "dsp56k/bootstrap.bin"; + int err; + int i; + + dsp56k_reset(); + + pdev = platform_device_register_simple("dsp56k", 0, NULL, 0); + if (IS_ERR(pdev)) { + printk(KERN_ERR "Failed to register device for \"%s\"\n", + fw_name); + return -EINVAL; + } + err = request_firmware(&fw, fw_name, &pdev->dev); + platform_device_unregister(pdev); + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fw_name, err); + return err; + } + if (fw->size % 3) { + printk(KERN_ERR "Bogus length %d in image \"%s\"\n", + fw->size, fw_name); + release_firmware(fw); + return -EINVAL; + } + for (i = 0; i < fw->size; i = i + 3) { + /* tx_wait(10); */ + dsp56k_host_interface.data.b[1] = fw->data[i]; + dsp56k_host_interface.data.b[2] = fw->data[i + 1]; + dsp56k_host_interface.data.b[3] = fw->data[i + 2]; + } + release_firmware(fw); + for (; i < 512; i++) { + /* tx_wait(10); */ + dsp56k_host_interface.data.b[1] = 0; + dsp56k_host_interface.data.b[2] = 0; + dsp56k_host_interface.data.b[3] = 0; + } + + for (i = 0; i < len; i++) { + tx_wait(10); + get_user(dsp56k_host_interface.data.b[1], bin++); + get_user(dsp56k_host_interface.data.b[2], bin++); + get_user(dsp56k_host_interface.data.b[3], bin++); + } + + tx_wait(10); + dsp56k_host_interface.data.l = 3; /* Magic execute */ + + return 
0; +} + +static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct inode *inode = file_inode(file); + int dev = iminor(inode) & 0x0f; + + switch(dev) + { + case DSP56K_DEV_56001: + { + + long n; + + /* Don't do anything if nothing is to be done */ + if (!count) return 0; + + n = 0; + switch (dsp56k.rx_wsize) { + case 1: /* 8 bit */ + { + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, + put_user(dsp56k_host_interface.data.b[3], buf+n++)); + return n; + } + case 2: /* 16 bit */ + { + short __user *data; + + count /= 2; + data = (short __user *) buf; + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, + put_user(dsp56k_host_interface.data.w[1], data+n++)); + return 2*n; + } + case 3: /* 24 bit */ + { + count /= 3; + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, + put_user(dsp56k_host_interface.data.b[1], buf+n++); + put_user(dsp56k_host_interface.data.b[2], buf+n++); + put_user(dsp56k_host_interface.data.b[3], buf+n++)); + return 3*n; + } + case 4: /* 32 bit */ + { + long __user *data; + + count /= 4; + data = (long __user *) buf; + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, + put_user(dsp56k_host_interface.data.l, data+n++)); + return 4*n; + } + } + return -EFAULT; + } + + default: + printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); + return -ENXIO; + } +} + +static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count, + loff_t *ppos) +{ + struct inode *inode = file_inode(file); + int dev = iminor(inode) & 0x0f; + + switch(dev) + { + case DSP56K_DEV_56001: + { + long n; + + /* Don't do anything if nothing is to be done */ + if (!count) return 0; + + n = 0; + switch (dsp56k.tx_wsize) { + case 1: /* 8 bit */ + { + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, + get_user(dsp56k_host_interface.data.b[3], buf+n++)); + return n; + } + case 2: /* 16 bit */ + { + const short __user *data; + + count /= 2; + data = (const short __user *)buf; + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, + get_user(dsp56k_host_interface.data.w[1], data+n++)); + return 2*n; + } + case 3: /* 24 bit */ + { + count /= 3; + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, + get_user(dsp56k_host_interface.data.b[1], buf+n++); + get_user(dsp56k_host_interface.data.b[2], buf+n++); + get_user(dsp56k_host_interface.data.b[3], buf+n++)); + return 3*n; + } + case 4: /* 32 bit */ + { + const long __user *data; + + count /= 4; + data = (const long __user *)buf; + handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, + get_user(dsp56k_host_interface.data.l, data+n++)); + return 4*n; + } + } + + return -EFAULT; + } + default: + printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); + return -ENXIO; + } +} + +static long dsp56k_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int dev = iminor(file_inode(file)) & 0x0f; + void __user *argp = (void __user *)arg; + + switch(dev) + { + case DSP56K_DEV_56001: + + switch(cmd) { + case DSP56K_UPLOAD: + { + char __user *bin; + int r, len; + struct dsp56k_upload __user *binary = argp; + + if(get_user(len, &binary->len) < 0) + return -EFAULT; + if(get_user(bin, &binary->bin) < 0) + return -EFAULT; + + if (len == 0) { + return -EINVAL; /* nothing to upload?!? 
*/ + } + if (len > DSP56K_MAX_BINARY_LENGTH) { + return -EINVAL; + } + mutex_lock(&dsp56k_mutex); + r = dsp56k_upload(bin, len); + mutex_unlock(&dsp56k_mutex); + if (r < 0) { + return r; + } + + break; + } + case DSP56K_SET_TX_WSIZE: + if (arg > 4 || arg < 1) + return -EINVAL; + mutex_lock(&dsp56k_mutex); + dsp56k.tx_wsize = (int) arg; + mutex_unlock(&dsp56k_mutex); + break; + case DSP56K_SET_RX_WSIZE: + if (arg > 4 || arg < 1) + return -EINVAL; + mutex_lock(&dsp56k_mutex); + dsp56k.rx_wsize = (int) arg; + mutex_unlock(&dsp56k_mutex); + break; + case DSP56K_HOST_FLAGS: + { + int dir, out, status; + struct dsp56k_host_flags __user *hf = argp; + + if(get_user(dir, &hf->dir) < 0) + return -EFAULT; + if(get_user(out, &hf->out) < 0) + return -EFAULT; + + mutex_lock(&dsp56k_mutex); + if ((dir & 0x1) && (out & 0x1)) + dsp56k_host_interface.icr |= DSP56K_ICR_HF0; + else if (dir & 0x1) + dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0; + if ((dir & 0x2) && (out & 0x2)) + dsp56k_host_interface.icr |= DSP56K_ICR_HF1; + else if (dir & 0x2) + dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1; + + status = 0; + if (dsp56k_host_interface.icr & DSP56K_ICR_HF0) status |= 0x1; + if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2; + if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4; + if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8; + mutex_unlock(&dsp56k_mutex); + return put_user(status, &hf->status); + } + case DSP56K_HOST_CMD: + if (arg > 31) + return -EINVAL; + mutex_lock(&dsp56k_mutex); + dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) | + DSP56K_CVR_HC); + mutex_unlock(&dsp56k_mutex); + break; + default: + return -EINVAL; + } + return 0; + + default: + printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); + return -ENXIO; + } +} + +/* As of 2.1.26 this should be dsp56k_poll, + * but how do I then check device minor number? + * Do I need this function at all??? 
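+ * (The disabled sketch below answers the first question the same way
+ * the other entry points do: iminor(file_inode(file)) & 0x0f.)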
+ */ +#if 0 +static unsigned int dsp56k_poll(struct file *file, poll_table *wait) +{ + int dev = iminor(file_inode(file)) & 0x0f; + + switch(dev) + { + case DSP56K_DEV_56001: + /* poll_wait(file, ???, wait); */ + return POLLIN | POLLRDNORM | POLLOUT; + + default: + printk("DSP56k driver: Unknown minor device: %d\n", dev); + return 0; + } +} +#endif + +static int dsp56k_open(struct inode *inode, struct file *file) +{ + int dev = iminor(inode) & 0x0f; + int ret = 0; + + mutex_lock(&dsp56k_mutex); + switch(dev) + { + case DSP56K_DEV_56001: + + if (test_and_set_bit(0, &dsp56k.in_use)) { + ret = -EBUSY; + goto out; + } + + dsp56k.timeout = TIMEOUT; + dsp56k.maxio = MAXIO; + dsp56k.rx_wsize = dsp56k.tx_wsize = 4; + + DSP56K_TX_INT_OFF; + DSP56K_RX_INT_OFF; + + /* Zero host flags */ + dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0; + dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1; + + break; + + default: + ret = -ENODEV; + } +out: + mutex_unlock(&dsp56k_mutex); + return ret; +} + +static int dsp56k_release(struct inode *inode, struct file *file) +{ + int dev = iminor(inode) & 0x0f; + + switch(dev) + { + case DSP56K_DEV_56001: + clear_bit(0, &dsp56k.in_use); + break; + default: + printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); + return -ENXIO; + } + + return 0; +} + +static const struct file_operations dsp56k_fops = { + .owner = THIS_MODULE, + .read = dsp56k_read, + .write = dsp56k_write, + .unlocked_ioctl = dsp56k_ioctl, + .open = dsp56k_open, + .release = dsp56k_release, + .llseek = noop_llseek, +}; + + +/****** Init and module functions ******/ + +static char banner[] __initdata = KERN_INFO "DSP56k driver installed\n"; + +static int __init dsp56k_init_driver(void) +{ + int err = 0; + + if(!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) { + printk("DSP56k driver: Hardware not present\n"); + return -ENODEV; + } + + if(register_chrdev(DSP56K_MAJOR, "dsp56k", &dsp56k_fops)) { + printk("DSP56k driver: Unable to register driver\n"); + return -ENODEV; + } + dsp56k_class = class_create(THIS_MODULE, "dsp56k"); + if (IS_ERR(dsp56k_class)) { + err = PTR_ERR(dsp56k_class); + goto out_chrdev; + } + device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL, + "dsp56k"); + + printk(banner); + goto out; + +out_chrdev: + unregister_chrdev(DSP56K_MAJOR, "dsp56k"); +out: + return err; +} +module_init(dsp56k_init_driver); + +static void __exit dsp56k_cleanup_driver(void) +{ + device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0)); + class_destroy(dsp56k_class); + unregister_chrdev(DSP56K_MAJOR, "dsp56k"); +} +module_exit(dsp56k_cleanup_driver); + +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("dsp56k/bootstrap.bin"); diff --git a/kernel/drivers/char/dtlk.c b/kernel/drivers/char/dtlk.c new file mode 100644 index 000000000..65a8d96c0 --- /dev/null +++ b/kernel/drivers/char/dtlk.c @@ -0,0 +1,663 @@ +/* -*- linux-c -*- + * dtlk.c - DoubleTalk PC driver for Linux + * + * Original author: Chris Pallotta + * Current maintainer: Jim Van Zandt + * + * 2000-03-18 Jim Van Zandt: Fix polling. + * Eliminate dtlk_timer_active flag and separate dtlk_stop_timer + * function. Don't restart timer in dtlk_timer_tick. Restart timer + * in dtlk_poll after every poll. dtlk_poll returns mask (duh). + * Eliminate unused function dtlk_write_byte. Misc. code cleanups. + */ + +/* This driver is for the DoubleTalk PC, a speech synthesizer + manufactured by RC Systems (http://www.rcsys.com/). It was written + based on documentation in their User's Manual file and Developer's + Tools disk. 
+ + The DoubleTalk PC contains four voice synthesizers: text-to-speech + (TTS), linear predictive coding (LPC), PCM/ADPCM, and CVSD. It + also has a tone generator. Output data for LPC are written to the + LPC port, and output data for the other modes are written to the + TTS port. + + Two kinds of data can be read from the DoubleTalk: status + information (in response to the "\001?" interrogation command) is + read from the TTS port, and index markers (which mark the progress + of the speech) are read from the LPC port. Not all models of the + DoubleTalk PC implement index markers. Both the TTS and LPC ports + can also display status flags. + + The DoubleTalk PC generates no interrupts. + + These characteristics are mapped into the Unix stream I/O model as + follows: + + "write" sends bytes to the TTS port. It is the responsibility of + the user program to switch modes among TTS, PCM/ADPCM, and CVSD. + This driver was written for use with the text-to-speech + synthesizer. If LPC output is needed some day, other minor device + numbers can be used to select among output modes. + + "read" gets index markers from the LPC port. If the device does + not implement index markers, the read will fail with error EINVAL. + + Status information is available using the DTLK_INTERROGATE ioctl. + + */ + +#include + +#define KERNEL +#include +#include +#include +#include /* for -EBUSY */ +#include /* for request_region */ +#include /* for loops_per_jiffy */ +#include +#include +#include /* for inb_p, outb_p, inb, outb, etc. */ +#include /* for get_user, etc. */ +#include /* for wait_queue */ +#include /* for __init, module_{init,exit} */ +#include /* for POLLIN, etc. */ +#include /* local header file for DoubleTalk values */ + +#ifdef TRACING +#define TRACE_TEXT(str) printk(str); +#define TRACE_RET printk(")") +#else /* !TRACING */ +#define TRACE_TEXT(str) ((void) 0) +#define TRACE_RET ((void) 0) +#endif /* TRACING */ + +static DEFINE_MUTEX(dtlk_mutex); +static void dtlk_timer_tick(unsigned long data); + +static int dtlk_major; +static int dtlk_port_lpc; +static int dtlk_port_tts; +static int dtlk_busy; +static int dtlk_has_indexing; +static unsigned int dtlk_portlist[] = +{0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0}; +static wait_queue_head_t dtlk_process_list; +static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick, 0, 0); + +/* prototypes for file_operations struct */ +static ssize_t dtlk_read(struct file *, char __user *, + size_t nbytes, loff_t * ppos); +static ssize_t dtlk_write(struct file *, const char __user *, + size_t nbytes, loff_t * ppos); +static unsigned int dtlk_poll(struct file *, poll_table *); +static int dtlk_open(struct inode *, struct file *); +static int dtlk_release(struct inode *, struct file *); +static long dtlk_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +static const struct file_operations dtlk_fops = +{ + .owner = THIS_MODULE, + .read = dtlk_read, + .write = dtlk_write, + .poll = dtlk_poll, + .unlocked_ioctl = dtlk_ioctl, + .open = dtlk_open, + .release = dtlk_release, + .llseek = no_llseek, +}; + +/* local prototypes */ +static int dtlk_dev_probe(void); +static struct dtlk_settings *dtlk_interrogate(void); +static int dtlk_readable(void); +static char dtlk_read_lpc(void); +static char dtlk_read_tts(void); +static int dtlk_writeable(void); +static char dtlk_write_bytes(const char *buf, int n); +static char dtlk_write_tts(char); +/* + static void dtlk_handle_error(char, char, unsigned int); + */ + +static ssize_t dtlk_read(struct file *file, char __user *buf, + 
size_t count, loff_t * ppos) +{ + unsigned int minor = iminor(file_inode(file)); + char ch; + int i = 0, retries; + + TRACE_TEXT("(dtlk_read"); + /* printk("DoubleTalk PC - dtlk_read()\n"); */ + + if (minor != DTLK_MINOR || !dtlk_has_indexing) + return -EINVAL; + + for (retries = 0; retries < loops_per_jiffy; retries++) { + while (i < count && dtlk_readable()) { + ch = dtlk_read_lpc(); + /* printk("dtlk_read() reads 0x%02x\n", ch); */ + if (put_user(ch, buf++)) + return -EFAULT; + i++; + } + if (i) + return i; + if (file->f_flags & O_NONBLOCK) + break; + msleep_interruptible(100); + } + if (retries == loops_per_jiffy) + printk(KERN_ERR "dtlk_read times out\n"); + TRACE_RET; + return -EAGAIN; +} + +static ssize_t dtlk_write(struct file *file, const char __user *buf, + size_t count, loff_t * ppos) +{ + int i = 0, retries = 0, ch; + + TRACE_TEXT("(dtlk_write"); +#ifdef TRACING + printk(" \""); + { + int i, ch; + for (i = 0; i < count; i++) { + if (get_user(ch, buf + i)) + return -EFAULT; + if (' ' <= ch && ch <= '~') + printk("%c", ch); + else + printk("\\%03o", ch); + } + printk("\""); + } +#endif + + if (iminor(file_inode(file)) != DTLK_MINOR) + return -EINVAL; + + while (1) { + while (i < count && !get_user(ch, buf) && + (ch == DTLK_CLEAR || dtlk_writeable())) { + dtlk_write_tts(ch); + buf++; + i++; + if (i % 5 == 0) + /* We yield our time until scheduled + again. This reduces the transfer + rate to 500 bytes/sec, but that's + still enough to keep up with the + speech synthesizer. */ + msleep_interruptible(1); + else { + /* the RDY bit goes zero 2-3 usec + after writing, and goes 1 again + 180-190 usec later. Here, we wait + up to 250 usec for the RDY bit to + go nonzero. */ + for (retries = 0; + retries < loops_per_jiffy / (4000/HZ); + retries++) + if (inb_p(dtlk_port_tts) & + TTS_WRITABLE) + break; + } + retries = 0; + } + if (i == count) + return i; + if (file->f_flags & O_NONBLOCK) + break; + + msleep_interruptible(1); + + if (++retries > 10 * HZ) { /* wait no more than 10 sec + from last write */ + printk("dtlk: write timeout. " + "inb_p(dtlk_port_tts) = 0x%02x\n", + inb_p(dtlk_port_tts)); + TRACE_RET; + return -EBUSY; + } + } + TRACE_RET; + return -EAGAIN; +} + +static unsigned int dtlk_poll(struct file *file, poll_table * wait) +{ + int mask = 0; + unsigned long expires; + + TRACE_TEXT(" dtlk_poll"); + /* + static long int j; + printk("."); + printk("<%ld>", jiffies-j); + j=jiffies; + */ + poll_wait(file, &dtlk_process_list, wait); + + if (dtlk_has_indexing && dtlk_readable()) { + del_timer(&dtlk_timer); + mask = POLLIN | POLLRDNORM; + } + if (dtlk_writeable()) { + del_timer(&dtlk_timer); + mask |= POLLOUT | POLLWRNORM; + } + /* there are no exception conditions */ + + /* There won't be any interrupts, so we set a timer instead. 
*/ + expires = jiffies + 3*HZ / 100; + mod_timer(&dtlk_timer, expires); + + return mask; +} + +static void dtlk_timer_tick(unsigned long data) +{ + TRACE_TEXT(" dtlk_timer_tick"); + wake_up_interruptible(&dtlk_process_list); +} + +static long dtlk_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + char __user *argp = (char __user *)arg; + struct dtlk_settings *sp; + char portval; + TRACE_TEXT(" dtlk_ioctl"); + + switch (cmd) { + + case DTLK_INTERROGATE: + mutex_lock(&dtlk_mutex); + sp = dtlk_interrogate(); + mutex_unlock(&dtlk_mutex); + if (copy_to_user(argp, sp, sizeof(struct dtlk_settings))) + return -EINVAL; + return 0; + + case DTLK_STATUS: + portval = inb_p(dtlk_port_tts); + return put_user(portval, argp); + + default: + return -EINVAL; + } +} + +/* Note that nobody ever sets dtlk_busy... */ +static int dtlk_open(struct inode *inode, struct file *file) +{ + TRACE_TEXT("(dtlk_open"); + + nonseekable_open(inode, file); + switch (iminor(inode)) { + case DTLK_MINOR: + if (dtlk_busy) + return -EBUSY; + return nonseekable_open(inode, file); + + default: + return -ENXIO; + } +} + +static int dtlk_release(struct inode *inode, struct file *file) +{ + TRACE_TEXT("(dtlk_release"); + + switch (iminor(inode)) { + case DTLK_MINOR: + break; + + default: + break; + } + TRACE_RET; + + del_timer_sync(&dtlk_timer); + + return 0; +} + +static int __init dtlk_init(void) +{ + int err; + + dtlk_port_lpc = 0; + dtlk_port_tts = 0; + dtlk_busy = 0; + dtlk_major = register_chrdev(0, "dtlk", &dtlk_fops); + if (dtlk_major < 0) { + printk(KERN_ERR "DoubleTalk PC - cannot register device\n"); + return dtlk_major; + } + err = dtlk_dev_probe(); + if (err) { + unregister_chrdev(dtlk_major, "dtlk"); + return err; + } + printk(", MAJOR %d\n", dtlk_major); + + init_waitqueue_head(&dtlk_process_list); + + return 0; +} + +static void __exit dtlk_cleanup (void) +{ + dtlk_write_bytes("goodbye", 8); + msleep_interruptible(500); /* nap 0.50 sec but + could be awakened + earlier by + signals... 
*/ + + dtlk_write_tts(DTLK_CLEAR); + unregister_chrdev(dtlk_major, "dtlk"); + release_region(dtlk_port_lpc, DTLK_IO_EXTENT); +} + +module_init(dtlk_init); +module_exit(dtlk_cleanup); + +/* ------------------------------------------------------------------------ */ + +static int dtlk_readable(void) +{ +#ifdef TRACING + printk(" dtlk_readable=%u@%u", inb_p(dtlk_port_lpc) != 0x7f, jiffies); +#endif + return inb_p(dtlk_port_lpc) != 0x7f; +} + +static int dtlk_writeable(void) +{ + /* TRACE_TEXT(" dtlk_writeable"); */ +#ifdef TRACINGMORE + printk(" dtlk_writeable=%u", (inb_p(dtlk_port_tts) & TTS_WRITABLE)!=0); +#endif + return inb_p(dtlk_port_tts) & TTS_WRITABLE; +} + +static int __init dtlk_dev_probe(void) +{ + unsigned int testval = 0; + int i = 0; + struct dtlk_settings *sp; + + if (dtlk_port_lpc | dtlk_port_tts) + return -EBUSY; + + for (i = 0; dtlk_portlist[i]; i++) { +#if 0 + printk("DoubleTalk PC - Port %03x = %04x\n", + dtlk_portlist[i], (testval = inw_p(dtlk_portlist[i]))); +#endif + + if (!request_region(dtlk_portlist[i], DTLK_IO_EXTENT, + "dtlk")) + continue; + testval = inw_p(dtlk_portlist[i]); + if ((testval &= 0xfbff) == 0x107f) { + dtlk_port_lpc = dtlk_portlist[i]; + dtlk_port_tts = dtlk_port_lpc + 1; + + sp = dtlk_interrogate(); + printk("DoubleTalk PC at %03x-%03x, " + "ROM version %s, serial number %u", + dtlk_portlist[i], dtlk_portlist[i] + + DTLK_IO_EXTENT - 1, + sp->rom_version, sp->serial_number); + + /* put LPC port into known state, so + dtlk_readable() gives valid result */ + outb_p(0xff, dtlk_port_lpc); + + /* INIT string and index marker */ + dtlk_write_bytes("\036\1@\0\0012I\r", 8); + /* posting an index takes 18 msec. Here, we + wait up to 100 msec to see whether it + appears. */ + msleep_interruptible(100); + dtlk_has_indexing = dtlk_readable(); +#ifdef TRACING + printk(", indexing %d\n", dtlk_has_indexing); +#endif +#ifdef INSCOPE + { +/* This macro records ten samples read from the LPC port, for later display */ +#define LOOK \ +for (i = 0; i < 10; i++) \ + { \ + buffer[b++] = inb_p(dtlk_port_lpc); \ + __delay(loops_per_jiffy/(1000000/HZ)); \ + } + char buffer[1000]; + int b = 0, i, j; + + LOOK + outb_p(0xff, dtlk_port_lpc); + buffer[b++] = 0; + LOOK + dtlk_write_bytes("\0012I\r", 4); + buffer[b++] = 0; + __delay(50 * loops_per_jiffy / (1000/HZ)); + outb_p(0xff, dtlk_port_lpc); + buffer[b++] = 0; + LOOK + + printk("\n"); + for (j = 0; j < b; j++) + printk(" %02x", buffer[j]); + printk("\n"); + } +#endif /* INSCOPE */ + +#ifdef OUTSCOPE + { +/* This macro records ten samples read from the TTS port, for later display */ +#define LOOK \ +for (i = 0; i < 10; i++) \ + { \ + buffer[b++] = inb_p(dtlk_port_tts); \ + __delay(loops_per_jiffy/(1000000/HZ)); /* 1 us */ \ + } + char buffer[1000]; + int b = 0, i, j; + + mdelay(10); /* 10 ms */ + LOOK + outb_p(0x03, dtlk_port_tts); + buffer[b++] = 0; + LOOK + LOOK + + printk("\n"); + for (j = 0; j < b; j++) + printk(" %02x", buffer[j]); + printk("\n"); + } +#endif /* OUTSCOPE */ + + dtlk_write_bytes("Double Talk found", 18); + + return 0; + } + release_region(dtlk_portlist[i], DTLK_IO_EXTENT); + } + + printk(KERN_INFO "DoubleTalk PC - not found\n"); + return -ENODEV; +} + +/* + static void dtlk_handle_error(char op, char rc, unsigned int minor) + { + printk(KERN_INFO"\nDoubleTalk PC - MINOR: %d, OPCODE: %d, ERROR: %d\n", + minor, op, rc); + return; + } + */ + +/* interrogate the DoubleTalk PC and return its settings */ +static struct dtlk_settings *dtlk_interrogate(void) +{ + unsigned char *t; + static char buf[sizeof(struct 
dtlk_settings) + 1];
+	int total, i;
+	static struct dtlk_settings status;
+	TRACE_TEXT("(dtlk_interrogate");
+	dtlk_write_bytes("\030\001?", 3);
+	for (total = 0, i = 0; i < 50; i++) {
+		buf[total] = dtlk_read_tts();
+		if (total > 2 && buf[total] == 0x7f)
+			break;
+		if (total < sizeof(struct dtlk_settings))
+			total++;
+	}
+	/*
+	   if (i==50) printk("interrogate() read overrun\n");
+	   for (i=0; i<sizeof(struct dtlk_settings); i++)
+	   printk(" %02x", buf[i]);
+	   printk("\n");
+	 */
+	t = buf;
+	status.serial_number = t[0] + t[1] * 256; /* serial number is
+						     little endian */
+	t += 2;
+
+	i = 0;
+	while (*t != '\r') {
+		status.rom_version[i] = *t;
+		if (i < sizeof(status.rom_version) - 1)
+			i++;
+		t++;
+	}
+	status.rom_version[i] = 0;
+	t++;
+
+	status.mode = *t++;
+	status.punc_level = *t++;
+	status.formant_freq = *t++;
+	status.pitch = *t++;
+	status.speed = *t++;
+	status.volume = *t++;
+	status.tone = *t++;
+	status.expression = *t++;
+	status.ext_dict_loaded = *t++;
+	status.ext_dict_status = *t++;
+	status.free_ram = *t++;
+	status.articulation = *t++;
+	status.reverb = *t++;
+	status.eob = *t++;
+	status.has_indexing = dtlk_has_indexing;
+	TRACE_RET;
+	return &status;
+}
+
+static char dtlk_read_tts(void)
+{
+	int portval, retries = 0;
+	char ch;
+	TRACE_TEXT("(dtlk_read_tts");
+
+	/* verify DT is ready, read char, wait for ACK */
+	do {
+		portval = inb_p(dtlk_port_tts);
+	} while ((portval & TTS_READABLE) == 0 &&
+		 retries++ < DTLK_MAX_RETRIES);
+	if (retries > DTLK_MAX_RETRIES)
+		printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+	ch = inb_p(dtlk_port_tts);	/* input from TTS port */
+	ch &= 0x7f;
+	outb_p(ch, dtlk_port_tts);
+
+	retries = 0;
+	do {
+		portval = inb_p(dtlk_port_tts);
+	} while ((portval & TTS_READABLE) != 0 &&
+		 retries++ < DTLK_MAX_RETRIES);
+	if (retries > DTLK_MAX_RETRIES)
+		printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+	TRACE_RET;
+	return ch;
+}
+
+static char dtlk_read_lpc(void)
+{
+	int retries = 0;
+	char ch;
+	TRACE_TEXT("(dtlk_read_lpc");
+
+	/* no need to test -- this is only called when the port is readable */
+
+	ch = inb_p(dtlk_port_lpc);	/* input from LPC port */
+
+	outb_p(0xff, dtlk_port_lpc);
+
+	/* acknowledging a read takes 3-4
+	   usec.  Here, we wait up to 20 usec
+	   for the acknowledgement */
+	retries = (loops_per_jiffy * 20) / (1000000/HZ);
+	while (inb_p(dtlk_port_lpc) != 0x7f && --retries > 0);
+	if (retries == 0)
+		printk(KERN_ERR "dtlk_read_lpc() timeout\n");
+
+	TRACE_RET;
+	return ch;
+}
+
+/* write n bytes to tts port */
+static char dtlk_write_bytes(const char *buf, int n)
+{
+	char val = 0;
+	/*  printk("dtlk_write_bytes(\"%-*s\", %d)\n", n, buf, n); */
+	TRACE_TEXT("(dtlk_write_bytes");
+	while (n-- > 0)
+		val = dtlk_write_tts(*buf++);
+	TRACE_RET;
+	return val;
+}
+
+static char dtlk_write_tts(char ch)
+{
+	int retries = 0;
+#ifdef TRACINGMORE
+	printk("  dtlk_write_tts(");
+	if (' ' <= ch && ch <= '~')
+		printk("'%c'", ch);
+	else
+		printk("0x%02x", ch);
+#endif
+	if (ch != DTLK_CLEAR)	/* no flow control for CLEAR command */
+		while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 &&
+		       retries++ < DTLK_MAX_RETRIES)	/* DT ready? */
+			;
+	if (retries > DTLK_MAX_RETRIES)
+		printk(KERN_ERR "dtlk_write_tts() timeout\n");
+
+	outb_p(ch, dtlk_port_tts);	/* output to TTS port */
+	/* the RDY bit goes zero 2-3 usec after writing, and goes
+	   1 again 180-190 usec later.  Here, we wait up to 10
+	   usec for the RDY bit to go zero. */
+	for (retries = 0; retries < loops_per_jiffy / (100000/HZ); retries++)
+		if ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0)
+			break;
+
+#ifdef TRACINGMORE
+	printk(")\n");
+#endif
+	return 0;
+}
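+
+/*
+ * Example (editor's sketch, not part of the original driver): a minimal
+ * user-space client for the interface described above.  It writes text
+ * to the TTS port through write(2) and fetches the status block with
+ * the DTLK_INTERROGATE ioctl.  Error handling is trimmed.
+ */
+#if 0	/* illustration only, never compiled */
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/dtlk.h>
+
+int main(void)
+{
+	struct dtlk_settings st;
+	int fd = open("/dev/dtlk", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+	write(fd, "Hello world\r", 12);	/* spoken by the TTS synthesizer */
+	if (ioctl(fd, DTLK_INTERROGATE, &st) == 0)
+		printf("ROM %s, serial %u\n",
+		       st.rom_version, (unsigned)st.serial_number);
+	close(fd);
+	return 0;
+}
+#endif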
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/char/efirtc.c b/kernel/drivers/char/efirtc.c
new file mode 100644
index 000000000..e39e7402e
--- /dev/null
+++ b/kernel/drivers/char/efirtc.c
@@ -0,0 +1,408 @@
+/*
+ * EFI Time Services Driver for Linux
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker
+ *
+ * This code provides an architected & portable interface to the real time
+ * clock by using EFI instead of direct bit fiddling. The functionalities are
+ * quite different from the rtc.c driver. The only way to talk to the device
+ * is by using ioctl(). There is a /proc interface which provides the raw
+ * information.
+ *
+ * Please note that we have kept the API as close as possible to the
+ * legacy RTC. The standard /sbin/hwclock program should work normally
+ * when used to get/set the time.
+ *
+ * NOTES:
+ *	- Locking is required for safe execution of EFI calls with regards
+ *	  to interrupts and SMP.
+ *
+ * TODO (December 1999):
+ *	- provide the API to set/get the WakeUp Alarm (different from the
+ *	  rtc.c alarm).
+ *	- SMP testing
+ *	- Add module support
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/efi.h>
+#include <linux/uaccess.h>
+
+
+#define EFI_RTC_VERSION		"0.4"
+
+#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT)
+/*
+ * EFI Epoch is 1/1/1998
+ */
+#define EFI_RTC_EPOCH		1998
+
+static DEFINE_SPINLOCK(efi_rtc_lock);
+
+static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg);
+
+#define is_leap(year) \
+          ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+
+static const unsigned short int __mon_yday[2][13] =
+{
+	/* Normal years. */
+	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+	/* Leap years. */
+	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+};
+
+/*
+ * returns day of the year [0-365]
+ */
+static inline int
+compute_yday(efi_time_t *eft)
+{
+	/* efi_time_t.month is in the [1-12] so, we need -1 */
+	return __mon_yday[is_leap(eft->year)][eft->month-1]+ eft->day -1;
+}
+/*
+ * returns day of the week [0-6] 0=Sunday
+ *
+ * Don't try to provide a year that's before 1998, please !
+ */
+static int
+compute_wday(efi_time_t *eft)
+{
+	int y;
+	int ndays = 0;
+
+	if ( eft->year < 1998 ) {
+		printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
+		return -1;
+	}
+
+	for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) {
+		ndays += 365 + (is_leap(y) ? 1 : 0);
+	}
+	ndays += compute_yday(eft);
+
+	/*
+	 * 4=1/1/1998 was a Thursday
+	 */
+	return (ndays + 4) % 7;
+}
+
+static void
+convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
+{
+
+	eft->year	= wtime->tm_year + 1900;
+	eft->month	= wtime->tm_mon + 1;
+	eft->day	= wtime->tm_mday;
+	eft->hour	= wtime->tm_hour;
+	eft->minute	= wtime->tm_min;
+	eft->second	= wtime->tm_sec;
+	eft->nanosecond = 0;
+	eft->daylight	= wtime->tm_isdst ?
EFI_ISDST: 0; + eft->timezone = EFI_UNSPECIFIED_TIMEZONE; +} + +static void +convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) +{ + memset(wtime, 0, sizeof(*wtime)); + wtime->tm_sec = eft->second; + wtime->tm_min = eft->minute; + wtime->tm_hour = eft->hour; + wtime->tm_mday = eft->day; + wtime->tm_mon = eft->month - 1; + wtime->tm_year = eft->year - 1900; + + /* day of the week [0-6], Sunday=0 */ + wtime->tm_wday = compute_wday(eft); + + /* day in the year [1-365]*/ + wtime->tm_yday = compute_yday(eft); + + + switch (eft->daylight & EFI_ISDST) { + case EFI_ISDST: + wtime->tm_isdst = 1; + break; + case EFI_TIME_ADJUST_DAYLIGHT: + wtime->tm_isdst = 0; + break; + default: + wtime->tm_isdst = -1; + } +} + +static long efi_rtc_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + + efi_status_t status; + unsigned long flags; + efi_time_t eft; + efi_time_cap_t cap; + struct rtc_time wtime; + struct rtc_wkalrm __user *ewp; + unsigned char enabled, pending; + + switch (cmd) { + case RTC_UIE_ON: + case RTC_UIE_OFF: + case RTC_PIE_ON: + case RTC_PIE_OFF: + case RTC_AIE_ON: + case RTC_AIE_OFF: + case RTC_ALM_SET: + case RTC_ALM_READ: + case RTC_IRQP_READ: + case RTC_IRQP_SET: + case RTC_EPOCH_READ: + case RTC_EPOCH_SET: + return -EINVAL; + + case RTC_RD_TIME: + spin_lock_irqsave(&efi_rtc_lock, flags); + + status = efi.get_time(&eft, &cap); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + if (status != EFI_SUCCESS) { + /* should never happen */ + printk(KERN_ERR "efitime: can't read time\n"); + return -EINVAL; + } + + convert_from_efi_time(&eft, &wtime); + + return copy_to_user((void __user *)arg, &wtime, + sizeof (struct rtc_time)) ? - EFAULT : 0; + + case RTC_SET_TIME: + + if (!capable(CAP_SYS_TIME)) return -EACCES; + + if (copy_from_user(&wtime, (struct rtc_time __user *)arg, + sizeof(struct rtc_time)) ) + return -EFAULT; + + convert_to_efi_time(&wtime, &eft); + + spin_lock_irqsave(&efi_rtc_lock, flags); + + status = efi.set_time(&eft); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + return status == EFI_SUCCESS ? 0 : -EINVAL; + + case RTC_WKALM_SET: + + if (!capable(CAP_SYS_TIME)) return -EACCES; + + ewp = (struct rtc_wkalrm __user *)arg; + + if ( get_user(enabled, &ewp->enabled) + || copy_from_user(&wtime, &ewp->time, sizeof(struct rtc_time)) ) + return -EFAULT; + + convert_to_efi_time(&wtime, &eft); + + spin_lock_irqsave(&efi_rtc_lock, flags); + /* + * XXX Fixme: + * As of EFI 0.92 with the firmware I have on my + * machine this call does not seem to work quite + * right + */ + status = efi.set_wakeup_time((efi_bool_t)enabled, &eft); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + return status == EFI_SUCCESS ? 0 : -EINVAL; + + case RTC_WKALM_RD: + + spin_lock_irqsave(&efi_rtc_lock, flags); + + status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + if (status != EFI_SUCCESS) return -EINVAL; + + ewp = (struct rtc_wkalrm __user *)arg; + + if ( put_user(enabled, &ewp->enabled) + || put_user(pending, &ewp->pending)) return -EFAULT; + + convert_from_efi_time(&eft, &wtime); + + return copy_to_user(&ewp->time, &wtime, + sizeof(struct rtc_time)) ? -EFAULT : 0; + } + return -ENOTTY; +} + +/* + * We enforce only one user at a time here with the open/close. + * Also clear the previous interrupt data on an open, and clean + * up things on a close. 
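+ * (In this driver that single-user rule is not actually enforced:
+ * efi_rtc_open() below accepts any number of concurrent opens, and each
+ * EFI call is simply serialized on efi_rtc_lock.)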
+ */ + +static int efi_rtc_open(struct inode *inode, struct file *file) +{ + /* + * nothing special to do here + * We do accept multiple open files at the same time as we + * synchronize on the per call operation. + */ + return 0; +} + +static int efi_rtc_close(struct inode *inode, struct file *file) +{ + return 0; +} + +/* + * The various file operations we support. + */ + +static const struct file_operations efi_rtc_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = efi_rtc_ioctl, + .open = efi_rtc_open, + .release = efi_rtc_close, + .llseek = no_llseek, +}; + +static struct miscdevice efi_rtc_dev= { + EFI_RTC_MINOR, + "efirtc", + &efi_rtc_fops +}; + +/* + * We export RAW EFI information to /proc/driver/efirtc + */ +static int efi_rtc_proc_show(struct seq_file *m, void *v) +{ + efi_time_t eft, alm; + efi_time_cap_t cap; + efi_bool_t enabled, pending; + unsigned long flags; + + memset(&eft, 0, sizeof(eft)); + memset(&alm, 0, sizeof(alm)); + memset(&cap, 0, sizeof(cap)); + + spin_lock_irqsave(&efi_rtc_lock, flags); + + efi.get_time(&eft, &cap); + efi.get_wakeup_time(&enabled, &pending, &alm); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + seq_printf(m, + "Time : %u:%u:%u.%09u\n" + "Date : %u-%u-%u\n" + "Daylight : %u\n", + eft.hour, eft.minute, eft.second, eft.nanosecond, + eft.year, eft.month, eft.day, + eft.daylight); + + if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE) + seq_puts(m, "Timezone : unspecified\n"); + else + /* XXX fixme: convert to string? */ + seq_printf(m, "Timezone : %u\n", eft.timezone); + + + seq_printf(m, + "Alarm Time : %u:%u:%u.%09u\n" + "Alarm Date : %u-%u-%u\n" + "Alarm Daylight : %u\n" + "Enabled : %s\n" + "Pending : %s\n", + alm.hour, alm.minute, alm.second, alm.nanosecond, + alm.year, alm.month, alm.day, + alm.daylight, + enabled == 1 ? "yes" : "no", + pending == 1 ? "yes" : "no"); + + if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE) + seq_puts(m, "Timezone : unspecified\n"); + else + /* XXX fixme: convert to string? 
*/
+		seq_printf(m, "Timezone : %u\n", alm.timezone);
+
+	/*
+	 * now prints the capabilities
+	 */
+	seq_printf(m,
+		   "Resolution : %u\n"
+		   "Accuracy : %u\n"
+		   "SetstoZero : %u\n",
+		   cap.resolution, cap.accuracy, cap.sets_to_zero);
+
+	return 0;
+}
+
+static int efi_rtc_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, efi_rtc_proc_show, NULL);
+}
+
+static const struct file_operations efi_rtc_proc_fops = {
+	.open		= efi_rtc_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init
+efi_rtc_init(void)
+{
+	int ret;
+	struct proc_dir_entry *dir;
+
+	printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION);
+
+	ret = misc_register(&efi_rtc_dev);
+	if (ret) {
+		printk(KERN_ERR "efirtc: can't misc_register on minor=%d\n",
+		       EFI_RTC_MINOR);
+		return ret;
+	}
+
+	dir = proc_create("driver/efirtc", 0, NULL, &efi_rtc_proc_fops);
+	if (dir == NULL) {
+		printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n");
+		misc_deregister(&efi_rtc_dev);
+		return -1;
+	}
+	return 0;
+}
+
+static void __exit
+efi_rtc_exit(void)
+{
+	/* not yet used */
+}
+
+module_init(efi_rtc_init);
+module_exit(efi_rtc_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/char/generic_nvram.c b/kernel/drivers/char/generic_nvram.c
new file mode 100644
index 000000000..6c4f4b5a9
--- /dev/null
+++ b/kernel/drivers/char/generic_nvram.c
@@ -0,0 +1,174 @@
+/*
+ * Generic /dev/nvram driver for architectures providing some
+ * "generic" hooks, that is :
+ *
+ * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
+ *
+ * Note that an additional hook is supported for PowerMac only
+ * for getting the nvram "partition" information
+ *
+ */
+
+#define NVRAM_VERSION "1.1"
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+#endif
+
+#define NVRAM_SIZE	8192
+
+static DEFINE_MUTEX(nvram_mutex);
+static ssize_t nvram_len;
+
+static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+{
+	switch (origin) {
+	case 0:
+		break;
+	case 1:
+		offset += file->f_pos;
+		break;
+	case 2:
+		offset += nvram_len;
+		break;
+	default:
+		offset = -1;
+	}
+	if (offset < 0)
+		return -EINVAL;
+
+	file->f_pos = offset;
+
+	return file->f_pos;
+}
+
+static ssize_t read_nvram(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned int i;
+	char __user *p = buf;
+
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+	if (*ppos >= nvram_len)
+		return 0;
+	for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
+		if (__put_user(nvram_read_byte(i), p))
+			return -EFAULT;
+	*ppos = i;
+	return p - buf;
+}
+
+static ssize_t write_nvram(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	unsigned int i;
+	const char __user *p = buf;
+	char c;
+
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+	if (*ppos >= nvram_len)
+		return 0;
+	for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
+		if (__get_user(c, p))
+			return -EFAULT;
+		nvram_write_byte(c, i);
+	}
+	*ppos = i;
+	return p - buf;
+}
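+
+/*
+ * Example (editor's sketch, not part of the original driver): user-space
+ * access through the byte-at-a-time interface above.  Seeking then
+ * reading /dev/nvram funnels every byte through nvram_read_byte().
+ */
+#if 0	/* illustration only, never compiled */
+#include <fcntl.h>
+#include <unistd.h>
+
+static int dump_nvram(unsigned char *out, off_t offset, size_t len)
+{
+	int fd = open("/dev/nvram", O_RDONLY);
+	ssize_t n = -1;
+
+	if (fd < 0)
+		return -1;
+	if (lseek(fd, offset, SEEK_SET) == offset)
+		n = read(fd, out, len);
+	close(fd);
+	return n < 0 ? -1 : (int)n;
+}
+#endif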
+
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch(cmd) {
+#ifdef CONFIG_PPC_PMAC
+	case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+		printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+	case IOC_NVRAM_GET_OFFSET: {
+		int part, offset;
+
+		if (!machine_is(powermac))
+			return -EINVAL;
+		if (copy_from_user(&part, (void __user*)arg,
+				   sizeof(part)) != 0)
+			return -EFAULT;
+		if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+			return -EINVAL;
+		offset = pmac_get_partition(part);
+		if (copy_to_user((void __user*)arg,
+				 &offset, sizeof(offset)) != 0)
+			return -EFAULT;
+		break;
+	}
+#endif /* CONFIG_PPC_PMAC */
+	case IOC_NVRAM_SYNC:
+		nvram_sync();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&nvram_mutex);
+	ret = nvram_ioctl(file, cmd, arg);
+	mutex_unlock(&nvram_mutex);
+
+	return ret;
+}
+
+const struct file_operations nvram_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= nvram_llseek,
+	.read		= read_nvram,
+	.write		= write_nvram,
+	.unlocked_ioctl	= nvram_unlocked_ioctl,
+};
+
+static struct miscdevice nvram_dev = {
+	NVRAM_MINOR,
+	"nvram",
+	&nvram_fops
+};
+
+int __init nvram_init(void)
+{
+	int ret = 0;
+
+	printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
+	       NVRAM_VERSION);
+	ret = misc_register(&nvram_dev);
+	if (ret != 0)
+		goto out;
+
+	nvram_len = nvram_get_size();
+	if (nvram_len < 0)
+		nvram_len = NVRAM_SIZE;
+
+out:
+	return ret;
+}
+
+void __exit nvram_cleanup(void)
+{
+	misc_deregister( &nvram_dev );
+}
+
+module_init(nvram_init);
+module_exit(nvram_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/char/genrtc.c b/kernel/drivers/char/genrtc.c
new file mode 100644
index 000000000..4f943759d
--- /dev/null
+++ b/kernel/drivers/char/genrtc.c
@@ -0,0 +1,539 @@
+/*
+ *	Real Time Clock interface for
+ *		- q40 and other m68k machines,
+ *		- HP PARISC machines
+ *		- PowerPC machines
+ *	emulate some RTC irq capabilities in software
+ *
+ *	Copyright (C) 1999 Richard Zidlicky
+ *
+ *	based on Paul Gortmaker's rtc.c device and
+ *	     Sam Creasey Generic rtc driver
+ *
+ *	This driver allows use of the real time clock (built into
+ *	nearly all computers) from user space. It exports the /dev/rtc
+ *	interface supporting various ioctl() and also the /proc/driver/rtc
+ *	pseudo-file for status information.
+ *
+ *	The ioctls can be used to set the interrupt behaviour where
+ *	supported.
+ *
+ *	The /dev/rtc interface will block on reads until an interrupt
+ *	has been received. If an RTC interrupt has already happened,
+ *	it will output an unsigned long and then block. The output value
+ *	contains the interrupt status in the low byte and the number of
+ *	interrupts since the last read in the remaining high bytes. The
+ *	/dev/rtc interface can also be used with the select(2) call.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+
+ *	1.01 fix for 2.3.X                    rz@linux-m68k.org
+ *	1.02 merged with code from genrtc.c   rz@linux-m68k.org
+ *	1.03 make it more portable            zippel@linux-m68k.org
+ *	1.04 removed useless timer code       rz@linux-m68k.org
+ *	1.05 portable RTC_UIE emulation       rz@linux-m68k.org
+ *	1.06 set_rtc_time can return an error trini@kernel.crashing.org
+ *	1.07 ported to HP PARISC (hppa)       Helge Deller <deller@gmx.de>
+ */
+
+#define RTC_VERSION	"1.07"
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+
+#include <linux/rtc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+
+#include <asm/uaccess.h>
+#include <asm/rtc.h>
+
+/*
+ *	We sponge a minor off of the misc major. No need slurping
+ *	up another valuable major dev number for this.
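+ *	(The registration happens below via misc_register(&rtc_gen_dev),
+ *	which attaches the driver at minor RTC_MINOR under the misc major.)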
If you add + * an ioctl, make sure you don't conflict with SPARC's RTC + * ioctls. + */ + +static DEFINE_MUTEX(gen_rtc_mutex); +static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait); + +/* + * Bits in gen_rtc_status. + */ + +#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ + +static unsigned char gen_rtc_status; /* bitmapped status byte. */ +static unsigned long gen_rtc_irq_data; /* our output to the world */ + +/* months start at 0 now */ +static unsigned char days_in_mo[] = +{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +static int irq_active; + +#ifdef CONFIG_GEN_RTC_X +static struct work_struct genrtc_task; +static struct timer_list timer_task; + +static unsigned int oldsecs; +static int lostint; +static unsigned long tt_exp; + +static void gen_rtc_timer(unsigned long data); + +static volatile int stask_active; /* schedule_work */ +static volatile int ttask_active; /* timer_task */ +static int stop_rtc_timers; /* don't requeue tasks */ +static DEFINE_SPINLOCK(gen_rtc_lock); + +static void gen_rtc_interrupt(unsigned long arg); + +/* + * Routine to poll RTC seconds field for change as often as possible, + * after first RTC_UIE use timer to reduce polling + */ +static void genrtc_troutine(struct work_struct *work) +{ + unsigned int tmp = get_rtc_ss(); + + if (stop_rtc_timers) { + stask_active = 0; + return; + } + + if (oldsecs != tmp){ + oldsecs = tmp; + + timer_task.function = gen_rtc_timer; + timer_task.expires = jiffies + HZ - (HZ/10); + tt_exp=timer_task.expires; + ttask_active=1; + stask_active=0; + add_timer(&timer_task); + + gen_rtc_interrupt(0); + } else if (schedule_work(&genrtc_task) == 0) + stask_active = 0; +} + +static void gen_rtc_timer(unsigned long data) +{ + lostint = get_rtc_ss() - oldsecs ; + if (lostint<0) + lostint = 60 - lostint; + if (time_after(jiffies, tt_exp)) + printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n", + jiffies-tt_exp); + ttask_active=0; + stask_active=1; + if ((schedule_work(&genrtc_task) == 0)) + stask_active = 0; +} + +/* + * call gen_rtc_interrupt function to signal an RTC_UIE, + * arg is unused. + * Could be invoked either from a real interrupt handler or + * from some routine that periodically (eg 100HZ) monitors + * whether RTC_SECS changed + */ +static void gen_rtc_interrupt(unsigned long arg) +{ + /* We store the status in the low byte and the number of + * interrupts received since the last read in the remainder + * of rtc_irq_data. */ + + gen_rtc_irq_data += 0x100; + gen_rtc_irq_data &= ~0xff; + gen_rtc_irq_data |= RTC_UIE; + + if (lostint){ + printk("genrtc: system delaying clock ticks?\n"); + /* increment count so that userspace knows something is wrong */ + gen_rtc_irq_data += ((lostint-1)<<8); + lostint = 0; + } + + wake_up_interruptible(&gen_rtc_wait); +} + +/* + * Now all the various file operations that we export. 
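+ * (A blocking read() returns an unsigned long whose low byte holds the
+ * RTC_UIE status bit and whose upper bytes count the update interrupts
+ * accumulated since the last read -- e.g. 0x310 after three updates.)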
+ */ +static ssize_t gen_rtc_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long data; + ssize_t retval; + + if (count != sizeof (unsigned int) && count != sizeof (unsigned long)) + return -EINVAL; + + if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data) + return -EAGAIN; + + retval = wait_event_interruptible(gen_rtc_wait, + (data = xchg(&gen_rtc_irq_data, 0))); + if (retval) + goto out; + + /* first test allows optimizer to nuke this case for 32-bit machines */ + if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) { + unsigned int uidata = data; + retval = put_user(uidata, (unsigned int __user *)buf) ?: + sizeof(unsigned int); + } + else { + retval = put_user(data, (unsigned long __user *)buf) ?: + sizeof(unsigned long); + } +out: + return retval; +} + +static unsigned int gen_rtc_poll(struct file *file, + struct poll_table_struct *wait) +{ + poll_wait(file, &gen_rtc_wait, wait); + if (gen_rtc_irq_data != 0) + return POLLIN | POLLRDNORM; + return 0; +} + +#endif + +/* + * Used to disable/enable interrupts, only RTC_UIE supported + * We also clear out any old irq data after an ioctl() that + * meddles with the interrupt enable/disable bits. + */ + +static inline void gen_clear_rtc_irq_bit(unsigned char bit) +{ +#ifdef CONFIG_GEN_RTC_X + stop_rtc_timers = 1; + if (ttask_active){ + del_timer_sync(&timer_task); + ttask_active = 0; + } + while (stask_active) + schedule(); + + spin_lock(&gen_rtc_lock); + irq_active = 0; + spin_unlock(&gen_rtc_lock); +#endif +} + +static inline int gen_set_rtc_irq_bit(unsigned char bit) +{ +#ifdef CONFIG_GEN_RTC_X + spin_lock(&gen_rtc_lock); + if ( !irq_active ) { + irq_active = 1; + stop_rtc_timers = 0; + lostint = 0; + INIT_WORK(&genrtc_task, genrtc_troutine); + oldsecs = get_rtc_ss(); + init_timer(&timer_task); + + stask_active = 1; + if (schedule_work(&genrtc_task) == 0){ + stask_active = 0; + } + } + spin_unlock(&gen_rtc_lock); + gen_rtc_irq_data = 0; + return 0; +#else + return -EINVAL; +#endif +} + +static int gen_rtc_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct rtc_time wtime; + struct rtc_pll_info pll; + void __user *argp = (void __user *)arg; + + switch (cmd) { + + case RTC_PLL_GET: + if (get_rtc_pll(&pll)) + return -EINVAL; + else + return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0; + + case RTC_PLL_SET: + if (!capable(CAP_SYS_TIME)) + return -EACCES; + if (copy_from_user(&pll, argp, sizeof(pll))) + return -EFAULT; + return set_rtc_pll(&pll); + + case RTC_UIE_OFF: /* disable ints from RTC updates. */ + gen_clear_rtc_irq_bit(RTC_UIE); + return 0; + + case RTC_UIE_ON: /* enable ints for RTC updates. */ + return gen_set_rtc_irq_bit(RTC_UIE); + + case RTC_RD_TIME: /* Read the time/date from RTC */ + /* this doesn't get week-day, who cares */ + memset(&wtime, 0, sizeof(wtime)); + get_rtc_time(&wtime); + + return copy_to_user(argp, &wtime, sizeof(wtime)) ? 
-EFAULT : 0; + + case RTC_SET_TIME: /* Set the RTC */ + { + int year; + unsigned char leap_yr; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&wtime, argp, sizeof(wtime))) + return -EFAULT; + + year = wtime.tm_year + 1900; + leap_yr = ((!(year % 4) && (year % 100)) || + !(year % 400)); + + if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) + return -EINVAL; + + if (wtime.tm_mday < 0 || wtime.tm_mday > + (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) + return -EINVAL; + + if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || + wtime.tm_min < 0 || wtime.tm_min >= 60 || + wtime.tm_sec < 0 || wtime.tm_sec >= 60) + return -EINVAL; + + return set_rtc_time(&wtime); + } + } + + return -EINVAL; +} + +static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&gen_rtc_mutex); + ret = gen_rtc_ioctl(file, cmd, arg); + mutex_unlock(&gen_rtc_mutex); + + return ret; +} + +/* + * We enforce only one user at a time here with the open/close. + * Also clear the previous interrupt data on an open, and clean + * up things on a close. + */ + +static int gen_rtc_open(struct inode *inode, struct file *file) +{ + mutex_lock(&gen_rtc_mutex); + if (gen_rtc_status & RTC_IS_OPEN) { + mutex_unlock(&gen_rtc_mutex); + return -EBUSY; + } + + gen_rtc_status |= RTC_IS_OPEN; + gen_rtc_irq_data = 0; + irq_active = 0; + mutex_unlock(&gen_rtc_mutex); + + return 0; +} + +static int gen_rtc_release(struct inode *inode, struct file *file) +{ + /* + * Turn off all interrupts once the device is no longer + * in use and clear the data. + */ + + gen_clear_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE); + + gen_rtc_status &= ~RTC_IS_OPEN; + return 0; +} + + +#ifdef CONFIG_PROC_FS + +/* + * Info exported via "/proc/driver/rtc". + */ + +static int gen_rtc_proc_show(struct seq_file *m, void *v) +{ + struct rtc_time tm; + unsigned int flags; + struct rtc_pll_info pll; + + flags = get_rtc_time(&tm); + + seq_printf(m, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n" + "rtc_epoch\t: %04u\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 1900); + + tm.tm_hour = tm.tm_min = tm.tm_sec = 0; + + seq_puts(m, "alarm\t\t: "); + if (tm.tm_hour <= 24) + seq_printf(m, "%02d:", tm.tm_hour); + else + seq_puts(m, "**:"); + + if (tm.tm_min <= 59) + seq_printf(m, "%02d:", tm.tm_min); + else + seq_puts(m, "**:"); + + if (tm.tm_sec <= 59) + seq_printf(m, "%02d\n", tm.tm_sec); + else + seq_puts(m, "**\n"); + + seq_printf(m, + "DST_enable\t: %s\n" + "BCD\t\t: %s\n" + "24hr\t\t: %s\n" + "square_wave\t: %s\n" + "alarm_IRQ\t: %s\n" + "update_IRQ\t: %s\n" + "periodic_IRQ\t: %s\n" + "periodic_freq\t: %ld\n" + "batt_status\t: %s\n", + (flags & RTC_DST_EN) ? "yes" : "no", + (flags & RTC_DM_BINARY) ? "no" : "yes", + (flags & RTC_24H) ? "yes" : "no", + (flags & RTC_SQWE) ? "yes" : "no", + (flags & RTC_AIE) ? "yes" : "no", + irq_active ? "yes" : "no", + (flags & RTC_PIE) ? "yes" : "no", + 0L /* freq */, + (flags & RTC_BATT_BAD) ? 
"bad" : "okay"); + if (!get_rtc_pll(&pll)) + seq_printf(m, + "PLL adjustment\t: %d\n" + "PLL max +ve adjustment\t: %d\n" + "PLL max -ve adjustment\t: %d\n" + "PLL +ve adjustment factor\t: %d\n" + "PLL -ve adjustment factor\t: %d\n" + "PLL frequency\t: %ld\n", + pll.pll_value, + pll.pll_max, + pll.pll_min, + pll.pll_posmult, + pll.pll_negmult, + pll.pll_clock); + return 0; +} + +static int gen_rtc_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, gen_rtc_proc_show, NULL); +} + +static const struct file_operations gen_rtc_proc_fops = { + .open = gen_rtc_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init gen_rtc_proc_init(void) +{ + struct proc_dir_entry *r; + + r = proc_create("driver/rtc", 0, NULL, &gen_rtc_proc_fops); + if (!r) + return -ENOMEM; + return 0; +} +#else +static inline int gen_rtc_proc_init(void) { return 0; } +#endif /* CONFIG_PROC_FS */ + + +/* + * The various file operations we support. + */ + +static const struct file_operations gen_rtc_fops = { + .owner = THIS_MODULE, +#ifdef CONFIG_GEN_RTC_X + .read = gen_rtc_read, + .poll = gen_rtc_poll, +#endif + .unlocked_ioctl = gen_rtc_unlocked_ioctl, + .open = gen_rtc_open, + .release = gen_rtc_release, + .llseek = noop_llseek, +}; + +static struct miscdevice rtc_gen_dev = +{ + .minor = RTC_MINOR, + .name = "rtc", + .fops = &gen_rtc_fops, +}; + +static int __init rtc_generic_init(void) +{ + int retval; + + printk(KERN_INFO "Generic RTC Driver v%s\n", RTC_VERSION); + + retval = misc_register(&rtc_gen_dev); + if (retval < 0) + return retval; + + retval = gen_rtc_proc_init(); + if (retval) { + misc_deregister(&rtc_gen_dev); + return retval; + } + + return 0; +} + +static void __exit rtc_generic_exit(void) +{ + remove_proc_entry ("driver/rtc", NULL); + misc_deregister(&rtc_gen_dev); +} + + +module_init(rtc_generic_init); +module_exit(rtc_generic_exit); + +MODULE_AUTHOR("Richard Zidlicky"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(RTC_MINOR); diff --git a/kernel/drivers/char/hangcheck-timer.c b/kernel/drivers/char/hangcheck-timer.c new file mode 100644 index 000000000..a7c5c5967 --- /dev/null +++ b/kernel/drivers/char/hangcheck-timer.c @@ -0,0 +1,188 @@ +/* + * hangcheck-timer.c + * + * Driver for a little io fencing timer. + * + * Copyright (C) 2002, 2003 Oracle. All rights reserved. + * + * Author: Joel Becker + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +/* + * The hangcheck-timer driver uses the TSC to catch delays that + * jiffies does not notice. A timer is set. When the timer fires, it + * checks whether it was delayed and if that delay exceeds a given + * margin of error. The hangcheck_tick module parameter takes the timer + * duration in seconds. The hangcheck_margin parameter defines the + * margin of error, in seconds. The defaults are 60 seconds for the + * timer and 180 seconds for the margin of error. 
IOW, a timer is set
+ * for 60 seconds.  When the timer fires, the callback checks the
+ * actual duration that the timer waited.  If the duration exceeds the
+ * allotted time and margin (here 60 + 180, or 240 seconds), the machine
+ * is restarted.  A healthy machine will have the duration match the
+ * expected timeout very closely.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/sysrq.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+
+#define VERSION_STR "0.9.1"
+
+#define DEFAULT_IOFENCE_MARGIN 60	/* Default fudge factor, in seconds */
+#define DEFAULT_IOFENCE_TICK 180	/* Default timer timeout, in seconds */
+
+static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
+static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
+static int hangcheck_reboot;  /* Defaults to not reboot */
+static int hangcheck_dump_tasks;  /* Defaults to not dumping SysRQ T */
+
+/* options - modular */
+module_param(hangcheck_tick, int, 0);
+MODULE_PARM_DESC(hangcheck_tick, "Timer delay.");
+module_param(hangcheck_margin, int, 0);
+MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire.");
+module_param(hangcheck_reboot, int, 0);
+MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded.");
+module_param(hangcheck_dump_tasks, int, 0);
+MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded.");
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION_STR);
+
+/* options - nonmodular */
+#ifndef MODULE
+
+static int __init hangcheck_parse_tick(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_tick = par;
+	return 1;
+}
+
+static int __init hangcheck_parse_margin(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_margin = par;
+	return 1;
+}
+
+static int __init hangcheck_parse_reboot(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_reboot = par;
+	return 1;
+}
+
+static int __init hangcheck_parse_dump_tasks(char *str)
+{
+	int par;
+	if (get_option(&str,&par))
+		hangcheck_dump_tasks = par;
+	return 1;
+}
+
+__setup("hcheck_tick", hangcheck_parse_tick);
+__setup("hcheck_margin", hangcheck_parse_margin);
+__setup("hcheck_reboot", hangcheck_parse_reboot);
+__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
+#endif /* not MODULE */
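+
+/* Worked example (editor's note, not in the original source): with the
+ * defaults above, hangcheck_init() arms
+ *
+ *	hangcheck_tsc_margin = (hangcheck_margin + hangcheck_tick)
+ *			       * TIMER_FREQ = 240 * 10^9 ns,
+ *
+ * so hangcheck_fire() only complains (or reboots) when the timer is
+ * serviced more than 240 seconds after it was last armed.
+ */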
+#define TIMER_FREQ 1000000000ULL
+
+/* Last time scheduled */
+static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
+
+static void hangcheck_fire(unsigned long);
+
+static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0);
+
+static void hangcheck_fire(unsigned long data)
+{
+	unsigned long long cur_tsc, tsc_diff;
+
+	cur_tsc = ktime_get_ns();
+
+	if (cur_tsc > hangcheck_tsc)
+		tsc_diff = cur_tsc - hangcheck_tsc;
+	else
+		tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */
+
+	if (tsc_diff > hangcheck_tsc_margin) {
+		if (hangcheck_dump_tasks) {
+			printk(KERN_CRIT "Hangcheck: Task state:\n");
+#ifdef CONFIG_MAGIC_SYSRQ
+			handle_sysrq('t');
+#endif  /* CONFIG_MAGIC_SYSRQ */
+		}
+		if (hangcheck_reboot) {
+			printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+			emergency_restart();
+		} else {
+			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+		}
+	}
+#if 0
+	/*
+	 * Enable to investigate delays in detail
+	 */
+	printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+			tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
+	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+	hangcheck_tsc = ktime_get_ns();
+}
+
+
+static int __init hangcheck_init(void)
+{
+	printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+	       VERSION_STR, hangcheck_tick, hangcheck_margin);
+	hangcheck_tsc_margin =
+		(unsigned long long)hangcheck_margin + hangcheck_tick;
+	hangcheck_tsc_margin *= TIMER_FREQ;
+
+	hangcheck_tsc = ktime_get_ns();
+	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+
+	return 0;
+}
+
+
+static void __exit hangcheck_exit(void)
+{
+	del_timer_sync(&hangcheck_ticktock);
+	printk("Hangcheck: Stopped hangcheck timer.\n");
+}
+
+module_init(hangcheck_init);
+module_exit(hangcheck_exit);
diff --git a/kernel/drivers/char/hpet.c b/kernel/drivers/char/hpet.c
new file mode 100644
index 000000000..5c0baa9ff
--- /dev/null
+++ b/kernel/drivers/char/hpet.c
@@ -0,0 +1,1104 @@
+/*
+ * Intel & MS High Precision Event Timer Implementation.
+ *
+ * Copyright (C) 2003 Intel Corporation
+ *	Venki Pallipadi
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ *	Bob Picco <robert.picco@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/sysctl.h>
+#include <linux/wait.h>
+#include <linux/bcd.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/clocksource.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/hpet.h>
+#include <asm/current.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+/*
+ * The High Precision Event Timer driver.
+ * This driver is closely modelled after the rtc.c driver.
+ * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
+ */
+#define	HPET_USER_FREQ	(64)
+#define	HPET_DRIFT	(500)
+
+#define HPET_RANGE_SIZE		1024	/* from HPET spec */
+
+
+/* WARNING -- don't get confused.  These macros are never used
+ * to write the (single) counter, and rarely to read it.
+ * They're badly named; to fix, someday.
+ */
+#if BITS_PER_LONG == 64
+#define	write_counter(V, MC)	writeq(V, MC)
+#define	read_counter(MC)	readq(MC)
+#else
+#define	write_counter(V, MC)	writel(V, MC)
+#define	read_counter(MC)	readl(MC)
+#endif
+
+static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
+static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+static void __iomem *hpet_mctr;
+
+static cycle_t read_hpet(struct clocksource *cs)
+{
+	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
+}
+
+static struct clocksource clocksource_hpet = {
+	.name		= "hpet",
+	.rating		= 250,
+	.read		= read_hpet,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+static struct clocksource *hpet_clocksource;
+#endif
+
+/* A lock for concurrent access by app and isr hpet activity.
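+ * (It also guards each timer's hd_irqdata counter, which
+ * hpet_interrupt() increments and hpet_read()/hpet_poll() consume.)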
*/ +static DEFINE_SPINLOCK(hpet_lock); + +#define HPET_DEV_NAME (7) + +struct hpet_dev { + struct hpets *hd_hpets; + struct hpet __iomem *hd_hpet; + struct hpet_timer __iomem *hd_timer; + unsigned long hd_ireqfreq; + unsigned long hd_irqdata; + wait_queue_head_t hd_waitqueue; + struct fasync_struct *hd_async_queue; + unsigned int hd_flags; + unsigned int hd_irq; + unsigned int hd_hdwirq; + char hd_name[HPET_DEV_NAME]; +}; + +struct hpets { + struct hpets *hp_next; + struct hpet __iomem *hp_hpet; + unsigned long hp_hpet_phys; + struct clocksource *hp_clocksource; + unsigned long long hp_tick_freq; + unsigned long hp_delta; + unsigned int hp_ntimer; + unsigned int hp_which; + struct hpet_dev hp_dev[1]; +}; + +static struct hpets *hpets; + +#define HPET_OPEN 0x0001 +#define HPET_IE 0x0002 /* interrupt enabled */ +#define HPET_PERIODIC 0x0004 +#define HPET_SHARED_IRQ 0x0008 + + +#ifndef readq +static inline unsigned long long readq(void __iomem *addr) +{ + return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(unsigned long long v, void __iomem *addr) +{ + writel(v & 0xffffffff, addr); + writel(v >> 32, addr + 4); +} +#endif + +static irqreturn_t hpet_interrupt(int irq, void *data) +{ + struct hpet_dev *devp; + unsigned long isr; + + devp = data; + isr = 1 << (devp - devp->hd_hpets->hp_dev); + + if ((devp->hd_flags & HPET_SHARED_IRQ) && + !(isr & readl(&devp->hd_hpet->hpet_isr))) + return IRQ_NONE; + + spin_lock(&hpet_lock); + devp->hd_irqdata++; + + /* + * For non-periodic timers, increment the accumulator. + * This has the effect of treating non-periodic like periodic. + */ + if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { + unsigned long m, t, mc, base, k; + struct hpet __iomem *hpet = devp->hd_hpet; + struct hpets *hpetp = devp->hd_hpets; + + t = devp->hd_ireqfreq; + m = read_counter(&devp->hd_timer->hpet_compare); + mc = read_counter(&hpet->hpet_mc); + /* The time for the next interrupt would logically be t + m, + * however, if we are very unlucky and the interrupt is delayed + * for longer than t then we will completely miss the next + * interrupt if we set t + m and an application will hang. + * Therefore we need to make a more complex computation assuming + * that there exists a k for which the following is true: + * k * t + base < mc + delta + * (k + 1) * t + base > mc + delta + * where t is the interval in hpet ticks for the given freq, + * base is the theoretical start value 0 < base < t, + * mc is the main counter value at the time of the interrupt, + * delta is the time it takes to write the a value to the + * comparator. + * k may then be computed as (mc - base + delta) / t . 
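+	 *
+	 * Worked example (editor's note): with t = 100, mc = 1003 and
+	 * delta = 10, base = mc % t = 3, k = (1003 - 3 + 10) / 100 = 10,
+	 * and the comparator is armed at t * (k + 1) + base = 1103,
+	 * safely after mc + delta = 1013.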
+ */ + base = mc % t; + k = (mc - base + hpetp->hp_delta) / t; + write_counter(t * (k + 1) + base, + &devp->hd_timer->hpet_compare); + } + + if (devp->hd_flags & HPET_SHARED_IRQ) + writel(isr, &devp->hd_hpet->hpet_isr); + spin_unlock(&hpet_lock); + + wake_up_interruptible(&devp->hd_waitqueue); + + kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN); + + return IRQ_HANDLED; +} + +static void hpet_timer_set_irq(struct hpet_dev *devp) +{ + unsigned long v; + int irq, gsi; + struct hpet_timer __iomem *timer; + + spin_lock_irq(&hpet_lock); + if (devp->hd_hdwirq) { + spin_unlock_irq(&hpet_lock); + return; + } + + timer = devp->hd_timer; + + /* we prefer level triggered mode */ + v = readl(&timer->hpet_config); + if (!(v & Tn_INT_TYPE_CNF_MASK)) { + v |= Tn_INT_TYPE_CNF_MASK; + writel(v, &timer->hpet_config); + } + spin_unlock_irq(&hpet_lock); + + v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >> + Tn_INT_ROUTE_CAP_SHIFT; + + /* + * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15 which is always used by + * legacy device. In IO APIC mode, we skip all the legacy IRQS. + */ + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) + v &= ~0xf3df; + else + v &= ~0xffff; + + for_each_set_bit(irq, &v, HPET_MAX_IRQ) { + if (irq >= nr_irqs) { + irq = HPET_MAX_IRQ; + break; + } + + gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE, + ACPI_ACTIVE_LOW); + if (gsi > 0) + break; + + /* FIXME: Setup interrupt source table */ + } + + if (irq < HPET_MAX_IRQ) { + spin_lock_irq(&hpet_lock); + v = readl(&timer->hpet_config); + v |= irq << Tn_INT_ROUTE_CNF_SHIFT; + writel(v, &timer->hpet_config); + devp->hd_hdwirq = gsi; + spin_unlock_irq(&hpet_lock); + } + return; +} + +static int hpet_open(struct inode *inode, struct file *file) +{ + struct hpet_dev *devp; + struct hpets *hpetp; + int i; + + if (file->f_mode & FMODE_WRITE) + return -EINVAL; + + mutex_lock(&hpet_mutex); + spin_lock_irq(&hpet_lock); + + for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) + for (i = 0; i < hpetp->hp_ntimer; i++) + if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) + continue; + else { + devp = &hpetp->hp_dev[i]; + break; + } + + if (!devp) { + spin_unlock_irq(&hpet_lock); + mutex_unlock(&hpet_mutex); + return -EBUSY; + } + + file->private_data = devp; + devp->hd_irqdata = 0; + devp->hd_flags |= HPET_OPEN; + spin_unlock_irq(&hpet_lock); + mutex_unlock(&hpet_mutex); + + hpet_timer_set_irq(devp); + + return 0; +} + +static ssize_t +hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long data; + ssize_t retval; + struct hpet_dev *devp; + + devp = file->private_data; + if (!devp->hd_ireqfreq) + return -EIO; + + if (count < sizeof(unsigned long)) + return -EINVAL; + + add_wait_queue(&devp->hd_waitqueue, &wait); + + for ( ; ; ) { + set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irq(&hpet_lock); + data = devp->hd_irqdata; + devp->hd_irqdata = 0; + spin_unlock_irq(&hpet_lock); + + if (data) + break; + else if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } else if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + schedule(); + } + + retval = put_user(data, (unsigned long __user *)buf); + if (!retval) + retval = sizeof(unsigned long); +out: + __set_current_state(TASK_RUNNING); + remove_wait_queue(&devp->hd_waitqueue, &wait); + + return retval; +} + +static unsigned int hpet_poll(struct file *file, poll_table * wait) +{ + unsigned long v; + struct hpet_dev *devp; + + devp = file->private_data; + + if 
(!devp->hd_ireqfreq) + return 0; + + poll_wait(file, &devp->hd_waitqueue, wait); + + spin_lock_irq(&hpet_lock); + v = devp->hd_irqdata; + spin_unlock_irq(&hpet_lock); + + if (v != 0) + return POLLIN | POLLRDNORM; + + return 0; +} + +#ifdef CONFIG_HPET_MMAP +#ifdef CONFIG_HPET_MMAP_DEFAULT +static int hpet_mmap_enabled = 1; +#else +static int hpet_mmap_enabled = 0; +#endif + +static __init int hpet_mmap_enable(char *str) +{ + get_option(&str, &hpet_mmap_enabled); + pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled"); + return 1; +} +__setup("hpet_mmap", hpet_mmap_enable); + +static int hpet_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct hpet_dev *devp; + unsigned long addr; + + if (!hpet_mmap_enabled) + return -EACCES; + + devp = file->private_data; + addr = devp->hd_hpets->hp_hpet_phys; + + if (addr & (PAGE_SIZE - 1)) + return -ENOSYS; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return vm_iomap_memory(vma, addr, PAGE_SIZE); +} +#else +static int hpet_mmap(struct file *file, struct vm_area_struct *vma) +{ + return -ENOSYS; +} +#endif + +static int hpet_fasync(int fd, struct file *file, int on) +{ + struct hpet_dev *devp; + + devp = file->private_data; + + if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0) + return 0; + else + return -EIO; +} + +static int hpet_release(struct inode *inode, struct file *file) +{ + struct hpet_dev *devp; + struct hpet_timer __iomem *timer; + int irq = 0; + + devp = file->private_data; + timer = devp->hd_timer; + + spin_lock_irq(&hpet_lock); + + writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), + &timer->hpet_config); + + irq = devp->hd_irq; + devp->hd_irq = 0; + + devp->hd_ireqfreq = 0; + + if (devp->hd_flags & HPET_PERIODIC + && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { + unsigned long v; + + v = readq(&timer->hpet_config); + v ^= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + } + + devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC); + spin_unlock_irq(&hpet_lock); + + if (irq) + free_irq(irq, devp); + + file->private_data = NULL; + return 0; +} + +static int hpet_ioctl_ieon(struct hpet_dev *devp) +{ + struct hpet_timer __iomem *timer; + struct hpet __iomem *hpet; + struct hpets *hpetp; + int irq; + unsigned long g, v, t, m; + unsigned long flags, isr; + + timer = devp->hd_timer; + hpet = devp->hd_hpet; + hpetp = devp->hd_hpets; + + if (!devp->hd_ireqfreq) + return -EIO; + + spin_lock_irq(&hpet_lock); + + if (devp->hd_flags & HPET_IE) { + spin_unlock_irq(&hpet_lock); + return -EBUSY; + } + + devp->hd_flags |= HPET_IE; + + if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK) + devp->hd_flags |= HPET_SHARED_IRQ; + spin_unlock_irq(&hpet_lock); + + irq = devp->hd_hdwirq; + + if (irq) { + unsigned long irq_flags; + + if (devp->hd_flags & HPET_SHARED_IRQ) { + /* + * To prevent the interrupt handler from seeing an + * unwanted interrupt status bit, program the timer + * so that it will not fire in the near future ... + */ + writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK, + &timer->hpet_config); + write_counter(read_counter(&hpet->hpet_mc), + &timer->hpet_compare); + /* ... and clear any left-over status. */ + isr = 1 << (devp - devp->hd_hpets->hp_dev); + writel(isr, &hpet->hpet_isr); + } + + sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev)); + irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? 
IRQF_SHARED : 0; + if (request_irq(irq, hpet_interrupt, irq_flags, + devp->hd_name, (void *)devp)) { + printk(KERN_ERR "hpet: IRQ %d is not free\n", irq); + irq = 0; + } + } + + if (irq == 0) { + spin_lock_irq(&hpet_lock); + devp->hd_flags ^= HPET_IE; + spin_unlock_irq(&hpet_lock); + return -EIO; + } + + devp->hd_irq = irq; + t = devp->hd_ireqfreq; + v = readq(&timer->hpet_config); + + /* 64-bit comparators are not yet supported through the ioctls, + * so force this into 32-bit mode if it supports both modes + */ + g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK; + + if (devp->hd_flags & HPET_PERIODIC) { + g |= Tn_TYPE_CNF_MASK; + v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK; + writeq(v, &timer->hpet_config); + local_irq_save(flags); + + /* + * NOTE: First we modify the hidden accumulator + * register supported by periodic-capable comparators. + * We never want to modify the (single) counter; that + * would affect all the comparators. The value written + * is the counter value when the first interrupt is due. + */ + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + /* + * Then we modify the comparator, indicating the period + * for subsequent interrupt. + */ + write_counter(t, &timer->hpet_compare); + } else { + local_irq_save(flags); + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } + + if (devp->hd_flags & HPET_SHARED_IRQ) { + isr = 1 << (devp - devp->hd_hpets->hp_dev); + writel(isr, &hpet->hpet_isr); + } + writeq(g, &timer->hpet_config); + local_irq_restore(flags); + + return 0; +} + +/* converts Hz to number of timer ticks */ +static inline unsigned long hpet_time_div(struct hpets *hpets, + unsigned long dis) +{ + unsigned long long m; + + m = hpets->hp_tick_freq + (dis >> 1); + do_div(m, dis); + return (unsigned long)m; +} + +static int +hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, + struct hpet_info *info) +{ + struct hpet_timer __iomem *timer; + struct hpet __iomem *hpet; + struct hpets *hpetp; + int err; + unsigned long v; + + switch (cmd) { + case HPET_IE_OFF: + case HPET_INFO: + case HPET_EPI: + case HPET_DPI: + case HPET_IRQFREQ: + timer = devp->hd_timer; + hpet = devp->hd_hpet; + hpetp = devp->hd_hpets; + break; + case HPET_IE_ON: + return hpet_ioctl_ieon(devp); + default: + return -EINVAL; + } + + err = 0; + + switch (cmd) { + case HPET_IE_OFF: + if ((devp->hd_flags & HPET_IE) == 0) + break; + v = readq(&timer->hpet_config); + v &= ~Tn_INT_ENB_CNF_MASK; + writeq(v, &timer->hpet_config); + if (devp->hd_irq) { + free_irq(devp->hd_irq, devp); + devp->hd_irq = 0; + } + devp->hd_flags ^= HPET_IE; + break; + case HPET_INFO: + { + memset(info, 0, sizeof(*info)); + if (devp->hd_ireqfreq) + info->hi_ireqfreq = + hpet_time_div(hpetp, devp->hd_ireqfreq); + info->hi_flags = + readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; + info->hi_hpet = hpetp->hp_which; + info->hi_timer = devp - hpetp->hp_dev; + break; + } + case HPET_EPI: + v = readq(&timer->hpet_config); + if ((v & Tn_PER_INT_CAP_MASK) == 0) { + err = -ENXIO; + break; + } + devp->hd_flags |= HPET_PERIODIC; + break; + case HPET_DPI: + v = readq(&timer->hpet_config); + if ((v & Tn_PER_INT_CAP_MASK) == 0) { + err = -ENXIO; + break; + } + if (devp->hd_flags & HPET_PERIODIC && + readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { + v = readq(&timer->hpet_config); + v ^= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + } + devp->hd_flags &= ~HPET_PERIODIC; + break; + case HPET_IRQFREQ: + if ((arg > 
hpet_max_freq) && + !capable(CAP_SYS_RESOURCE)) { + err = -EACCES; + break; + } + + if (!arg) { + err = -EINVAL; + break; + } + + devp->hd_ireqfreq = hpet_time_div(hpetp, arg); + } + + return err; +} + +static long +hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hpet_info info; + int err; + + mutex_lock(&hpet_mutex); + err = hpet_ioctl_common(file->private_data, cmd, arg, &info); + mutex_unlock(&hpet_mutex); + + if ((cmd == HPET_INFO) && !err && + (copy_to_user((void __user *)arg, &info, sizeof(info)))) + err = -EFAULT; + + return err; +} + +#ifdef CONFIG_COMPAT +struct compat_hpet_info { + compat_ulong_t hi_ireqfreq; /* Hz */ + compat_ulong_t hi_flags; /* information */ + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +static long +hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hpet_info info; + int err; + + mutex_lock(&hpet_mutex); + err = hpet_ioctl_common(file->private_data, cmd, arg, &info); + mutex_unlock(&hpet_mutex); + + if ((cmd == HPET_INFO) && !err) { + struct compat_hpet_info __user *u = compat_ptr(arg); + if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) || + put_user(info.hi_flags, &u->hi_flags) || + put_user(info.hi_hpet, &u->hi_hpet) || + put_user(info.hi_timer, &u->hi_timer)) + err = -EFAULT; + } + + return err; +} +#endif + +static const struct file_operations hpet_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = hpet_read, + .poll = hpet_poll, + .unlocked_ioctl = hpet_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = hpet_compat_ioctl, +#endif + .open = hpet_open, + .release = hpet_release, + .fasync = hpet_fasync, + .mmap = hpet_mmap, +}; + +static int hpet_is_known(struct hpet_data *hdp) +{ + struct hpets *hpetp; + + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (hpetp->hp_hpet_phys == hdp->hd_phys_address) + return 1; + + return 0; +} + +static struct ctl_table hpet_table[] = { + { + .procname = "max-user-freq", + .data = &hpet_max_freq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + {} +}; + +static struct ctl_table hpet_root[] = { + { + .procname = "hpet", + .maxlen = 0, + .mode = 0555, + .child = hpet_table, + }, + {} +}; + +static struct ctl_table dev_root[] = { + { + .procname = "dev", + .maxlen = 0, + .mode = 0555, + .child = hpet_root, + }, + {} +}; + +static struct ctl_table_header *sysctl_header; + +/* + * Adjustment for when arming the timer with + * initial conditions. That is, main counter + * ticks expired before interrupts are enabled. 
+ */ +#define TICK_CALIBRATE (1000UL) + +static unsigned long __hpet_calibrate(struct hpets *hpetp) +{ + struct hpet_timer __iomem *timer = NULL; + unsigned long t, m, count, i, flags, start; + struct hpet_dev *devp; + int j; + struct hpet __iomem *hpet; + + for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++) + if ((devp->hd_flags & HPET_OPEN) == 0) { + timer = devp->hd_timer; + break; + } + + if (!timer) + return 0; + + hpet = hpetp->hp_hpet; + t = read_counter(&timer->hpet_compare); + + i = 0; + count = hpet_time_div(hpetp, TICK_CALIBRATE); + + local_irq_save(flags); + + start = read_counter(&hpet->hpet_mc); + + do { + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } while (i++, (m - start) < count); + + local_irq_restore(flags); + + return (m - start) / i; +} + +static unsigned long hpet_calibrate(struct hpets *hpetp) +{ + unsigned long ret = ~0UL; + unsigned long tmp; + + /* + * Try to calibrate until the return value becomes a stable, small + * value. If an SMI occurs during a calibration pass, that pass + * measures a large value; retrying until the result stops + * decreasing filters out such outliers. + */ + for ( ; ; ) { + tmp = __hpet_calibrate(hpetp); + if (ret <= tmp) + break; + ret = tmp; + } + + return ret; +} + +int hpet_alloc(struct hpet_data *hdp) +{ + u64 cap, mcfg; + struct hpet_dev *devp; + u32 i, ntimer; + struct hpets *hpetp; + size_t siz; + struct hpet __iomem *hpet; + static struct hpets *last; + unsigned long period; + unsigned long long temp; + u32 remainder; + + /* + * hpet_alloc can be called by platform dependent code. + * If platform dependent code has allocated the hpet that + * ACPI has also reported, then we catch it here. + */ + if (hpet_is_known(hdp)) { + printk(KERN_DEBUG "%s: duplicate HPET ignored\n", + __func__); + return 0; + } + + siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * + sizeof(struct hpet_dev)); + + hpetp = kzalloc(siz, GFP_KERNEL); + + if (!hpetp) + return -ENOMEM; + + hpetp->hp_which = hpet_nhpet++; + hpetp->hp_hpet = hdp->hd_address; + hpetp->hp_hpet_phys = hdp->hd_phys_address; + + hpetp->hp_ntimer = hdp->hd_nirqs; + + for (i = 0; i < hdp->hd_nirqs; i++) + hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; + + hpet = hpetp->hp_hpet; + + cap = readq(&hpet->hpet_cap); + + ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1; + + if (hpetp->hp_ntimer != ntimer) { + printk(KERN_WARNING "hpet: number of irqs doesn't agree" + " with number of timers\n"); + kfree(hpetp); + return -ENODEV; + } + + if (last) + last->hp_next = hpetp; + else + hpets = hpetp; + + last = hpetp; + + period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> + HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */ + temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */ + temp += period >> 1; /* round */ + do_div(temp, period); + hpetp->hp_tick_freq = temp; /* ticks per second */ + + printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s", + hpetp->hp_which, hdp->hd_phys_address, + hpetp->hp_ntimer > 1 ? "s" : ""); + for (i = 0; i < hpetp->hp_ntimer; i++) + printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); + printk(KERN_CONT "\n"); + + temp = hpetp->hp_tick_freq; + remainder = do_div(temp, 1000000); + printk(KERN_INFO + "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n", + hpetp->hp_which, hpetp->hp_ntimer, + cap & HPET_COUNTER_SIZE_MASK ?
64 : 32, + (unsigned) temp, remainder); + + mcfg = readq(&hpet->hpet_config); + if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { + write_counter(0L, &hpet->hpet_mc); + mcfg |= HPET_ENABLE_CNF_MASK; + writeq(mcfg, &hpet->hpet_config); + } + + for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) { + struct hpet_timer __iomem *timer; + + timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; + + devp->hd_hpets = hpetp; + devp->hd_hpet = hpet; + devp->hd_timer = timer; + + /* + * If the timer was reserved by platform code, + * then make timer unavailable for opens. + */ + if (hdp->hd_state & (1 << i)) { + devp->hd_flags = HPET_OPEN; + continue; + } + + init_waitqueue_head(&devp->hd_waitqueue); + } + + hpetp->hp_delta = hpet_calibrate(hpetp); + +/* This clocksource driver currently only works on ia64 */ +#ifdef CONFIG_IA64 + if (!hpet_clocksource) { + hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; + clocksource_hpet.archdata.fsys_mmio = hpet_mctr; + clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq); + hpetp->hp_clocksource = &clocksource_hpet; + hpet_clocksource = &clocksource_hpet; + } +#endif + + return 0; +} + +static acpi_status hpet_resources(struct acpi_resource *res, void *data) +{ + struct hpet_data *hdp; + acpi_status status; + struct acpi_resource_address64 addr; + + hdp = data; + + status = acpi_resource_to_address64(res, &addr); + + if (ACPI_SUCCESS(status)) { + hdp->hd_phys_address = addr.address.minimum; + hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length); + + if (hpet_is_known(hdp)) { + iounmap(hdp->hd_address); + return AE_ALREADY_EXISTS; + } + } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { + struct acpi_resource_fixed_memory32 *fixmem32; + + fixmem32 = &res->data.fixed_memory32; + + hdp->hd_phys_address = fixmem32->address; + hdp->hd_address = ioremap(fixmem32->address, + HPET_RANGE_SIZE); + + if (hpet_is_known(hdp)) { + iounmap(hdp->hd_address); + return AE_ALREADY_EXISTS; + } + } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { + struct acpi_resource_extended_irq *irqp; + int i, irq; + + irqp = &res->data.extended_irq; + + for (i = 0; i < irqp->interrupt_count; i++) { + if (hdp->hd_nirqs >= HPET_MAX_TIMERS) + break; + + irq = acpi_register_gsi(NULL, irqp->interrupts[i], + irqp->triggering, irqp->polarity); + if (irq < 0) + return AE_ERROR; + + hdp->hd_irq[hdp->hd_nirqs] = irq; + hdp->hd_nirqs++; + } + } + + return AE_OK; +} + +static int hpet_acpi_add(struct acpi_device *device) +{ + acpi_status result; + struct hpet_data data; + + memset(&data, 0, sizeof(data)); + + result = + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + hpet_resources, &data); + + if (ACPI_FAILURE(result)) + return -ENODEV; + + if (!data.hd_address || !data.hd_nirqs) { + if (data.hd_address) + iounmap(data.hd_address); + printk("%s: no address or irqs in _CRS\n", __func__); + return -ENODEV; + } + + return hpet_alloc(&data); +} + +static int hpet_acpi_remove(struct acpi_device *device) +{ + /* XXX need to unregister clocksource, dealloc mem, etc */ + return -EINVAL; +} + +static const struct acpi_device_id hpet_device_ids[] = { + {"PNP0103", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, hpet_device_ids); + +static struct acpi_driver hpet_acpi_driver = { + .name = "hpet", + .ids = hpet_device_ids, + .ops = { + .add = hpet_acpi_add, + .remove = hpet_acpi_remove, + }, +}; + +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; + +static int __init hpet_init(void) +{ + int result; + + result = 
misc_register(&hpet_misc); + if (result < 0) + return -ENODEV; + + sysctl_header = register_sysctl_table(dev_root); + + result = acpi_bus_register_driver(&hpet_acpi_driver); + if (result < 0) { + if (sysctl_header) + unregister_sysctl_table(sysctl_header); + misc_deregister(&hpet_misc); + return result; + } + + return 0; +} + +static void __exit hpet_exit(void) +{ + acpi_bus_unregister_driver(&hpet_acpi_driver); + + if (sysctl_header) + unregister_sysctl_table(sysctl_header); + misc_deregister(&hpet_misc); + + return; +} + +module_init(hpet_init); +module_exit(hpet_exit); +MODULE_AUTHOR("Bob Picco "); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/Kconfig b/kernel/drivers/char/hw_random/Kconfig new file mode 100644 index 000000000..f48cf11c6 --- /dev/null +++ b/kernel/drivers/char/hw_random/Kconfig @@ -0,0 +1,377 @@ +# +# Hardware Random Number Generator (RNG) configuration +# + +menuconfig HW_RANDOM + tristate "Hardware Random Number Generator Core support" + default m + ---help--- + Hardware Random Number Generator Core infrastructure. + + To compile this driver as a module, choose M here: the + module will be called rng-core. This provides a device + that's usually called /dev/hw_random, and which exposes one + of possibly several hardware random number generators. + + These hardware random number generators do not feed directly + into the kernel's random number generator. That is usually + handled by the "rngd" daemon. Documentation/hw_random.txt + has more information. + + If unsure, say Y. + +if HW_RANDOM + +config HW_RANDOM_TIMERIOMEM + tristate "Timer IOMEM HW Random Number Generator support" + depends on HAS_IOMEM + ---help--- + This driver provides kernel-side support for a generic Random + Number Generator used by reading a 'dumb' iomem address that + is to be read no faster than, for example, once a second; + the default FPGA bitstream on the TS-7800 has such functionality. + + To compile this driver as a module, choose M here: the + module will be called timeriomem-rng. + + If unsure, say Y. + +config HW_RANDOM_INTEL + tristate "Intel HW Random Number Generator support" + depends on (X86 || IA64) && PCI + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Intel i8xx-based motherboards. + + To compile this driver as a module, choose M here: the + module will be called intel-rng. + + If unsure, say Y. + +config HW_RANDOM_AMD + tristate "AMD HW Random Number Generator support" + depends on (X86 || PPC_MAPLE) && PCI + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on AMD 76x-based motherboards. + + To compile this driver as a module, choose M here: the + module will be called amd-rng. + + If unsure, say Y. + +config HW_RANDOM_ATMEL + tristate "Atmel Random Number Generator support" + depends on ARCH_AT91 && HAVE_CLK && OF + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Atmel AT91 devices. + + To compile this driver as a module, choose M here: the + module will be called atmel-rng. + + If unsure, say Y. + +config HW_RANDOM_BCM63XX + tristate "Broadcom BCM63xx Random Number Generator support" + depends on BCM63XX + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on the Broadcom BCM63xx SoCs. 
+ + To compile this driver as a module, choose M here: the + module will be called bcm63xx-rng. + + If unsure, say Y. + +config HW_RANDOM_BCM2835 + tristate "Broadcom BCM2835 Random Number Generator support" + depends on ARCH_BCM2835 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on the Broadcom BCM2835 SoCs. + + To compile this driver as a module, choose M here: the + module will be called bcm2835-rng. + + If unsure, say Y. + +config HW_RANDOM_IPROC_RNG200 + tristate "Broadcom iProc RNG200 support" + depends on ARCH_BCM_IPROC + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the RNG200 + hardware found on the Broadcom iProc SoCs. + + To compile this driver as a module, choose M here: the + module will be called iproc-rng200. + + If unsure, say Y. + +config HW_RANDOM_GEODE + tristate "AMD Geode HW Random Number Generator support" + depends on X86_32 && PCI + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on the AMD Geode LX. + + To compile this driver as a module, choose M here: the + module will be called geode-rng. + + If unsure, say Y. + +config HW_RANDOM_N2RNG + tristate "Niagara2 Random Number Generator support" + depends on SPARC64 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Niagara2 cpus. + + To compile this driver as a module, choose M here: the + module will be called n2-rng. + + If unsure, say Y. + +config HW_RANDOM_VIA + tristate "VIA HW Random Number Generator support" + depends on X86 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on VIA based motherboards. + + To compile this driver as a module, choose M here: the + module will be called via-rng. + + If unsure, say Y. + +config HW_RANDOM_IXP4XX + tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" + depends on ARCH_IXP4XX + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Pseudo-Random + Number Generator hardware found on the Intel IXP45x/46x NPU. + + To compile this driver as a module, choose M here: the + module will be called ixp4xx-rng. + + If unsure, say Y. + +config HW_RANDOM_OMAP + tristate "OMAP Random Number Generator support" + depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on OMAP16xx, OMAP2/3/4/5 and AM33xx/AM43xx + multimedia processors. + + To compile this driver as a module, choose M here: the + module will be called omap-rng. + + If unsure, say Y. + +config HW_RANDOM_OMAP3_ROM + tristate "OMAP3 ROM Random Number Generator support" + depends on ARCH_OMAP3 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on OMAP34xx processors. + + To compile this driver as a module, choose M here: the + module will be called omap3-rom-rng. + + If unsure, say Y. + +config HW_RANDOM_OCTEON + tristate "Octeon Random Number Generator support" + depends on CAVIUM_OCTEON_SOC + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Octeon processors. + + To compile this driver as a module, choose M here: the + module will be called octeon-rng. + + If unsure, say Y.
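All of the driver entries in this file gate hardware back-ends for the rng-core character device described in the HW_RANDOM help above. As a minimal illustrative sketch (not part of the imported kernel sources), a userspace consumer along the lines of rngd could read the canonical /dev/hwrng node like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[32];
        ssize_t i, n;
        int fd;

        fd = open("/dev/hwrng", O_RDONLY);  /* node created by rng-core */
        if (fd < 0) {
                perror("open /dev/hwrng");
                return 1;
        }
        n = read(fd, buf, sizeof(buf));     /* blocks until the RNG has data */
        if (n < 0) {
                perror("read");
                close(fd);
                return 1;
        }
        for (i = 0; i < n; i++)
                printf("%02x", buf[i]);
        putchar('\n');
        close(fd);
        return 0;
}

This prints one sample in hex; rngd does the same in a loop and injects the bytes into /dev/random, since these drivers do not feed the kernel's entropy pool directly.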
+ +config HW_RANDOM_PASEMI + tristate "PA Semi HW Random Number Generator support" + depends on PPC_PASEMI + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on PA Semi PWRficient SoCs. + + To compile this driver as a module, choose M here: the + module will be called pasemi-rng. + + If unsure, say Y. + +config HW_RANDOM_VIRTIO + tristate "VirtIO Random Number Generator support" + depends on VIRTIO + ---help--- + This driver provides kernel-side support for the virtual Random Number + Generator hardware. + + To compile this driver as a module, choose M here: the + module will be called virtio-rng. If unsure, say N. + +config HW_RANDOM_TX4939 + tristate "TX4939 Random Number Generator support" + depends on SOC_TX4939 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on TX4939 SoC. + + To compile this driver as a module, choose M here: the + module will be called tx4939-rng. + + If unsure, say Y. + +config HW_RANDOM_MXC_RNGA + tristate "Freescale i.MX RNGA Random Number Generator" + depends on ARCH_HAS_RNGA + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Freescale i.MX processors. + + To compile this driver as a module, choose M here: the + module will be called mxc-rnga. + + If unsure, say Y. + +config HW_RANDOM_NOMADIK + tristate "ST-Ericsson Nomadik Random Number Generator support" + depends on ARCH_NOMADIK + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on ST-Ericsson SoCs (8815 and 8500). + + To compile this driver as a module, choose M here: the + module will be called nomadik-rng. + + If unsure, say Y. + +config HW_RANDOM_PPC4XX + tristate "PowerPC 4xx generic true random number generator support" + depends on PPC && 4xx + default HW_RANDOM + ---help--- + This driver provides the kernel-side support for the TRNG hardware + found in the security function of some PowerPC 4xx SoCs. + + To compile this driver as a module, choose M here: the + module will be called ppc4xx-rng. + + If unsure, say N. + +config HW_RANDOM_PSERIES + tristate "pSeries HW Random Number Generator support" + depends on PPC64 && IBMVIO + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on POWER7+ machines and above + + To compile this driver as a module, choose M here: the + module will be called pseries-rng. + + If unsure, say Y. + +config HW_RANDOM_POWERNV + tristate "PowerNV Random Number Generator support" + depends on PPC_POWERNV + default HW_RANDOM + ---help--- + This is the driver for Random Number Generator hardware found + in POWER7+ and above machines for PowerNV platform. + + To compile this driver as a module, choose M here: the + module will be called powernv-rng. + + If unsure, say Y. + +config HW_RANDOM_EXYNOS + tristate "EXYNOS HW random number generator support" + depends on ARCH_EXYNOS + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on EXYNOS SOCs. + + To compile this driver as a module, choose M here: the + module will be called exynos-rng. + + If unsure, say Y. 
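Every option above builds a driver against the single core API added later in this patch: each driver fills in a struct hwrng with a name plus either a read() callback or a data_present()/data_read() pair, then hands it to hwrng_register(). A hedged sketch of that registration shape, using a hypothetical zero-filling "demo" device purely for illustration (not a real entropy source):

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

/* Hypothetical back-end: zero-fills instead of reading real hardware. */
static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        memset(buf, 0, max);    /* a real driver reads device registers here */
        return max;             /* number of bytes produced */
}

static struct hwrng demo_rng = {
        .name = "demo",
        .read = demo_rng_read,
};

static int __init demo_rng_init(void)
{
        return hwrng_register(&demo_rng);
}

static void __exit demo_rng_exit(void)
{
        hwrng_unregister(&demo_rng);
}

module_init(demo_rng_init);
module_exit(demo_rng_exit);
MODULE_DESCRIPTION("Illustrative hwrng registration sketch");
MODULE_LICENSE("GPL");

Once registered, the device is listed by the rng_available sysfs attribute and can be chosen through rng_current, as implemented in core.c below.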
+ +config HW_RANDOM_TPM + tristate "TPM HW Random Number Generator support" + depends on TCG_TPM + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator in the Trusted Platform Module. + + To compile this driver as a module, choose M here: the + module will be called tpm-rng. + + If unsure, say Y. + +config HW_RANDOM_MSM + tristate "Qualcomm SoCs Random Number Generator support" + depends on HW_RANDOM && ARCH_QCOM + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Qualcomm SoCs. + + To compile this driver as a module, choose M here: the + module will be called msm-rng. + + If unsure, say Y. + +config HW_RANDOM_XGENE + tristate "APM X-Gene True Random Number Generator (TRNG) support" + depends on HW_RANDOM && ARCH_XGENE + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on APM X-Gene SoC. + + To compile this driver as a module, choose M here: the + module will be called xgene-rng. + + If unsure, say Y. + +endif # HW_RANDOM + +config UML_RANDOM + depends on UML + tristate "Hardware random number generator" + help + This option enables UML's "hardware" random number generator. It + attaches itself to the host's /dev/random, supplying as much entropy + as the host has, rather than the small amount the UML gets from its + own drivers. It registers itself as a standard hardware random number + generator, major 10, minor 183, and the canonical device name is + /dev/hwrng. + The way to make use of this is to install the rng-tools package + (check your distro, or download from + http://sourceforge.net/projects/gkernel/). rngd periodically reads + /dev/hwrng and injects the entropy into /dev/random. diff --git a/kernel/drivers/char/hw_random/Makefile b/kernel/drivers/char/hw_random/Makefile new file mode 100644 index 000000000..055bb0151 --- /dev/null +++ b/kernel/drivers/char/hw_random/Makefile @@ -0,0 +1,33 @@ +# +# Makefile for HW Random Number Generator (RNG) device drivers.
+# + +obj-$(CONFIG_HW_RANDOM) += rng-core.o +rng-core-y := core.o +obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o +obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o +obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o +obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o +obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o +obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o +obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o +n2-rng-y := n2-drv.o n2-asm.o +obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o +obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o +obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o +obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o +obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o +obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o +obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o +obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o +obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o +obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o +obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o +obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o +obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o +obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o +obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o +obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o +obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o +obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o +obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o diff --git a/kernel/drivers/char/hw_random/amd-rng.c b/kernel/drivers/char/hw_random/amd-rng.c new file mode 100644 index 000000000..48f6a83cd --- /dev/null +++ b/kernel/drivers/char/hw_random/amd-rng.c @@ -0,0 +1,169 @@ +/* + * RNG driver for AMD RNGs + * + * Copyright 2005 (c) MontaVista Software, Inc. + * + * with the majority of the code coming from: + * + * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) + * (c) Copyright 2003 Red Hat Inc + * + * derived from + * + * Hardware driver for the AMD 768 Random Number Generator (RNG) + * (c) Copyright 2001 Red Hat Inc + * + * derived from + * + * Hardware driver for Intel i810 Random Number Generator (RNG) + * Copyright 2000,2001 Jeff Garzik + * Copyright 2000,2001 Philipp Rumpf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include + + +#define PFX KBUILD_MODNAME ": " + + +/* + * Data for PCI driver interface + * + * This data only exists for exporting the supported + * PCI ids via MODULE_DEVICE_TABLE. We do not actually + * register a pci_driver, because someone else might one day + * want to register another driver on the same PCI id. 
+ */ +static const struct pci_device_id pci_tbl[] = { + { PCI_VDEVICE(AMD, 0x7443), 0, }, + { PCI_VDEVICE(AMD, 0x746b), 0, }, + { 0, }, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, pci_tbl); + +static struct pci_dev *amd_pdev; + + +static int amd_rng_data_present(struct hwrng *rng, int wait) +{ + u32 pmbase = (u32)rng->priv; + int data, i; + + for (i = 0; i < 20; i++) { + data = !!(inl(pmbase + 0xF4) & 1); + if (data || !wait) + break; + udelay(10); + } + return data; +} + +static int amd_rng_data_read(struct hwrng *rng, u32 *data) +{ + u32 pmbase = (u32)rng->priv; + + *data = inl(pmbase + 0xF0); + + return 4; +} + +static int amd_rng_init(struct hwrng *rng) +{ + u8 rnen; + + pci_read_config_byte(amd_pdev, 0x40, &rnen); + rnen |= (1 << 7); /* RNG on */ + pci_write_config_byte(amd_pdev, 0x40, rnen); + + pci_read_config_byte(amd_pdev, 0x41, &rnen); + rnen |= (1 << 7); /* PMIO enable */ + pci_write_config_byte(amd_pdev, 0x41, rnen); + + return 0; +} + +static void amd_rng_cleanup(struct hwrng *rng) +{ + u8 rnen; + + pci_read_config_byte(amd_pdev, 0x40, &rnen); + rnen &= ~(1 << 7); /* RNG off */ + pci_write_config_byte(amd_pdev, 0x40, rnen); +} + + +static struct hwrng amd_rng = { + .name = "amd", + .init = amd_rng_init, + .cleanup = amd_rng_cleanup, + .data_present = amd_rng_data_present, + .data_read = amd_rng_data_read, +}; + + +static int __init mod_init(void) +{ + int err = -ENODEV; + struct pci_dev *pdev = NULL; + const struct pci_device_id *ent; + u32 pmbase; + + for_each_pci_dev(pdev) { + ent = pci_match_id(pci_tbl, pdev); + if (ent) + goto found; + } + /* Device not found. */ + goto out; + +found: + err = pci_read_config_dword(pdev, 0x58, &pmbase); + if (err) + goto out; + err = -EIO; + pmbase &= 0x0000FF00; + if (pmbase == 0) + goto out; + if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) { + dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n", + pmbase + 0xF0); + err = -EBUSY; + goto out; + } + amd_rng.priv = (unsigned long)pmbase; + amd_pdev = pdev; + + pr_info("AMD768 RNG detected\n"); + err = hwrng_register(&amd_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", + err); + release_region(pmbase + 0xF0, 8); + goto out; + } +out: + return err; +} + +static void __exit mod_exit(void) +{ + u32 pmbase = (unsigned long)amd_rng.priv; + release_region(pmbase + 0xF0, 8); + hwrng_unregister(&amd_rng); +} + +module_init(mod_init); +module_exit(mod_exit); + +MODULE_AUTHOR("The Linux Kernel team"); +MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/atmel-rng.c b/kernel/drivers/char/hw_random/atmel-rng.c new file mode 100644 index 000000000..0fcc9e69a --- /dev/null +++ b/kernel/drivers/char/hw_random/atmel-rng.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2011 Peter Korsgaard + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRNG_CR 0x00 +#define TRNG_ISR 0x1c +#define TRNG_ODATA 0x50 + +#define TRNG_KEY 0x524e4700 /* RNG */ + +struct atmel_trng { + struct clk *clk; + void __iomem *base; + struct hwrng rng; +}; + +static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng); + u32 *data = buf; + + /* data ready? 
*/ + if (readl(trng->base + TRNG_ISR) & 1) { + *data = readl(trng->base + TRNG_ODATA); + /* + ensure data ready is only set again AFTER the next data + word is ready in case it got set between checking ISR + and reading ODATA, so we don't risk re-reading the + same word + */ + readl(trng->base + TRNG_ISR); + return 4; + } else + return 0; +} + +static int atmel_trng_probe(struct platform_device *pdev) +{ + struct atmel_trng *trng; + struct resource *res; + int ret; + + trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); + if (!trng) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + trng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(trng->base)) + return PTR_ERR(trng->base); + + trng->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(trng->clk)) + return PTR_ERR(trng->clk); + + ret = clk_prepare_enable(trng->clk); + if (ret) + return ret; + + writel(TRNG_KEY | 1, trng->base + TRNG_CR); + trng->rng.name = pdev->name; + trng->rng.read = atmel_trng_read; + + ret = hwrng_register(&trng->rng); + if (ret) + goto err_register; + + platform_set_drvdata(pdev, trng); + + return 0; + +err_register: + clk_disable(trng->clk); + return ret; +} + +static int atmel_trng_remove(struct platform_device *pdev) +{ + struct atmel_trng *trng = platform_get_drvdata(pdev); + + hwrng_unregister(&trng->rng); + + writel(TRNG_KEY, trng->base + TRNG_CR); + clk_disable_unprepare(trng->clk); + + return 0; +} + +#ifdef CONFIG_PM +static int atmel_trng_suspend(struct device *dev) +{ + struct atmel_trng *trng = dev_get_drvdata(dev); + + clk_disable_unprepare(trng->clk); + + return 0; +} + +static int atmel_trng_resume(struct device *dev) +{ + struct atmel_trng *trng = dev_get_drvdata(dev); + + return clk_prepare_enable(trng->clk); +} + +static const struct dev_pm_ops atmel_trng_pm_ops = { + .suspend = atmel_trng_suspend, + .resume = atmel_trng_resume, +}; +#endif /* CONFIG_PM */ + +static const struct of_device_id atmel_trng_dt_ids[] = { + { .compatible = "atmel,at91sam9g45-trng" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids); + +static struct platform_driver atmel_trng_driver = { + .probe = atmel_trng_probe, + .remove = atmel_trng_remove, + .driver = { + .name = "atmel-trng", +#ifdef CONFIG_PM + .pm = &atmel_trng_pm_ops, +#endif /* CONFIG_PM */ + .of_match_table = atmel_trng_dt_ids, + }, +}; + +module_platform_driver(atmel_trng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Peter Korsgaard "); +MODULE_DESCRIPTION("Atmel true random number generator driver"); diff --git a/kernel/drivers/char/hw_random/bcm2835-rng.c b/kernel/drivers/char/hw_random/bcm2835-rng.c new file mode 100644 index 000000000..7192ec25f --- /dev/null +++ b/kernel/drivers/char/hw_random/bcm2835-rng.c @@ -0,0 +1,111 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. + * Copyright (c) 2013 Lubomir Rintel + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License ("GPL") + * version 2, as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RNG_CTRL 0x0 +#define RNG_STATUS 0x4 +#define RNG_DATA 0x8 + +/* enable rng */ +#define RNG_RBGEN 0x1 + +/* the initial numbers generated are "less random" so will be discarded */ +#define RNG_WARMUP_COUNT 0x40000 + +static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + void __iomem *rng_base = (void __iomem *)rng->priv; + + while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) { + if (!wait) + return 0; + cpu_relax(); + } + + *(u32 *)buf = __raw_readl(rng_base + RNG_DATA); + return sizeof(u32); +} + +static struct hwrng bcm2835_rng_ops = { + .name = "bcm2835", + .read = bcm2835_rng_read, +}; + +static int bcm2835_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + void __iomem *rng_base; + int err; + + /* map peripheral */ + rng_base = of_iomap(np, 0); + if (!rng_base) { + dev_err(dev, "failed to remap rng regs"); + return -ENODEV; + } + bcm2835_rng_ops.priv = (unsigned long)rng_base; + + /* set warm-up count & enable */ + __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); + __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); + + /* register driver */ + err = hwrng_register(&bcm2835_rng_ops); + if (err) { + dev_err(dev, "hwrng registration failed\n"); + iounmap(rng_base); + } else + dev_info(dev, "hwrng registered\n"); + + return err; +} + +static int bcm2835_rng_remove(struct platform_device *pdev) +{ + void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv; + + /* disable rng hardware */ + __raw_writel(0, rng_base + RNG_CTRL); + + /* unregister driver */ + hwrng_unregister(&bcm2835_rng_ops); + iounmap(rng_base); + + return 0; +} + +static const struct of_device_id bcm2835_rng_of_match[] = { + { .compatible = "brcm,bcm2835-rng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); + +static struct platform_driver bcm2835_rng_driver = { + .driver = { + .name = "bcm2835-rng", + .of_match_table = bcm2835_rng_of_match, + }, + .probe = bcm2835_rng_probe, + .remove = bcm2835_rng_remove, +}; +module_platform_driver(bcm2835_rng_driver); + +MODULE_AUTHOR("Lubomir Rintel "); +MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/char/hw_random/bcm63xx-rng.c b/kernel/drivers/char/hw_random/bcm63xx-rng.c new file mode 100644 index 000000000..4b31f1387 --- /dev/null +++ b/kernel/drivers/char/hw_random/bcm63xx-rng.c @@ -0,0 +1,146 @@ +/* + * Broadcom BCM63xx Random Number Generator support + * + * Copyright (C) 2011, Florian Fainelli + * Copyright (C) 2009, Broadcom Corporation + * + */ +#include +#include +#include +#include +#include +#include +#include + +#define RNG_CTRL 0x00 +#define RNG_EN (1 << 0) + +#define RNG_STAT 0x04 +#define RNG_AVAIL_MASK (0xff000000) + +#define RNG_DATA 0x08 +#define RNG_THRES 0x0c +#define RNG_MASK 0x10 + +struct bcm63xx_rng_priv { + struct hwrng rng; + struct clk *clk; + void __iomem *regs; +}; + +#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng) + +static int bcm63xx_rng_init(struct hwrng *rng) +{ + struct bcm63xx_rng_priv *priv = to_rng_priv(rng); + u32 val; + int error; + + error = clk_prepare_enable(priv->clk); + if (error) + return error; + + val = __raw_readl(priv->regs + RNG_CTRL); + val |= RNG_EN; + __raw_writel(val, priv->regs + RNG_CTRL); + + return 0; +} + +static void bcm63xx_rng_cleanup(struct hwrng *rng) +{ + struct bcm63xx_rng_priv *priv = 
to_rng_priv(rng); + u32 val; + + val = __raw_readl(priv->regs + RNG_CTRL); + val &= ~RNG_EN; + __raw_writel(val, priv->regs + RNG_CTRL); + + clk_disable_unprepare(priv->clk); +} + +static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) +{ + struct bcm63xx_rng_priv *priv = to_rng_priv(rng); + + return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK; +} + +static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct bcm63xx_rng_priv *priv = to_rng_priv(rng); + + *data = __raw_readl(priv->regs + RNG_DATA); + + return 4; +} + +static int bcm63xx_rng_probe(struct platform_device *pdev) +{ + struct resource *r; + struct clk *clk; + int ret; + struct bcm63xx_rng_priv *priv; + struct hwrng *rng; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no iomem resource\n"); + return -ENXIO; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->rng.name = pdev->name; + priv->rng.init = bcm63xx_rng_init; + priv->rng.cleanup = bcm63xx_rng_cleanup; + priv->rng.data_present = bcm63xx_rng_data_present; + priv->rng.data_read = bcm63xx_rng_data_read; + + priv->clk = devm_clk_get(&pdev->dev, "ipsec"); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + dev_err(&pdev->dev, "no clock for device: %d\n", ret); + return ret; + } + + if (!devm_request_mem_region(&pdev->dev, r->start, + resource_size(r), pdev->name)) { + dev_err(&pdev->dev, "request mem failed"); + return -EBUSY; + } + + priv->regs = devm_ioremap_nocache(&pdev->dev, r->start, + resource_size(r)); + if (!priv->regs) { + dev_err(&pdev->dev, "ioremap failed"); + return -ENOMEM; + } + + ret = devm_hwrng_register(&pdev->dev, &priv->rng); + if (ret) { + dev_err(&pdev->dev, "failed to register rng device: %d\n", + ret); + return ret; + } + + dev_info(&pdev->dev, "registered RNG driver\n"); + + return 0; +} + +static struct platform_driver bcm63xx_rng_driver = { + .probe = bcm63xx_rng_probe, + .driver = { + .name = "bcm63xx-rng", + }, +}; + +module_platform_driver(bcm63xx_rng_driver); + +MODULE_AUTHOR("Florian Fainelli "); +MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/core.c b/kernel/drivers/char/hw_random/core.c new file mode 100644 index 000000000..da8faf785 --- /dev/null +++ b/kernel/drivers/char/hw_random/core.c @@ -0,0 +1,590 @@ +/* + Added support for the AMD Geode LX RNG + (c) Copyright 2004-2005 Advanced Micro Devices, Inc. + + derived from + + Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) + (c) Copyright 2003 Red Hat Inc + + derived from + + Hardware driver for the AMD 768 Random Number Generator (RNG) + (c) Copyright 2001 Red Hat Inc + + derived from + + Hardware driver for Intel i810 Random Number Generator (RNG) + Copyright 2000,2001 Jeff Garzik + Copyright 2000,2001 Philipp Rumpf + + Added generic RNG API + Copyright 2006 Michael Buesch + Copyright 2005 (c) MontaVista Software, Inc. + + Please read Documentation/hw_random.txt for details on use. + + ---------------------------------------------------------- + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. 
+ + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RNG_MODULE_NAME "hw_random" +#define PFX RNG_MODULE_NAME ": " +#define RNG_MISCDEV_MINOR 183 /* official */ + + +static struct hwrng *current_rng; +static struct task_struct *hwrng_fill; +static LIST_HEAD(rng_list); +/* Protects rng_list and current_rng */ +static DEFINE_MUTEX(rng_mutex); +/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ +static DEFINE_MUTEX(reading_mutex); +static int data_avail; +static u8 *rng_buffer, *rng_fillbuf; +static unsigned short current_quality; +static unsigned short default_quality; /* = 0; default to "off" */ + +module_param(current_quality, ushort, 0644); +MODULE_PARM_DESC(current_quality, + "current hwrng entropy estimation per mill"); +module_param(default_quality, ushort, 0644); +MODULE_PARM_DESC(default_quality, + "default entropy content of hwrng per mill"); + +static void drop_current_rng(void); +static int hwrng_init(struct hwrng *rng); +static void start_khwrngd(void); + +static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, + int wait); + +static size_t rng_buffer_size(void) +{ + return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; +} + +static void add_early_randomness(struct hwrng *rng) +{ + unsigned char bytes[16]; + int bytes_read; + + mutex_lock(&reading_mutex); + bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); + mutex_unlock(&reading_mutex); + if (bytes_read > 0) + add_device_randomness(bytes, bytes_read); +} + +static inline void cleanup_rng(struct kref *kref) +{ + struct hwrng *rng = container_of(kref, struct hwrng, ref); + + if (rng->cleanup) + rng->cleanup(rng); + + complete(&rng->cleanup_done); +} + +static int set_current_rng(struct hwrng *rng) +{ + int err; + + BUG_ON(!mutex_is_locked(&rng_mutex)); + + err = hwrng_init(rng); + if (err) + return err; + + drop_current_rng(); + current_rng = rng; + + return 0; +} + +static void drop_current_rng(void) +{ + BUG_ON(!mutex_is_locked(&rng_mutex)); + if (!current_rng) + return; + + /* decrease last reference for triggering the cleanup */ + kref_put(¤t_rng->ref, cleanup_rng); + current_rng = NULL; +} + +/* Returns ERR_PTR(), NULL or refcounted hwrng */ +static struct hwrng *get_current_rng(void) +{ + struct hwrng *rng; + + if (mutex_lock_interruptible(&rng_mutex)) + return ERR_PTR(-ERESTARTSYS); + + rng = current_rng; + if (rng) + kref_get(&rng->ref); + + mutex_unlock(&rng_mutex); + return rng; +} + +static void put_rng(struct hwrng *rng) +{ + /* + * Hold rng_mutex here so we serialize in case they set_current_rng + * on rng again immediately. + */ + mutex_lock(&rng_mutex); + if (rng) + kref_put(&rng->ref, cleanup_rng); + mutex_unlock(&rng_mutex); +} + +static int hwrng_init(struct hwrng *rng) +{ + if (kref_get_unless_zero(&rng->ref)) + goto skip_init; + + if (rng->init) { + int ret; + + ret = rng->init(rng); + if (ret) + return ret; + } + + kref_init(&rng->ref); + reinit_completion(&rng->cleanup_done); + +skip_init: + add_early_randomness(rng); + + current_quality = rng->quality ? 
: default_quality; + if (current_quality > 1024) + current_quality = 1024; + + if (current_quality == 0 && hwrng_fill) + kthread_stop(hwrng_fill); + if (current_quality > 0 && !hwrng_fill) + start_khwrngd(); + + return 0; +} + +static int rng_dev_open(struct inode *inode, struct file *filp) +{ + /* enforce read-only access to this chrdev */ + if ((filp->f_mode & FMODE_READ) == 0) + return -EINVAL; + if (filp->f_mode & FMODE_WRITE) + return -EINVAL; + return 0; +} + +static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, + int wait) { + int present; + + BUG_ON(!mutex_is_locked(&reading_mutex)); + if (rng->read) + return rng->read(rng, (void *)buffer, size, wait); + + if (rng->data_present) + present = rng->data_present(rng, wait); + else + present = 1; + + if (present) + return rng->data_read(rng, (u32 *)buffer); + + return 0; +} + +static ssize_t rng_dev_read(struct file *filp, char __user *buf, + size_t size, loff_t *offp) +{ + ssize_t ret = 0; + int err = 0; + int bytes_read, len; + struct hwrng *rng; + + while (size) { + rng = get_current_rng(); + if (IS_ERR(rng)) { + err = PTR_ERR(rng); + goto out; + } + if (!rng) { + err = -ENODEV; + goto out; + } + + mutex_lock(&reading_mutex); + if (!data_avail) { + bytes_read = rng_get_data(rng, rng_buffer, + rng_buffer_size(), + !(filp->f_flags & O_NONBLOCK)); + if (bytes_read < 0) { + err = bytes_read; + goto out_unlock_reading; + } + data_avail = bytes_read; + } + + if (!data_avail) { + if (filp->f_flags & O_NONBLOCK) { + err = -EAGAIN; + goto out_unlock_reading; + } + } else { + len = data_avail; + if (len > size) + len = size; + + data_avail -= len; + + if (copy_to_user(buf + ret, rng_buffer + data_avail, + len)) { + err = -EFAULT; + goto out_unlock_reading; + } + + size -= len; + ret += len; + } + + mutex_unlock(&reading_mutex); + put_rng(rng); + + if (need_resched()) + schedule_timeout_interruptible(1); + + if (signal_pending(current)) { + err = -ERESTARTSYS; + goto out; + } + } +out: + return ret ? : err; + +out_unlock_reading: + mutex_unlock(&reading_mutex); + put_rng(rng); + goto out; +} + + +static const struct file_operations rng_chrdev_ops = { + .owner = THIS_MODULE, + .open = rng_dev_open, + .read = rng_dev_read, + .llseek = noop_llseek, +}; + +static const struct attribute_group *rng_dev_groups[]; + +static struct miscdevice rng_miscdev = { + .minor = RNG_MISCDEV_MINOR, + .name = RNG_MODULE_NAME, + .nodename = "hwrng", + .fops = &rng_chrdev_ops, + .groups = rng_dev_groups, +}; + + +static ssize_t hwrng_attr_current_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + int err; + struct hwrng *rng; + + err = mutex_lock_interruptible(&rng_mutex); + if (err) + return -ERESTARTSYS; + err = -ENODEV; + list_for_each_entry(rng, &rng_list, list) { + if (strcmp(rng->name, buf) == 0) { + err = 0; + if (rng != current_rng) + err = set_current_rng(rng); + break; + } + } + mutex_unlock(&rng_mutex); + + return err ? : len; +} + +static ssize_t hwrng_attr_current_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t ret; + struct hwrng *rng; + + rng = get_current_rng(); + if (IS_ERR(rng)) + return PTR_ERR(rng); + + ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? 
rng->name : "none"); + put_rng(rng); + + return ret; +} + +static ssize_t hwrng_attr_available_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int err; + struct hwrng *rng; + + err = mutex_lock_interruptible(&rng_mutex); + if (err) + return -ERESTARTSYS; + buf[0] = '\0'; + list_for_each_entry(rng, &rng_list, list) { + strlcat(buf, rng->name, PAGE_SIZE); + strlcat(buf, " ", PAGE_SIZE); + } + strlcat(buf, "\n", PAGE_SIZE); + mutex_unlock(&rng_mutex); + + return strlen(buf); +} + +static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR, + hwrng_attr_current_show, + hwrng_attr_current_store); +static DEVICE_ATTR(rng_available, S_IRUGO, + hwrng_attr_available_show, + NULL); + +static struct attribute *rng_dev_attrs[] = { + &dev_attr_rng_current.attr, + &dev_attr_rng_available.attr, + NULL +}; + +ATTRIBUTE_GROUPS(rng_dev); + +static void __exit unregister_miscdev(void) +{ + misc_deregister(&rng_miscdev); +} + +static int __init register_miscdev(void) +{ + return misc_register(&rng_miscdev); +} + +static int hwrng_fillfn(void *unused) +{ + long rc; + + while (!kthread_should_stop()) { + struct hwrng *rng; + + rng = get_current_rng(); + if (IS_ERR(rng) || !rng) + break; + mutex_lock(&reading_mutex); + rc = rng_get_data(rng, rng_fillbuf, + rng_buffer_size(), 1); + mutex_unlock(&reading_mutex); + put_rng(rng); + if (rc <= 0) { + pr_warn("hwrng: no data available\n"); + msleep_interruptible(10000); + continue; + } + /* Outside lock, sure, but y'know: randomness. */ + add_hwgenerator_randomness((void *)rng_fillbuf, rc, + rc * current_quality * 8 >> 10); + } + hwrng_fill = NULL; + return 0; +} + +static void start_khwrngd(void) +{ + hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); + if (hwrng_fill == ERR_PTR(-ENOMEM)) { + pr_err("hwrng_fill thread creation failed"); + hwrng_fill = NULL; + } +} + +int hwrng_register(struct hwrng *rng) +{ + int err = -EINVAL; + struct hwrng *old_rng, *tmp; + + if (rng->name == NULL || + (rng->data_read == NULL && rng->read == NULL)) + goto out; + + mutex_lock(&rng_mutex); + + /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ + err = -ENOMEM; + if (!rng_buffer) { + rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); + if (!rng_buffer) + goto out_unlock; + } + if (!rng_fillbuf) { + rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); + if (!rng_fillbuf) { + kfree(rng_buffer); + goto out_unlock; + } + } + + /* Must not register two RNGs with the same name. */ + err = -EEXIST; + list_for_each_entry(tmp, &rng_list, list) { + if (strcmp(tmp->name, rng->name) == 0) + goto out_unlock; + } + + init_completion(&rng->cleanup_done); + complete(&rng->cleanup_done); + + old_rng = current_rng; + err = 0; + if (!old_rng) { + err = set_current_rng(rng); + if (err) + goto out_unlock; + } + list_add_tail(&rng->list, &rng_list); + + if (old_rng && !rng->init) { + /* + * Use a new device's input to add some randomness to + * the system. If this rng device isn't going to be + * used right away, its init function hasn't been + * called yet; so only use the randomness from devices + * that don't need an init callback. 
+ */ + add_early_randomness(rng); + } + +out_unlock: + mutex_unlock(&rng_mutex); +out: + return err; +} +EXPORT_SYMBOL_GPL(hwrng_register); + +void hwrng_unregister(struct hwrng *rng) +{ + mutex_lock(&rng_mutex); + + list_del(&rng->list); + if (current_rng == rng) { + drop_current_rng(); + if (!list_empty(&rng_list)) { + struct hwrng *tail; + + tail = list_entry(rng_list.prev, struct hwrng, list); + + set_current_rng(tail); + } + } + + if (list_empty(&rng_list)) { + mutex_unlock(&rng_mutex); + if (hwrng_fill) + kthread_stop(hwrng_fill); + } else + mutex_unlock(&rng_mutex); + + wait_for_completion(&rng->cleanup_done); +} +EXPORT_SYMBOL_GPL(hwrng_unregister); + +static void devm_hwrng_release(struct device *dev, void *res) +{ + hwrng_unregister(*(struct hwrng **)res); +} + +static int devm_hwrng_match(struct device *dev, void *res, void *data) +{ + struct hwrng **r = res; + + if (WARN_ON(!r || !*r)) + return 0; + + return *r == data; +} + +int devm_hwrng_register(struct device *dev, struct hwrng *rng) +{ + struct hwrng **ptr; + int error; + + ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + error = hwrng_register(rng); + if (error) { + devres_free(ptr); + return error; + } + + *ptr = rng; + devres_add(dev, ptr); + return 0; +} +EXPORT_SYMBOL_GPL(devm_hwrng_register); + +void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) +{ + devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); +} +EXPORT_SYMBOL_GPL(devm_hwrng_unregister); + +static int __init hwrng_modinit(void) +{ + return register_miscdev(); +} + +static void __exit hwrng_modexit(void) +{ + mutex_lock(&rng_mutex); + BUG_ON(current_rng); + kfree(rng_buffer); + kfree(rng_fillbuf); + mutex_unlock(&rng_mutex); + + unregister_miscdev(); +} + +module_init(hwrng_modinit); +module_exit(hwrng_modexit); + +MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/exynos-rng.c b/kernel/drivers/char/hw_random/exynos-rng.c new file mode 100644 index 000000000..dc4701fd8 --- /dev/null +++ b/kernel/drivers/char/hw_random/exynos-rng.c @@ -0,0 +1,172 @@ +/* + * exynos-rng.c - Random Number Generator driver for the exynos + * + * Copyright (C) 2012 Samsung Electronics + * Jonghwa Lee + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define EXYNOS_PRNG_STATUS_OFFSET 0x10 +#define EXYNOS_PRNG_SEED_OFFSET 0x140 +#define EXYNOS_PRNG_OUT1_OFFSET 0x160 +#define SEED_SETTING_DONE BIT(1) +#define PRNG_START 0x18 +#define PRNG_DONE BIT(5) +#define EXYNOS_AUTOSUSPEND_DELAY 100 + +struct exynos_rng { + struct device *dev; + struct hwrng rng; + void __iomem *mem; + struct clk *clk; +}; + +static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset) +{ + return __raw_readl(rng->mem + offset); +} + +static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset) +{ + __raw_writel(val, rng->mem + offset); +} + +static int exynos_init(struct hwrng *rng) +{ + struct exynos_rng *exynos_rng = container_of(rng, + struct exynos_rng, rng); + int i; + int ret = 0; + + pm_runtime_get_sync(exynos_rng->dev); + + for (i = 0 ; i < 5 ; i++) + exynos_rng_writel(exynos_rng, jiffies, + EXYNOS_PRNG_SEED_OFFSET + 4*i); + + if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET) + & SEED_SETTING_DONE)) + ret = -EIO; + + pm_runtime_put_noidle(exynos_rng->dev); + + return ret; +} + +static int exynos_read(struct hwrng *rng, void *buf, + size_t max, bool wait) +{ + struct exynos_rng *exynos_rng = container_of(rng, + struct exynos_rng, rng); + u32 *data = buf; + + pm_runtime_get_sync(exynos_rng->dev); + + exynos_rng_writel(exynos_rng, PRNG_START, 0); + + while (!(exynos_rng_readl(exynos_rng, + EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE)) + cpu_relax(); + + exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET); + + *data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET); + + pm_runtime_mark_last_busy(exynos_rng->dev); + pm_runtime_autosuspend(exynos_rng->dev); + + return 4; +} + +static int exynos_rng_probe(struct platform_device *pdev) +{ + struct exynos_rng *exynos_rng; + struct resource *res; + + exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng), + GFP_KERNEL); + if (!exynos_rng) + return -ENOMEM; + + exynos_rng->dev = &pdev->dev; + exynos_rng->rng.name = "exynos"; + exynos_rng->rng.init = exynos_init; + exynos_rng->rng.read = exynos_read; + exynos_rng->clk = devm_clk_get(&pdev->dev, "secss"); + if (IS_ERR(exynos_rng->clk)) { + dev_err(&pdev->dev, "Couldn't get clock.\n"); + return -ENOENT; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(exynos_rng->mem)) + return PTR_ERR(exynos_rng->mem); + + platform_set_drvdata(pdev, exynos_rng); + + pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + return devm_hwrng_register(&pdev->dev, &exynos_rng->rng); +} + +#ifdef CONFIG_PM +static int exynos_rng_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); + + clk_disable_unprepare(exynos_rng->clk); + + return 0; +} + +static int exynos_rng_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); + + return clk_prepare_enable(exynos_rng->clk); +} +#endif + +static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend, 
+ exynos_rng_runtime_resume, NULL); + +static struct platform_driver exynos_rng_driver = { + .driver = { + .name = "exynos-rng", + .pm = &exynos_rng_pm_ops, + }, + .probe = exynos_rng_probe, +}; + +module_platform_driver(exynos_rng_driver); + +MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver"); +MODULE_AUTHOR("Jonghwa Lee "); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/geode-rng.c b/kernel/drivers/char/hw_random/geode-rng.c new file mode 100644 index 000000000..0d0579fe4 --- /dev/null +++ b/kernel/drivers/char/hw_random/geode-rng.c @@ -0,0 +1,139 @@ +/* + * RNG driver for AMD Geode RNGs + * + * Copyright 2005 (c) MontaVista Software, Inc. + * + * with the majority of the code coming from: + * + * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) + * (c) Copyright 2003 Red Hat Inc + * + * derived from + * + * Hardware driver for the AMD 768 Random Number Generator (RNG) + * (c) Copyright 2001 Red Hat Inc + * + * derived from + * + * Hardware driver for Intel i810 Random Number Generator (RNG) + * Copyright 2000,2001 Jeff Garzik + * Copyright 2000,2001 Philipp Rumpf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include + + +#define PFX KBUILD_MODNAME ": " + +#define GEODE_RNG_DATA_REG 0x50 +#define GEODE_RNG_STATUS_REG 0x54 + +/* + * Data for PCI driver interface + * + * This data only exists for exporting the supported + * PCI ids via MODULE_DEVICE_TABLE. We do not actually + * register a pci_driver, because someone else might one day + * want to register another driver on the same PCI id. + */ +static const struct pci_device_id pci_tbl[] = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, }, + { 0, }, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, pci_tbl); + + +static int geode_rng_data_read(struct hwrng *rng, u32 *data) +{ + void __iomem *mem = (void __iomem *)rng->priv; + + *data = readl(mem + GEODE_RNG_DATA_REG); + + return 4; +} + +static int geode_rng_data_present(struct hwrng *rng, int wait) +{ + void __iomem *mem = (void __iomem *)rng->priv; + int data, i; + + for (i = 0; i < 20; i++) { + data = !!(readl(mem + GEODE_RNG_STATUS_REG)); + if (data || !wait) + break; + udelay(10); + } + return data; +} + + +static struct hwrng geode_rng = { + .name = "geode", + .data_present = geode_rng_data_present, + .data_read = geode_rng_data_read, +}; + + +static int __init mod_init(void) +{ + int err = -ENODEV; + struct pci_dev *pdev = NULL; + const struct pci_device_id *ent; + void __iomem *mem; + unsigned long rng_base; + + for_each_pci_dev(pdev) { + ent = pci_match_id(pci_tbl, pdev); + if (ent) + goto found; + } + /* Device not found. 
*/ + goto out; + +found: + rng_base = pci_resource_start(pdev, 0); + if (rng_base == 0) + goto out; + err = -ENOMEM; + mem = ioremap(rng_base, 0x58); + if (!mem) + goto out; + geode_rng.priv = (unsigned long)mem; + + pr_info("AMD Geode RNG detected\n"); + err = hwrng_register(&geode_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", + err); + goto err_unmap; + } +out: + return err; + +err_unmap: + iounmap(mem); + goto out; +} + +static void __exit mod_exit(void) +{ + void __iomem *mem = (void __iomem *)geode_rng.priv; + + hwrng_unregister(&geode_rng); + iounmap(mem); +} + +module_init(mod_init); +module_exit(mod_exit); + +MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/intel-rng.c b/kernel/drivers/char/hw_random/intel-rng.c new file mode 100644 index 000000000..290c88026 --- /dev/null +++ b/kernel/drivers/char/hw_random/intel-rng.c @@ -0,0 +1,418 @@ +/* + * RNG driver for Intel RNGs + * + * Copyright 2005 (c) MontaVista Software, Inc. + * + * with the majority of the code coming from: + * + * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) + * (c) Copyright 2003 Red Hat Inc + * + * derived from + * + * Hardware driver for the AMD 768 Random Number Generator (RNG) + * (c) Copyright 2001 Red Hat Inc + * + * derived from + * + * Hardware driver for Intel i810 Random Number Generator (RNG) + * Copyright 2000,2001 Jeff Garzik + * Copyright 2000,2001 Philipp Rumpf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define PFX KBUILD_MODNAME ": " + +/* + * RNG registers + */ +#define INTEL_RNG_HW_STATUS 0 +#define INTEL_RNG_PRESENT 0x40 +#define INTEL_RNG_ENABLED 0x01 +#define INTEL_RNG_STATUS 1 +#define INTEL_RNG_DATA_PRESENT 0x01 +#define INTEL_RNG_DATA 2 + +/* + * Magic address at which Intel PCI bridges locate the RNG + */ +#define INTEL_RNG_ADDR 0xFFBC015F +#define INTEL_RNG_ADDR_LEN 3 + +/* + * LPC bridge PCI config space registers + */ +#define FWH_DEC_EN1_REG_OLD 0xe3 +#define FWH_DEC_EN1_REG_NEW 0xd9 /* high byte of 16-bit register */ +#define FWH_F8_EN_MASK 0x80 + +#define BIOS_CNTL_REG_OLD 0x4e +#define BIOS_CNTL_REG_NEW 0xdc +#define BIOS_CNTL_WRITE_ENABLE_MASK 0x01 +#define BIOS_CNTL_LOCK_ENABLE_MASK 0x02 + +/* + * Magic address at which Intel Firmware Hubs get accessed + */ +#define INTEL_FWH_ADDR 0xffff0000 +#define INTEL_FWH_ADDR_LEN 2 + +/* + * Intel Firmware Hub command codes (write to any address inside the device) + */ +#define INTEL_FWH_RESET_CMD 0xff /* aka READ_ARRAY */ +#define INTEL_FWH_READ_ID_CMD 0x90 + +/* + * Intel Firmware Hub Read ID command result addresses + */ +#define INTEL_FWH_MANUFACTURER_CODE_ADDRESS 0x000000 +#define INTEL_FWH_DEVICE_CODE_ADDRESS 0x000001 + +/* + * Intel Firmware Hub Read ID command result values + */ +#define INTEL_FWH_MANUFACTURER_CODE 0x89 +#define INTEL_FWH_DEVICE_CODE_8M 0xac +#define INTEL_FWH_DEVICE_CODE_4M 0xad + +/* + * Data for PCI driver interface + * + * This data only exists for exporting the supported + * PCI ids via MODULE_DEVICE_TABLE. We do not actually + * register a pci_driver, because someone else might one day + * want to register another driver on the same PCI id. 
+ */ +static const struct pci_device_id pci_tbl[] = { +/* AA + { PCI_DEVICE(0x8086, 0x2418) }, */ + { PCI_DEVICE(0x8086, 0x2410) }, /* AA */ +/* AB + { PCI_DEVICE(0x8086, 0x2428) }, */ + { PCI_DEVICE(0x8086, 0x2420) }, /* AB */ +/* ?? + { PCI_DEVICE(0x8086, 0x2430) }, */ +/* BAM, CAM, DBM, FBM, GxM + { PCI_DEVICE(0x8086, 0x2448) }, */ + { PCI_DEVICE(0x8086, 0x244c) }, /* BAM */ + { PCI_DEVICE(0x8086, 0x248c) }, /* CAM */ + { PCI_DEVICE(0x8086, 0x24cc) }, /* DBM */ + { PCI_DEVICE(0x8086, 0x2641) }, /* FBM */ + { PCI_DEVICE(0x8086, 0x27b9) }, /* GxM */ + { PCI_DEVICE(0x8086, 0x27bd) }, /* GxM DH */ +/* BA, CA, DB, Ex, 6300, Fx, 631x/632x, Gx + { PCI_DEVICE(0x8086, 0x244e) }, */ + { PCI_DEVICE(0x8086, 0x2440) }, /* BA */ + { PCI_DEVICE(0x8086, 0x2480) }, /* CA */ + { PCI_DEVICE(0x8086, 0x24c0) }, /* DB */ + { PCI_DEVICE(0x8086, 0x24d0) }, /* Ex */ + { PCI_DEVICE(0x8086, 0x25a1) }, /* 6300 */ + { PCI_DEVICE(0x8086, 0x2640) }, /* Fx */ + { PCI_DEVICE(0x8086, 0x2670) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2671) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2672) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2673) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2674) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2675) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2676) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2677) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2678) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x2679) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x267a) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x267b) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x267c) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x267d) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x267e) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x267f) }, /* 631x/632x */ + { PCI_DEVICE(0x8086, 0x27b8) }, /* Gx */ +/* E + { PCI_DEVICE(0x8086, 0x245e) }, */ + { PCI_DEVICE(0x8086, 0x2450) }, /* E */ + { 0, }, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, pci_tbl); + +static __initdata int no_fwh_detect; +module_param(no_fwh_detect, int, 0); +MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n" + " positive value - skip if FWH space locked read-only\n" + " negative value - skip always"); + +static inline u8 hwstatus_get(void __iomem *mem) +{ + return readb(mem + INTEL_RNG_HW_STATUS); +} + +static inline u8 hwstatus_set(void __iomem *mem, + u8 hw_status) +{ + writeb(hw_status, mem + INTEL_RNG_HW_STATUS); + return hwstatus_get(mem); +} + +static int intel_rng_data_present(struct hwrng *rng, int wait) +{ + void __iomem *mem = (void __iomem *)rng->priv; + int data, i; + + for (i = 0; i < 20; i++) { + data = !!(readb(mem + INTEL_RNG_STATUS) & + INTEL_RNG_DATA_PRESENT); + if (data || !wait) + break; + udelay(10); + } + return data; +} + +static int intel_rng_data_read(struct hwrng *rng, u32 *data) +{ + void __iomem *mem = (void __iomem *)rng->priv; + + *data = readb(mem + INTEL_RNG_DATA); + + return 1; +} + +static int intel_rng_init(struct hwrng *rng) +{ + void __iomem *mem = (void __iomem *)rng->priv; + u8 hw_status; + int err = -EIO; + + hw_status = hwstatus_get(mem); + /* turn RNG h/w on, if it's off */ + if ((hw_status & INTEL_RNG_ENABLED) == 0) + hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED); + if ((hw_status & INTEL_RNG_ENABLED) == 0) { + pr_err(PFX "cannot enable RNG, aborting\n"); + goto out; + } + err = 0; +out: + return err; +} + +static void intel_rng_cleanup(struct hwrng *rng) +{ + void __iomem *mem = (void __iomem *)rng->priv; + u8 hw_status; + + hw_status = hwstatus_get(mem); + if (hw_status & INTEL_RNG_ENABLED) + 
hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED); + else + pr_warn(PFX "unusual: RNG already disabled\n"); +} + + +static struct hwrng intel_rng = { + .name = "intel", + .init = intel_rng_init, + .cleanup = intel_rng_cleanup, + .data_present = intel_rng_data_present, + .data_read = intel_rng_data_read, +}; + +struct intel_rng_hw { + struct pci_dev *dev; + void __iomem *mem; + u8 bios_cntl_off; + u8 bios_cntl_val; + u8 fwh_dec_en1_off; + u8 fwh_dec_en1_val; +}; + +static int __init intel_rng_hw_init(void *_intel_rng_hw) +{ + struct intel_rng_hw *intel_rng_hw = _intel_rng_hw; + u8 mfc, dvc; + + /* interrupts disabled in stop_machine call */ + + if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK)) + pci_write_config_byte(intel_rng_hw->dev, + intel_rng_hw->fwh_dec_en1_off, + intel_rng_hw->fwh_dec_en1_val | + FWH_F8_EN_MASK); + if (!(intel_rng_hw->bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK)) + pci_write_config_byte(intel_rng_hw->dev, + intel_rng_hw->bios_cntl_off, + intel_rng_hw->bios_cntl_val | + BIOS_CNTL_WRITE_ENABLE_MASK); + + writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem); + writeb(INTEL_FWH_READ_ID_CMD, intel_rng_hw->mem); + mfc = readb(intel_rng_hw->mem + INTEL_FWH_MANUFACTURER_CODE_ADDRESS); + dvc = readb(intel_rng_hw->mem + INTEL_FWH_DEVICE_CODE_ADDRESS); + writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem); + + if (!(intel_rng_hw->bios_cntl_val & + (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))) + pci_write_config_byte(intel_rng_hw->dev, + intel_rng_hw->bios_cntl_off, + intel_rng_hw->bios_cntl_val); + if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK)) + pci_write_config_byte(intel_rng_hw->dev, + intel_rng_hw->fwh_dec_en1_off, + intel_rng_hw->fwh_dec_en1_val); + + if (mfc != INTEL_FWH_MANUFACTURER_CODE || + (dvc != INTEL_FWH_DEVICE_CODE_8M && + dvc != INTEL_FWH_DEVICE_CODE_4M)) { + pr_notice(PFX "FWH not detected\n"); + return -ENODEV; + } + + return 0; +} + +static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw, + struct pci_dev *dev) +{ + intel_rng_hw->bios_cntl_val = 0xff; + intel_rng_hw->fwh_dec_en1_val = 0xff; + intel_rng_hw->dev = dev; + + /* Check for Intel 82802 */ + if (dev->device < 0x2640) { + intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD; + intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_OLD; + } else { + intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_NEW; + intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_NEW; + } + + pci_read_config_byte(dev, intel_rng_hw->fwh_dec_en1_off, + &intel_rng_hw->fwh_dec_en1_val); + pci_read_config_byte(dev, intel_rng_hw->bios_cntl_off, + &intel_rng_hw->bios_cntl_val); + + if ((intel_rng_hw->bios_cntl_val & + (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)) + == BIOS_CNTL_LOCK_ENABLE_MASK) { + static __initdata /*const*/ char warning[] = +PFX "Firmware space is locked read-only. 
If you can't or\n" +PFX "don't want to disable this in firmware setup, and if\n" +PFX "you are certain that your system has a functional\n" +PFX "RNG, try using the 'no_fwh_detect' option.\n"; + + if (no_fwh_detect) + return -ENODEV; + pr_warn("%s", warning); + return -EBUSY; + } + + intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); + if (intel_rng_hw->mem == NULL) + return -EBUSY; + + return 0; +} + + +static int __init mod_init(void) +{ + int err = -ENODEV; + int i; + struct pci_dev *dev = NULL; + void __iomem *mem = mem; + u8 hw_status; + struct intel_rng_hw *intel_rng_hw; + + for (i = 0; !dev && pci_tbl[i].vendor; ++i) + dev = pci_get_device(pci_tbl[i].vendor, pci_tbl[i].device, + NULL); + + if (!dev) + goto out; /* Device not found. */ + + if (no_fwh_detect < 0) { + pci_dev_put(dev); + goto fwh_done; + } + + intel_rng_hw = kmalloc(sizeof(*intel_rng_hw), GFP_KERNEL); + if (!intel_rng_hw) { + pci_dev_put(dev); + goto out; + } + + err = intel_init_hw_struct(intel_rng_hw, dev); + if (err) { + pci_dev_put(dev); + kfree(intel_rng_hw); + if (err == -ENODEV) + goto fwh_done; + goto out; + } + + /* + * Since the BIOS code/data is going to disappear from its normal + * location with the Read ID command, all activity on the system + * must be stopped until the state is back to normal. + * + * Use stop_machine because IPIs can be blocked by disabling + * interrupts. + */ + err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL); + pci_dev_put(dev); + iounmap(intel_rng_hw->mem); + kfree(intel_rng_hw); + if (err) + goto out; + +fwh_done: + err = -ENOMEM; + mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN); + if (!mem) + goto out; + intel_rng.priv = (unsigned long)mem; + + /* Check for Random Number Generator */ + err = -ENODEV; + hw_status = hwstatus_get(mem); + if ((hw_status & INTEL_RNG_PRESENT) == 0) { + iounmap(mem); + goto out; + } + + pr_info("Intel 82802 RNG detected\n"); + err = hwrng_register(&intel_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", + err); + iounmap(mem); + } +out: + return err; + +} + +static void __exit mod_exit(void) +{ + void __iomem *mem = (void __iomem *)intel_rng.priv; + + hwrng_unregister(&intel_rng); + iounmap(mem); +} + +module_init(mod_init); +module_exit(mod_exit); + +MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/iproc-rng200.c b/kernel/drivers/char/hw_random/iproc-rng200.c new file mode 100644 index 000000000..3eaf7cb96 --- /dev/null +++ b/kernel/drivers/char/hw_random/iproc-rng200.c @@ -0,0 +1,239 @@ +/* +* Copyright (C) 2015 Broadcom Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License as +* published by the Free Software Foundation version 2. +* +* This program is distributed "as is" WITHOUT ANY WARRANTY of any +* kind, whether express or implied; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+*/ +/* + * DESCRIPTION: The Broadcom iProc RNG200 Driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers */ +#define RNG_CTRL_OFFSET 0x00 +#define RNG_CTRL_RNG_RBGEN_MASK 0x00001FFF +#define RNG_CTRL_RNG_RBGEN_ENABLE 0x00000001 +#define RNG_CTRL_RNG_RBGEN_DISABLE 0x00000000 + +#define RNG_SOFT_RESET_OFFSET 0x04 +#define RNG_SOFT_RESET 0x00000001 + +#define RBG_SOFT_RESET_OFFSET 0x08 +#define RBG_SOFT_RESET 0x00000001 + +#define RNG_INT_STATUS_OFFSET 0x18 +#define RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK 0x80000000 +#define RNG_INT_STATUS_STARTUP_TRANSITIONS_MET_IRQ_MASK 0x00020000 +#define RNG_INT_STATUS_NIST_FAIL_IRQ_MASK 0x00000020 +#define RNG_INT_STATUS_TOTAL_BITS_COUNT_IRQ_MASK 0x00000001 + +#define RNG_FIFO_DATA_OFFSET 0x20 + +#define RNG_FIFO_COUNT_OFFSET 0x24 +#define RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK 0x000000FF + +struct iproc_rng200_dev { + struct hwrng rng; + void __iomem *base; +}; + +#define to_rng_priv(rng) container_of(rng, struct iproc_rng200_dev, rng) + +static void iproc_rng200_restart(void __iomem *rng_base) +{ + uint32_t val; + + /* Disable RBG */ + val = ioread32(rng_base + RNG_CTRL_OFFSET); + val &= ~RNG_CTRL_RNG_RBGEN_MASK; + val |= RNG_CTRL_RNG_RBGEN_DISABLE; + iowrite32(val, rng_base + RNG_CTRL_OFFSET); + + /* Clear all interrupt status */ + iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET); + + /* Reset RNG and RBG */ + val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET); + val |= RBG_SOFT_RESET; + iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET); + + val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET); + val |= RNG_SOFT_RESET; + iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET); + + val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET); + val &= ~RNG_SOFT_RESET; + iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET); + + val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET); + val &= ~RBG_SOFT_RESET; + iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET); + + /* Enable RBG */ + val = ioread32(rng_base + RNG_CTRL_OFFSET); + val &= ~RNG_CTRL_RNG_RBGEN_MASK; + val |= RNG_CTRL_RNG_RBGEN_ENABLE; + iowrite32(val, rng_base + RNG_CTRL_OFFSET); +} + +static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + struct iproc_rng200_dev *priv = to_rng_priv(rng); + uint32_t num_remaining = max; + uint32_t status; + + #define MAX_RESETS_PER_READ 1 + uint32_t num_resets = 0; + + #define MAX_IDLE_TIME (1 * HZ) + unsigned long idle_endtime = jiffies + MAX_IDLE_TIME; + + while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) { + + /* Is RNG sane? If not, reset it. */ + status = ioread32(priv->base + RNG_INT_STATUS_OFFSET); + if ((status & (RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK | + RNG_INT_STATUS_NIST_FAIL_IRQ_MASK)) != 0) { + + if (num_resets >= MAX_RESETS_PER_READ) + return max - num_remaining; + + iproc_rng200_restart(priv->base); + num_resets++; + } + + /* Are there any random numbers available? 
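The low byte of the FIFO count register gives the number of 32-bit words waiting in the FIFO.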
*/ + if ((ioread32(priv->base + RNG_FIFO_COUNT_OFFSET) & + RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK) > 0) { + + if (num_remaining >= sizeof(uint32_t)) { + /* Buffer has room to store entire word */ + *(uint32_t *)buf = ioread32(priv->base + + RNG_FIFO_DATA_OFFSET); + buf += sizeof(uint32_t); + num_remaining -= sizeof(uint32_t); + } else { + /* Buffer can only store partial word */ + uint32_t rnd_number = ioread32(priv->base + + RNG_FIFO_DATA_OFFSET); + memcpy(buf, &rnd_number, num_remaining); + buf += num_remaining; + num_remaining = 0; + } + + /* Reset the IDLE timeout */ + idle_endtime = jiffies + MAX_IDLE_TIME; + } else { + if (!wait) + /* Cannot wait, return immediately */ + return max - num_remaining; + + /* Can wait, give others chance to run */ + usleep_range(min(num_remaining * 10, 500U), 500); + } + } + + return max - num_remaining; +} + +static int iproc_rng200_init(struct hwrng *rng) +{ + struct iproc_rng200_dev *priv = to_rng_priv(rng); + uint32_t val; + + /* Setup RNG. */ + val = ioread32(priv->base + RNG_CTRL_OFFSET); + val &= ~RNG_CTRL_RNG_RBGEN_MASK; + val |= RNG_CTRL_RNG_RBGEN_ENABLE; + iowrite32(val, priv->base + RNG_CTRL_OFFSET); + + return 0; +} + +static void iproc_rng200_cleanup(struct hwrng *rng) +{ + struct iproc_rng200_dev *priv = to_rng_priv(rng); + uint32_t val; + + /* Disable RNG hardware */ + val = ioread32(priv->base + RNG_CTRL_OFFSET); + val &= ~RNG_CTRL_RNG_RBGEN_MASK; + val |= RNG_CTRL_RNG_RBGEN_DISABLE; + iowrite32(val, priv->base + RNG_CTRL_OFFSET); +} + +static int iproc_rng200_probe(struct platform_device *pdev) +{ + struct iproc_rng200_dev *priv; + struct resource *res; + struct device *dev = &pdev->dev; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* Map peripheral */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get rng resources\n"); + return -EINVAL; + } + + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) { + dev_err(dev, "failed to remap rng regs\n"); + return PTR_ERR(priv->base); + } + + priv->rng.name = "iproc-rng200", + priv->rng.read = iproc_rng200_read, + priv->rng.init = iproc_rng200_init, + priv->rng.cleanup = iproc_rng200_cleanup, + + /* Register driver */ + ret = devm_hwrng_register(dev, &priv->rng); + if (ret) { + dev_err(dev, "hwrng registration failed\n"); + return ret; + } + + dev_info(dev, "hwrng registered\n"); + + return 0; +} + +static const struct of_device_id iproc_rng200_of_match[] = { + { .compatible = "brcm,iproc-rng200", }, + {}, +}; +MODULE_DEVICE_TABLE(of, iproc_rng200_of_match); + +static struct platform_driver iproc_rng200_driver = { + .driver = { + .name = "iproc-rng200", + .of_match_table = iproc_rng200_of_match, + }, + .probe = iproc_rng200_probe, +}; +module_platform_driver(iproc_rng200_driver); + +MODULE_AUTHOR("Broadcom"); +MODULE_DESCRIPTION("iProc RNG200 Random Number Generator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/char/hw_random/ixp4xx-rng.c b/kernel/drivers/char/hw_random/ixp4xx-rng.c new file mode 100644 index 000000000..beec1627d --- /dev/null +++ b/kernel/drivers/char/hw_random/ixp4xx-rng.c @@ -0,0 +1,75 @@ +/* + * drivers/char/hw_random/ixp4xx-rng.c + * + * RNG driver for Intel IXP4xx family of NPUs + * + * Author: Deepak Saxena + * + * Copyright 2005 (c) MontaVista Software, Inc. + * + * Fixes by Michael Buesch + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer) +{ + void __iomem * rng_base = (void __iomem *)rng->priv; + + *buffer = __raw_readl(rng_base); + + return 4; +} + +static struct hwrng ixp4xx_rng_ops = { + .name = "ixp4xx", + .data_read = ixp4xx_rng_data_read, +}; + +static int __init ixp4xx_rng_init(void) +{ + void __iomem * rng_base; + int err; + + if (!cpu_is_ixp46x()) /* includes IXP455 */ + return -ENOSYS; + + rng_base = ioremap(0x70002100, 4); + if (!rng_base) + return -ENOMEM; + ixp4xx_rng_ops.priv = (unsigned long)rng_base; + err = hwrng_register(&ixp4xx_rng_ops); + if (err) + iounmap(rng_base); + + return err; +} + +static void __exit ixp4xx_rng_exit(void) +{ + void __iomem * rng_base = (void __iomem *)ixp4xx_rng_ops.priv; + + hwrng_unregister(&ixp4xx_rng_ops); + iounmap(rng_base); +} + +module_init(ixp4xx_rng_init); +module_exit(ixp4xx_rng_exit); + +MODULE_AUTHOR("Deepak Saxena "); +MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/msm-rng.c b/kernel/drivers/char/hw_random/msm-rng.c new file mode 100644 index 000000000..96fb98640 --- /dev/null +++ b/kernel/drivers/char/hw_random/msm-rng.c @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include + +/* Device specific register offsets */ +#define PRNG_DATA_OUT 0x0000 +#define PRNG_STATUS 0x0004 +#define PRNG_LFSR_CFG 0x0100 +#define PRNG_CONFIG 0x0104 + +/* Device specific register masks and config values */ +#define PRNG_LFSR_CFG_MASK 0x0000ffff +#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd +#define PRNG_CONFIG_HW_ENABLE BIT(1) +#define PRNG_STATUS_DATA_AVAIL BIT(0) + +#define MAX_HW_FIFO_DEPTH 16 +#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) +#define WORD_SZ 4 + +struct msm_rng { + void __iomem *base; + struct clk *clk; + struct hwrng hwrng; +}; + +#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng) + +static int msm_rng_enable(struct hwrng *hwrng, int enable) +{ + struct msm_rng *rng = to_msm_rng(hwrng); + u32 val; + int ret; + + ret = clk_prepare_enable(rng->clk); + if (ret) + return ret; + + if (enable) { + /* Enable PRNG only if it is not already enabled */ + val = readl_relaxed(rng->base + PRNG_CONFIG); + if (val & PRNG_CONFIG_HW_ENABLE) + goto already_enabled; + + val = readl_relaxed(rng->base + PRNG_LFSR_CFG); + val &= ~PRNG_LFSR_CFG_MASK; + val |= PRNG_LFSR_CFG_CLOCKS; + writel(val, rng->base + PRNG_LFSR_CFG); + + val = readl_relaxed(rng->base + PRNG_CONFIG); + val |= PRNG_CONFIG_HW_ENABLE; + writel(val, rng->base + PRNG_CONFIG); + } else { + val = readl_relaxed(rng->base + PRNG_CONFIG); + val &= ~PRNG_CONFIG_HW_ENABLE; + writel(val, rng->base + PRNG_CONFIG); + } + +already_enabled: + clk_disable_unprepare(rng->clk); + return 0; +} + +static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) +{ + struct msm_rng *rng = to_msm_rng(hwrng); + size_t currsize = 0; + u32 *retdata = data; + size_t maxsize; + int ret; + u32 val; + + /* calculate max size bytes to transfer back to caller */ + maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); + + /* no room for word data */ + if (maxsize < WORD_SZ) + return 0; + + ret = clk_prepare_enable(rng->clk); + if (ret) + return ret; + + /* read random data from hardware */ + do { + val = readl_relaxed(rng->base + PRNG_STATUS); + if (!(val & PRNG_STATUS_DATA_AVAIL)) + break; + + val = readl_relaxed(rng->base + PRNG_DATA_OUT); + if (!val) + break; + + *retdata++ = val; + currsize += WORD_SZ; + + /* make sure we stay on 32bit boundary */ + if ((maxsize - currsize) < WORD_SZ) + break; + } while (currsize < maxsize); + + clk_disable_unprepare(rng->clk); + + return currsize; +} + +static int msm_rng_init(struct hwrng *hwrng) +{ + return msm_rng_enable(hwrng, 1); +} + +static void msm_rng_cleanup(struct hwrng *hwrng) +{ + msm_rng_enable(hwrng, 0); +} + +static int msm_rng_probe(struct platform_device *pdev) +{ + struct resource *res; + struct msm_rng *rng; + int ret; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng) + return -ENOMEM; + + platform_set_drvdata(pdev, rng); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rng->base)) + return PTR_ERR(rng->base); + + rng->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(rng->clk)) + return PTR_ERR(rng->clk); + + rng->hwrng.name = KBUILD_MODNAME, + rng->hwrng.init = msm_rng_init, + rng->hwrng.cleanup = msm_rng_cleanup, + rng->hwrng.read = msm_rng_read, + + ret = devm_hwrng_register(&pdev->dev, &rng->hwrng); + if (ret) { + dev_err(&pdev->dev, "failed to register hwrng\n"); + return ret; + } + + return 0; +} + +static const struct of_device_id msm_rng_of_match[] = { + { .compatible = "qcom,prng", 
}, + {} +}; +MODULE_DEVICE_TABLE(of, msm_rng_of_match); + +static struct platform_driver msm_rng_driver = { + .probe = msm_rng_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(msm_rng_of_match), + } +}; +module_platform_driver(msm_rng_driver); + +MODULE_ALIAS("platform:" KBUILD_MODNAME); +MODULE_AUTHOR("The Linux Foundation"); +MODULE_DESCRIPTION("Qualcomm MSM random number generator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/char/hw_random/mxc-rnga.c b/kernel/drivers/char/hw_random/mxc-rnga.c new file mode 100644 index 000000000..6cbb72ec6 --- /dev/null +++ b/kernel/drivers/char/hw_random/mxc-rnga.c @@ -0,0 +1,217 @@ +/* + * RNG driver for Freescale RNGA + * + * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Author: Alan Carvalho de Assis + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + * + * This driver is based on other RNG drivers. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* RNGA Registers */ +#define RNGA_CONTROL 0x00 +#define RNGA_STATUS 0x04 +#define RNGA_ENTROPY 0x08 +#define RNGA_OUTPUT_FIFO 0x0c +#define RNGA_MODE 0x10 +#define RNGA_VERIFICATION_CONTROL 0x14 +#define RNGA_OSC_CONTROL_COUNTER 0x18 +#define RNGA_OSC1_COUNTER 0x1c +#define RNGA_OSC2_COUNTER 0x20 +#define RNGA_OSC_COUNTER_STATUS 0x24 + +/* RNGA Registers Range */ +#define RNG_ADDR_RANGE 0x28 + +/* RNGA Control Register */ +#define RNGA_CONTROL_SLEEP 0x00000010 +#define RNGA_CONTROL_CLEAR_INT 0x00000008 +#define RNGA_CONTROL_MASK_INTS 0x00000004 +#define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002 +#define RNGA_CONTROL_GO 0x00000001 + +#define RNGA_STATUS_LEVEL_MASK 0x0000ff00 + +/* RNGA Status Register */ +#define RNGA_STATUS_OSC_DEAD 0x80000000 +#define RNGA_STATUS_SLEEP 0x00000010 +#define RNGA_STATUS_ERROR_INT 0x00000008 +#define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004 +#define RNGA_STATUS_LAST_READ_STATUS 0x00000002 +#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001 + +struct mxc_rng { + struct device *dev; + struct hwrng rng; + void __iomem *mem; + struct clk *clk; +}; + +static int mxc_rnga_data_present(struct hwrng *rng, int wait) +{ + int i; + struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); + + for (i = 0; i < 20; i++) { + /* how many random numbers are in FIFO? [0-16] */ + int level = (__raw_readl(mxc_rng->mem + RNGA_STATUS) & + RNGA_STATUS_LEVEL_MASK) >> 8; + if (level || !wait) + return !!level; + udelay(10); + } + return 0; +} + +static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) +{ + int err; + u32 ctrl; + struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); + + /* retrieve a random number from FIFO */ + *data = __raw_readl(mxc_rng->mem + RNGA_OUTPUT_FIFO); + + /* some error while reading this random number? 
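The error bit stays set in the status register until it is acknowledged via RNGA_CONTROL_CLEAR_INT below.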
*/ + err = __raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_ERROR_INT; + + /* if error: clear error interrupt, but doesn't return random number */ + if (err) { + dev_dbg(mxc_rng->dev, "Error while reading random number!\n"); + ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); + __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT, + mxc_rng->mem + RNGA_CONTROL); + return 0; + } else + return 4; +} + +static int mxc_rnga_init(struct hwrng *rng) +{ + u32 ctrl, osc; + struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); + + /* wake up */ + ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); + __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, mxc_rng->mem + RNGA_CONTROL); + + /* verify if oscillator is working */ + osc = __raw_readl(mxc_rng->mem + RNGA_STATUS); + if (osc & RNGA_STATUS_OSC_DEAD) { + dev_err(mxc_rng->dev, "RNGA Oscillator is dead!\n"); + return -ENODEV; + } + + /* go running */ + ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); + __raw_writel(ctrl | RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); + + return 0; +} + +static void mxc_rnga_cleanup(struct hwrng *rng) +{ + u32 ctrl; + struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); + + ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); + + /* stop rnga */ + __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); +} + +static int __init mxc_rnga_probe(struct platform_device *pdev) +{ + int err = -ENODEV; + struct resource *res; + struct mxc_rng *mxc_rng; + + mxc_rng = devm_kzalloc(&pdev->dev, sizeof(struct mxc_rng), + GFP_KERNEL); + if (!mxc_rng) + return -ENOMEM; + + mxc_rng->dev = &pdev->dev; + mxc_rng->rng.name = "mxc-rnga"; + mxc_rng->rng.init = mxc_rnga_init; + mxc_rng->rng.cleanup = mxc_rnga_cleanup, + mxc_rng->rng.data_present = mxc_rnga_data_present, + mxc_rng->rng.data_read = mxc_rnga_data_read, + + mxc_rng->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mxc_rng->clk)) { + dev_err(&pdev->dev, "Could not get rng_clk!\n"); + err = PTR_ERR(mxc_rng->clk); + goto out; + } + + err = clk_prepare_enable(mxc_rng->clk); + if (err) + goto out; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mxc_rng->mem)) { + err = PTR_ERR(mxc_rng->mem); + goto err_ioremap; + } + + err = hwrng_register(&mxc_rng->rng); + if (err) { + dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); + goto err_ioremap; + } + + dev_info(&pdev->dev, "MXC RNGA Registered.\n"); + + return 0; + +err_ioremap: + clk_disable_unprepare(mxc_rng->clk); + +out: + return err; +} + +static int __exit mxc_rnga_remove(struct platform_device *pdev) +{ + struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); + + hwrng_unregister(&mxc_rng->rng); + + clk_disable_unprepare(mxc_rng->clk); + + return 0; +} + +static struct platform_driver mxc_rnga_driver = { + .driver = { + .name = "mxc_rnga", + }, + .remove = __exit_p(mxc_rnga_remove), +}; + +module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe); + +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_DESCRIPTION("H/W RNGA driver for i.MX"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/n2-asm.S b/kernel/drivers/char/hw_random/n2-asm.S new file mode 100644 index 000000000..9b6eb5cd5 --- /dev/null +++ b/kernel/drivers/char/hw_random/n2-asm.S @@ -0,0 +1,79 @@ +/* n2-asm.S: Niagara2 RNG hypervisor call assembler. + * + * Copyright (C) 2008 David S. 
Miller + */ +#include +#include +#include "n2rng.h" + + .text + +ENTRY(sun4v_rng_get_diag_ctl) + mov HV_FAST_RNG_GET_DIAG_CTL, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_rng_get_diag_ctl) + +ENTRY(sun4v_rng_ctl_read_v1) + mov %o1, %o3 + mov %o2, %o4 + mov HV_FAST_RNG_CTL_READ, %o5 + ta HV_FAST_TRAP + stx %o1, [%o3] + retl + stx %o2, [%o4] +ENDPROC(sun4v_rng_ctl_read_v1) + +ENTRY(sun4v_rng_ctl_read_v2) + save %sp, -192, %sp + mov %i0, %o0 + mov %i1, %o1 + mov HV_FAST_RNG_CTL_READ, %o5 + ta HV_FAST_TRAP + stx %o1, [%i2] + stx %o2, [%i3] + stx %o3, [%i4] + stx %o4, [%i5] + ret + restore %g0, %o0, %o0 +ENDPROC(sun4v_rng_ctl_read_v2) + +ENTRY(sun4v_rng_ctl_write_v1) + mov %o3, %o4 + mov HV_FAST_RNG_CTL_WRITE, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(sun4v_rng_ctl_write_v1) + +ENTRY(sun4v_rng_ctl_write_v2) + mov HV_FAST_RNG_CTL_WRITE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_rng_ctl_write_v2) + +ENTRY(sun4v_rng_data_read_diag_v1) + mov %o2, %o4 + mov HV_FAST_RNG_DATA_READ_DIAG, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(sun4v_rng_data_read_diag_v1) + +ENTRY(sun4v_rng_data_read_diag_v2) + mov %o3, %o4 + mov HV_FAST_RNG_DATA_READ_DIAG, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(sun4v_rng_data_read_diag_v2) + +ENTRY(sun4v_rng_data_read) + mov %o1, %o4 + mov HV_FAST_RNG_DATA_READ, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(sun4v_rng_data_read) diff --git a/kernel/drivers/char/hw_random/n2-drv.c b/kernel/drivers/char/hw_random/n2-drv.c new file mode 100644 index 000000000..843d6f6ae --- /dev/null +++ b/kernel/drivers/char/hw_random/n2-drv.c @@ -0,0 +1,759 @@ +/* n2-drv.c: Niagara-2 RNG driver. + * + * Copyright (C) 2008, 2011 David S. Miller + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "n2rng.h" + +#define DRV_MODULE_NAME "n2rng" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "0.2" +#define DRV_MODULE_RELDATE "July 27, 2011" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Niagara2 RNG driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +/* The Niagara2 RNG provides a 64-bit read-only random number + * register, plus a control register. Access to the RNG is + * virtualized through the hypervisor so that both guests and control + * nodes can access the device. + * + * The entropy source consists of raw entropy sources, each + * constructed from a voltage controlled oscillator whose phase is + * jittered by thermal noise sources. + * + * The oscillator in each of the three raw entropy sources run at + * different frequencies. Normally, all three generator outputs are + * gathered, xored together, and fed into a CRC circuit, the output of + * which is the 64-bit read-only register. + * + * Some time is necessary for all the necessary entropy to build up + * such that a full 64-bits of entropy are available in the register. + * In normal operating mode (RNG_CTL_LFSR is set), the chip implements + * an interlock which blocks register reads until sufficient entropy + * is available. + * + * A control register is provided for adjusting various aspects of RNG + * operation, and to enable diagnostic modes. Each of the three raw + * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}). 
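Presumably, clearing an enable bit simply drops that source out of the XOR feeding the CRC circuit.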
Also + * provided are fields for controlling the minimum time in cycles + * between read accesses to the register (RNG_CTL_WAIT, this controls + * the interlock described in the previous paragraph). + * + * The standard setting is to have the mode bit (RNG_CTL_LFSR) set, + * all three entropy sources enabled, and the interlock time set + * appropriately. + * + * The CRC polynomial used by the chip is: + * + * P(X) = x64 + x61 + x57 + x56 + x52 + x51 + x50 + x48 + x47 + x46 + + * x43 + x42 + x41 + x39 + x38 + x37 + x35 + x32 + x28 + x25 + + * x22 + x21 + x17 + x15 + x13 + x12 + x11 + x7 + x5 + x + 1 + * + * The RNG_CTL_VCO value of each noise cell must be programmed + * separately. This is why 4 control register values must be provided + * to the hypervisor. During a write, the hypervisor writes them all, + * one at a time, to the actual RNG_CTL register. The first three + * values are used to setup the desired RNG_CTL_VCO for each entropy + * source, for example: + * + * control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1 + * control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2 + * control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3 + * + * And then the fourth value sets the final chip state and enables + * desired. + */ + +static int n2rng_hv_err_trans(unsigned long hv_err) +{ + switch (hv_err) { + case HV_EOK: + return 0; + case HV_EWOULDBLOCK: + return -EAGAIN; + case HV_ENOACCESS: + return -EPERM; + case HV_EIO: + return -EIO; + case HV_EBUSY: + return -EBUSY; + case HV_EBADALIGN: + case HV_ENORADDR: + return -EFAULT; + default: + return -EINVAL; + } +} + +static unsigned long n2rng_generic_read_control_v2(unsigned long ra, + unsigned long unit) +{ + unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status; + int block = 0, busy = 0; + + while (1) { + hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state, + &ticks, + &watchdog_delta, + &watchdog_status); + if (hv_err == HV_EOK) + break; + + if (hv_err == HV_EBUSY) { + if (++busy >= N2RNG_BUSY_LIMIT) + break; + + udelay(1); + } else if (hv_err == HV_EWOULDBLOCK) { + if (++block >= N2RNG_BLOCK_LIMIT) + break; + + __delay(ticks); + } else + break; + } + + return hv_err; +} + +/* In multi-socket situations, the hypervisor might need to + * queue up the RNG control register write if it's for a unit + * that is on a cpu socket other than the one we are executing on. + * + * We poll here waiting for a successful read of that control + * register to make sure the write has been actually performed. 
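The settle helper below reuses n2rng_generic_read_control_v2(), which retries on HV_EBUSY and HV_EWOULDBLOCK until the hypervisor returns HV_EOK or the retry limits are exceeded.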
+ */ +static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit) +{ + unsigned long ra = __pa(&np->scratch_control[0]); + + return n2rng_generic_read_control_v2(ra, unit); +} + +static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit, + unsigned long state, + unsigned long control_ra, + unsigned long watchdog_timeout, + unsigned long *ticks) +{ + unsigned long hv_err; + + if (np->hvapi_major == 1) { + hv_err = sun4v_rng_ctl_write_v1(control_ra, state, + watchdog_timeout, ticks); + } else { + hv_err = sun4v_rng_ctl_write_v2(control_ra, state, + watchdog_timeout, unit); + if (hv_err == HV_EOK) + hv_err = n2rng_control_settle_v2(np, unit); + *ticks = N2RNG_ACCUM_CYCLES_DEFAULT; + } + + return hv_err; +} + +static int n2rng_generic_read_data(unsigned long data_ra) +{ + unsigned long ticks, hv_err; + int block = 0, hcheck = 0; + + while (1) { + hv_err = sun4v_rng_data_read(data_ra, &ticks); + if (hv_err == HV_EOK) + return 0; + + if (hv_err == HV_EWOULDBLOCK) { + if (++block >= N2RNG_BLOCK_LIMIT) + return -EWOULDBLOCK; + __delay(ticks); + } else if (hv_err == HV_ENOACCESS) { + return -EPERM; + } else if (hv_err == HV_EIO) { + if (++hcheck >= N2RNG_HCHECK_LIMIT) + return -EIO; + udelay(10000); + } else + return -ENODEV; + } +} + +static unsigned long n2rng_read_diag_data_one(struct n2rng *np, + unsigned long unit, + unsigned long data_ra, + unsigned long data_len, + unsigned long *ticks) +{ + unsigned long hv_err; + + if (np->hvapi_major == 1) { + hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks); + } else { + hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len, + unit, ticks); + if (!*ticks) + *ticks = N2RNG_ACCUM_CYCLES_DEFAULT; + } + return hv_err; +} + +static int n2rng_generic_read_diag_data(struct n2rng *np, + unsigned long unit, + unsigned long data_ra, + unsigned long data_len) +{ + unsigned long ticks, hv_err; + int block = 0; + + while (1) { + hv_err = n2rng_read_diag_data_one(np, unit, + data_ra, data_len, + &ticks); + if (hv_err == HV_EOK) + return 0; + + if (hv_err == HV_EWOULDBLOCK) { + if (++block >= N2RNG_BLOCK_LIMIT) + return -EWOULDBLOCK; + __delay(ticks); + } else if (hv_err == HV_ENOACCESS) { + return -EPERM; + } else if (hv_err == HV_EIO) { + return -EIO; + } else + return -ENODEV; + } +} + + +static int n2rng_generic_write_control(struct n2rng *np, + unsigned long control_ra, + unsigned long unit, + unsigned long state) +{ + unsigned long hv_err, ticks; + int block = 0, busy = 0; + + while (1) { + hv_err = n2rng_write_ctl_one(np, unit, state, control_ra, + np->wd_timeo, &ticks); + if (hv_err == HV_EOK) + return 0; + + if (hv_err == HV_EWOULDBLOCK) { + if (++block >= N2RNG_BLOCK_LIMIT) + return -EWOULDBLOCK; + __delay(ticks); + } else if (hv_err == HV_EBUSY) { + if (++busy >= N2RNG_BUSY_LIMIT) + return -EBUSY; + udelay(1); + } else + return -ENODEV; + } +} + +/* Just try to see if we can successfully access the control register + * of the RNG on the domain on which we are currently executing. + */ +static int n2rng_try_read_ctl(struct n2rng *np) +{ + unsigned long hv_err; + unsigned long x; + + if (np->hvapi_major == 1) { + hv_err = sun4v_rng_get_diag_ctl(); + } else { + /* We purposefully give invalid arguments, HV_NOACCESS + * is higher priority than the errors we'd get from + * these other cases, and that's the error we are + * truly interested in. 
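Getting HV_ENOACCESS back tells us we are not running in the control domain; n2rng_init_control() then treats the device as a data consumer only.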
+ */ + hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x); + switch (hv_err) { + case HV_EWOULDBLOCK: + case HV_ENOACCESS: + break; + default: + hv_err = HV_EOK; + break; + } + } + + return n2rng_hv_err_trans(hv_err); +} + +#define CONTROL_DEFAULT_BASE \ + ((2 << RNG_CTL_ASEL_SHIFT) | \ + (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_CTL_WAIT_SHIFT) | \ + RNG_CTL_LFSR) + +#define CONTROL_DEFAULT_0 \ + (CONTROL_DEFAULT_BASE | \ + (1 << RNG_CTL_VCO_SHIFT) | \ + RNG_CTL_ES1) +#define CONTROL_DEFAULT_1 \ + (CONTROL_DEFAULT_BASE | \ + (2 << RNG_CTL_VCO_SHIFT) | \ + RNG_CTL_ES2) +#define CONTROL_DEFAULT_2 \ + (CONTROL_DEFAULT_BASE | \ + (3 << RNG_CTL_VCO_SHIFT) | \ + RNG_CTL_ES3) +#define CONTROL_DEFAULT_3 \ + (CONTROL_DEFAULT_BASE | \ + RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3) + +static void n2rng_control_swstate_init(struct n2rng *np) +{ + int i; + + np->flags |= N2RNG_FLAG_CONTROL; + + np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT; + np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT; + np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT; + + for (i = 0; i < np->num_units; i++) { + struct n2rng_unit *up = &np->units[i]; + + up->control[0] = CONTROL_DEFAULT_0; + up->control[1] = CONTROL_DEFAULT_1; + up->control[2] = CONTROL_DEFAULT_2; + up->control[3] = CONTROL_DEFAULT_3; + } + + np->hv_state = HV_RNG_STATE_UNCONFIGURED; +} + +static int n2rng_grab_diag_control(struct n2rng *np) +{ + int i, busy_count, err = -ENODEV; + + busy_count = 0; + for (i = 0; i < 100; i++) { + err = n2rng_try_read_ctl(np); + if (err != -EAGAIN) + break; + + if (++busy_count > 100) { + dev_err(&np->op->dev, + "Grab diag control timeout.\n"); + return -ENODEV; + } + + udelay(1); + } + + return err; +} + +static int n2rng_init_control(struct n2rng *np) +{ + int err = n2rng_grab_diag_control(np); + + /* Not in the control domain, that's OK we are only a consumer + * of the RNG data, we don't setup and program it. + */ + if (err == -EPERM) + return 0; + if (err) + return err; + + n2rng_control_swstate_init(np); + + return 0; +} + +static int n2rng_data_read(struct hwrng *rng, u32 *data) +{ + struct n2rng *np = (struct n2rng *) rng->priv; + unsigned long ra = __pa(&np->test_data); + int len; + + if (!(np->flags & N2RNG_FLAG_READY)) { + len = 0; + } else if (np->flags & N2RNG_FLAG_BUFFER_VALID) { + np->flags &= ~N2RNG_FLAG_BUFFER_VALID; + *data = np->buffer; + len = 4; + } else { + int err = n2rng_generic_read_data(ra); + if (!err) { + np->buffer = np->test_data >> 32; + *data = np->test_data & 0xffffffff; + len = 4; + } else { + dev_err(&np->op->dev, "RNG error, restesting\n"); + np->flags &= ~N2RNG_FLAG_READY; + if (!(np->flags & N2RNG_FLAG_SHUTDOWN)) + schedule_delayed_work(&np->work, 0); + len = 0; + } + } + + return len; +} + +/* On a guest node, just make sure we can read random data properly. + * If a control node reboots or reloads it's n2rng driver, this won't + * work during that time. So we have to keep probing until the device + * becomes usable. 
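n2rng_work() handles this by rescheduling itself every two seconds (HZ * 2) until a read finally succeeds.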
+ */ +static int n2rng_guest_check(struct n2rng *np) +{ + unsigned long ra = __pa(&np->test_data); + + return n2rng_generic_read_data(ra); +} + +static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit, + u64 *pre_control, u64 pre_state, + u64 *buffer, unsigned long buf_len, + u64 *post_control, u64 post_state) +{ + unsigned long post_ctl_ra = __pa(post_control); + unsigned long pre_ctl_ra = __pa(pre_control); + unsigned long buffer_ra = __pa(buffer); + int err; + + err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state); + if (err) + return err; + + err = n2rng_generic_read_diag_data(np, unit, + buffer_ra, buf_len); + + (void) n2rng_generic_write_control(np, post_ctl_ra, unit, + post_state); + + return err; +} + +static u64 advance_polynomial(u64 poly, u64 val, int count) +{ + int i; + + for (i = 0; i < count; i++) { + int highbit_set = ((s64)val < 0); + + val <<= 1; + if (highbit_set) + val ^= poly; + } + + return val; +} + +static int n2rng_test_buffer_find(struct n2rng *np, u64 val) +{ + int i, count = 0; + + /* Purposefully skip over the first word. */ + for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) { + if (np->test_buffer[i] == val) + count++; + } + return count; +} + +static void n2rng_dump_test_buffer(struct n2rng *np) +{ + int i; + + for (i = 0; i < SELFTEST_BUFFER_WORDS; i++) + dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n", + i, np->test_buffer[i]); +} + +static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit) +{ + u64 val = SELFTEST_VAL; + int err, matches, limit; + + matches = 0; + for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) { + matches += n2rng_test_buffer_find(np, val); + if (matches >= SELFTEST_MATCH_GOAL) + break; + val = advance_polynomial(SELFTEST_POLY, val, 1); + } + + err = 0; + if (limit >= SELFTEST_LOOPS_MAX) { + err = -ENODEV; + dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit); + n2rng_dump_test_buffer(np); + } else + dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit); + + return err; +} + +static int n2rng_control_selftest(struct n2rng *np, unsigned long unit) +{ + int err; + + np->test_control[0] = (0x2 << RNG_CTL_ASEL_SHIFT); + np->test_control[1] = (0x2 << RNG_CTL_ASEL_SHIFT); + np->test_control[2] = (0x2 << RNG_CTL_ASEL_SHIFT); + np->test_control[3] = ((0x2 << RNG_CTL_ASEL_SHIFT) | + RNG_CTL_LFSR | + ((SELFTEST_TICKS - 2) << RNG_CTL_WAIT_SHIFT)); + + + err = n2rng_entropy_diag_read(np, unit, np->test_control, + HV_RNG_STATE_HEALTHCHECK, + np->test_buffer, + sizeof(np->test_buffer), + &np->units[unit].control[0], + np->hv_state); + if (err) + return err; + + return n2rng_check_selftest_buffer(np, unit); +} + +static int n2rng_control_check(struct n2rng *np) +{ + int i; + + for (i = 0; i < np->num_units; i++) { + int err = n2rng_control_selftest(np, i); + if (err) + return err; + } + return 0; +} + +/* The sanity checks passed, install the final configuration into the + * chip, it's ready to use. + */ +static int n2rng_control_configure_units(struct n2rng *np) +{ + int unit, err; + + err = 0; + for (unit = 0; unit < np->num_units; unit++) { + struct n2rng_unit *up = &np->units[unit]; + unsigned long ctl_ra = __pa(&up->control[0]); + int esrc; + u64 base; + + base = ((np->accum_cycles << RNG_CTL_WAIT_SHIFT) | + (2 << RNG_CTL_ASEL_SHIFT) | + RNG_CTL_LFSR); + + /* XXX This isn't the best. We should fetch a bunch + * XXX of words using each entropy source combined XXX + * with each VCO setting, and see which combinations + * XXX give the best random data. 
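For now each entropy source just gets a VCO setting equal to its own index, with only that source enabled.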
+ */ + for (esrc = 0; esrc < 3; esrc++) + up->control[esrc] = base | + (esrc << RNG_CTL_VCO_SHIFT) | + (RNG_CTL_ES1 << esrc); + + up->control[3] = base | + (RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3); + + err = n2rng_generic_write_control(np, ctl_ra, unit, + HV_RNG_STATE_CONFIGURED); + if (err) + break; + } + + return err; +} + +static void n2rng_work(struct work_struct *work) +{ + struct n2rng *np = container_of(work, struct n2rng, work.work); + int err = 0; + + if (!(np->flags & N2RNG_FLAG_CONTROL)) { + err = n2rng_guest_check(np); + } else { + preempt_disable(); + err = n2rng_control_check(np); + preempt_enable(); + + if (!err) + err = n2rng_control_configure_units(np); + } + + if (!err) { + np->flags |= N2RNG_FLAG_READY; + dev_info(&np->op->dev, "RNG ready\n"); + } + + if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN)) + schedule_delayed_work(&np->work, HZ * 2); +} + +static void n2rng_driver_version(void) +{ + static int n2rng_version_printed; + + if (n2rng_version_printed++ == 0) + pr_info("%s", version); +} + +static const struct of_device_id n2rng_match[]; +static int n2rng_probe(struct platform_device *op) +{ + const struct of_device_id *match; + int multi_capable; + int err = -ENOMEM; + struct n2rng *np; + + match = of_match_device(n2rng_match, &op->dev); + if (!match) + return -EINVAL; + multi_capable = (match->data != NULL); + + n2rng_driver_version(); + np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL); + if (!np) + goto out; + np->op = op; + + INIT_DELAYED_WORK(&np->work, n2rng_work); + + if (multi_capable) + np->flags |= N2RNG_FLAG_MULTI; + + err = -ENODEV; + np->hvapi_major = 2; + if (sun4v_hvapi_register(HV_GRP_RNG, + np->hvapi_major, + &np->hvapi_minor)) { + np->hvapi_major = 1; + if (sun4v_hvapi_register(HV_GRP_RNG, + np->hvapi_major, + &np->hvapi_minor)) { + dev_err(&op->dev, "Cannot register suitable " + "HVAPI version.\n"); + goto out; + } + } + + if (np->flags & N2RNG_FLAG_MULTI) { + if (np->hvapi_major < 2) { + dev_err(&op->dev, "multi-unit-capable RNG requires " + "HVAPI major version 2 or later, got %lu\n", + np->hvapi_major); + goto out_hvapi_unregister; + } + np->num_units = of_getintprop_default(op->dev.of_node, + "rng-#units", 0); + if (!np->num_units) { + dev_err(&op->dev, "VF RNG lacks rng-#units property\n"); + goto out_hvapi_unregister; + } + } else + np->num_units = 1; + + dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n", + np->hvapi_major, np->hvapi_minor); + + np->units = devm_kzalloc(&op->dev, + sizeof(struct n2rng_unit) * np->num_units, + GFP_KERNEL); + err = -ENOMEM; + if (!np->units) + goto out_hvapi_unregister; + + err = n2rng_init_control(np); + if (err) + goto out_hvapi_unregister; + + dev_info(&op->dev, "Found %s RNG, units: %d\n", + ((np->flags & N2RNG_FLAG_MULTI) ? 
+ "multi-unit-capable" : "single-unit"), + np->num_units); + + np->hwrng.name = "n2rng"; + np->hwrng.data_read = n2rng_data_read; + np->hwrng.priv = (unsigned long) np; + + err = hwrng_register(&np->hwrng); + if (err) + goto out_hvapi_unregister; + + platform_set_drvdata(op, np); + + schedule_delayed_work(&np->work, 0); + + return 0; + +out_hvapi_unregister: + sun4v_hvapi_unregister(HV_GRP_RNG); + +out: + return err; +} + +static int n2rng_remove(struct platform_device *op) +{ + struct n2rng *np = platform_get_drvdata(op); + + np->flags |= N2RNG_FLAG_SHUTDOWN; + + cancel_delayed_work_sync(&np->work); + + hwrng_unregister(&np->hwrng); + + sun4v_hvapi_unregister(HV_GRP_RNG); + + return 0; +} + +static const struct of_device_id n2rng_match[] = { + { + .name = "random-number-generator", + .compatible = "SUNW,n2-rng", + }, + { + .name = "random-number-generator", + .compatible = "SUNW,vf-rng", + .data = (void *) 1, + }, + { + .name = "random-number-generator", + .compatible = "SUNW,kt-rng", + .data = (void *) 1, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, n2rng_match); + +static struct platform_driver n2rng_driver = { + .driver = { + .name = "n2rng", + .of_match_table = n2rng_match, + }, + .probe = n2rng_probe, + .remove = n2rng_remove, +}; + +module_platform_driver(n2rng_driver); diff --git a/kernel/drivers/char/hw_random/n2rng.h b/kernel/drivers/char/hw_random/n2rng.h new file mode 100644 index 000000000..f244ac890 --- /dev/null +++ b/kernel/drivers/char/hw_random/n2rng.h @@ -0,0 +1,118 @@ +/* n2rng.h: Niagara2 RNG defines. + * + * Copyright (C) 2008 David S. Miller + */ + +#ifndef _N2RNG_H +#define _N2RNG_H + +#define RNG_CTL_WAIT 0x0000000001fffe00ULL /* Minimum wait time */ +#define RNG_CTL_WAIT_SHIFT 9 +#define RNG_CTL_BYPASS 0x0000000000000100ULL /* VCO voltage source */ +#define RNG_CTL_VCO 0x00000000000000c0ULL /* VCO rate control */ +#define RNG_CTL_VCO_SHIFT 6 +#define RNG_CTL_ASEL 0x0000000000000030ULL /* Analog MUX select */ +#define RNG_CTL_ASEL_SHIFT 4 +#define RNG_CTL_LFSR 0x0000000000000008ULL /* Use LFSR or plain shift */ +#define RNG_CTL_ES3 0x0000000000000004ULL /* Enable entropy source 3 */ +#define RNG_CTL_ES2 0x0000000000000002ULL /* Enable entropy source 2 */ +#define RNG_CTL_ES1 0x0000000000000001ULL /* Enable entropy source 1 */ + +#define HV_FAST_RNG_GET_DIAG_CTL 0x130 +#define HV_FAST_RNG_CTL_READ 0x131 +#define HV_FAST_RNG_CTL_WRITE 0x132 +#define HV_FAST_RNG_DATA_READ_DIAG 0x133 +#define HV_FAST_RNG_DATA_READ 0x134 + +#define HV_RNG_STATE_UNCONFIGURED 0 +#define HV_RNG_STATE_CONFIGURED 1 +#define HV_RNG_STATE_HEALTHCHECK 2 +#define HV_RNG_STATE_ERROR 3 + +#define HV_RNG_NUM_CONTROL 4 + +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_rng_get_diag_ctl(void); +extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra, + unsigned long *state, + unsigned long *tick_delta); +extern unsigned long sun4v_rng_ctl_read_v2(unsigned long ctl_regs_ra, + unsigned long unit, + unsigned long *state, + unsigned long *tick_delta, + unsigned long *watchdog, + unsigned long *write_status); +extern unsigned long sun4v_rng_ctl_write_v1(unsigned long ctl_regs_ra, + unsigned long state, + unsigned long write_timeout, + unsigned long *tick_delta); +extern unsigned long sun4v_rng_ctl_write_v2(unsigned long ctl_regs_ra, + unsigned long state, + unsigned long write_timeout, + unsigned long unit); +extern unsigned long sun4v_rng_data_read_diag_v1(unsigned long data_ra, + unsigned long len, + unsigned long *tick_delta); +extern unsigned long sun4v_rng_data_read_diag_v2(unsigned long 
data_ra, + unsigned long len, + unsigned long unit, + unsigned long *tick_delta); +extern unsigned long sun4v_rng_data_read(unsigned long data_ra, + unsigned long *tick_delta); + +struct n2rng_unit { + u64 control[HV_RNG_NUM_CONTROL]; +}; + +struct n2rng { + struct platform_device *op; + + unsigned long flags; +#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */ +#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */ +#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */ +#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */ +#define N2RNG_FLAG_BUFFER_VALID 0x00000020 /* u32 buffer holds valid data */ + + int num_units; + struct n2rng_unit *units; + + struct hwrng hwrng; + u32 buffer; + + /* Registered hypervisor group API major and minor version. */ + unsigned long hvapi_major; + unsigned long hvapi_minor; + + struct delayed_work work; + + unsigned long hv_state; /* HV_RNG_STATE_foo */ + + unsigned long health_check_sec; + unsigned long accum_cycles; + unsigned long wd_timeo; +#define N2RNG_HEALTH_CHECK_SEC_DEFAULT 0 +#define N2RNG_ACCUM_CYCLES_DEFAULT 2048 +#define N2RNG_WD_TIMEO_DEFAULT 0 + + u64 scratch_control[HV_RNG_NUM_CONTROL]; + +#define SELFTEST_TICKS 38859 +#define SELFTEST_VAL ((u64)0xB8820C7BD387E32C) +#define SELFTEST_POLY ((u64)0x231DCEE91262B8A3) +#define SELFTEST_MATCH_GOAL 6 +#define SELFTEST_LOOPS_MAX 40000 +#define SELFTEST_BUFFER_WORDS 8 + + u64 test_data; + u64 test_control[HV_RNG_NUM_CONTROL]; + u64 test_buffer[SELFTEST_BUFFER_WORDS]; +}; + +#define N2RNG_BLOCK_LIMIT 60000 +#define N2RNG_BUSY_LIMIT 100 +#define N2RNG_HCHECK_LIMIT 100 + +#endif /* !(__ASSEMBLY__) */ + +#endif /* _N2RNG_H */ diff --git a/kernel/drivers/char/hw_random/nomadik-rng.c b/kernel/drivers/char/hw_random/nomadik-rng.c new file mode 100644 index 000000000..9c8581577 --- /dev/null +++ b/kernel/drivers/char/hw_random/nomadik-rng.c @@ -0,0 +1,106 @@ +/* + * Nomadik RNG support + * Copyright 2009 Alessandro Rubini + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct clk *rng_clk; + +static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + void __iomem *base = (void __iomem *)rng->priv; + + /* + * The register is 32 bits and gives 16 random bits (low half). + * A subsequent read will delay the core for 400ns, so we just read + * once and accept the very unlikely very small delay, even if wait==0. 
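Returning 2 below tells the hwrng core that two bytes of the caller's buffer were filled.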
+ */ + *(u16 *)data = __raw_readl(base + 8) & 0xffff; + return 2; +} + +/* we have at most one RNG per machine, granted */ +static struct hwrng nmk_rng = { + .name = "nomadik", + .read = nmk_rng_read, +}; + +static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) +{ + void __iomem *base; + int ret; + + rng_clk = devm_clk_get(&dev->dev, NULL); + if (IS_ERR(rng_clk)) { + dev_err(&dev->dev, "could not get rng clock\n"); + ret = PTR_ERR(rng_clk); + return ret; + } + + clk_prepare_enable(rng_clk); + + ret = amba_request_regions(dev, dev->dev.init_name); + if (ret) + goto out_clk; + ret = -ENOMEM; + base = devm_ioremap(&dev->dev, dev->res.start, + resource_size(&dev->res)); + if (!base) + goto out_release; + nmk_rng.priv = (unsigned long)base; + ret = hwrng_register(&nmk_rng); + if (ret) + goto out_release; + return 0; + +out_release: + amba_release_regions(dev); +out_clk: + clk_disable(rng_clk); + return ret; +} + +static int nmk_rng_remove(struct amba_device *dev) +{ + hwrng_unregister(&nmk_rng); + amba_release_regions(dev); + clk_disable(rng_clk); + return 0; +} + +static struct amba_id nmk_rng_ids[] = { + { + .id = 0x000805e1, + .mask = 0x000fffff, /* top bits are rev and cfg: accept all */ + }, + {0, 0}, +}; + +MODULE_DEVICE_TABLE(amba, nmk_rng_ids); + +static struct amba_driver nmk_rng_driver = { + .drv = { + .owner = THIS_MODULE, + .name = "rng", + }, + .probe = nmk_rng_probe, + .remove = nmk_rng_remove, + .id_table = nmk_rng_ids, +}; + +module_amba_driver(nmk_rng_driver); + +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/octeon-rng.c b/kernel/drivers/char/hw_random/octeon-rng.c new file mode 100644 index 000000000..6234a4a19 --- /dev/null +++ b/kernel/drivers/char/hw_random/octeon-rng.c @@ -0,0 +1,128 @@ +/* + * Hardware Random Number Generator support for Cavium Networks + * Octeon processor family. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009 Cavium Networks + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct octeon_rng { + struct hwrng ops; + void __iomem *control_status; + void __iomem *result; +}; + +static int octeon_rng_init(struct hwrng *rng) +{ + union cvmx_rnm_ctl_status ctl; + struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); + + ctl.u64 = 0; + ctl.s.ent_en = 1; /* Enable the entropy source. */ + ctl.s.rng_en = 1; /* Enable the RNG hardware. */ + cvmx_write_csr((u64)p->control_status, ctl.u64); + return 0; +} + +static void octeon_rng_cleanup(struct hwrng *rng) +{ + union cvmx_rnm_ctl_status ctl; + struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); + + ctl.u64 = 0; + /* Disable everything. 
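A zeroed control/status word clears the ent_en and rng_en bits set in octeon_rng_init().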
*/ + cvmx_write_csr((u64)p->control_status, ctl.u64); +} + +static int octeon_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); + + *data = cvmx_read64_uint32((u64)p->result); + return sizeof(u32); +} + +static int octeon_rng_probe(struct platform_device *pdev) +{ + struct resource *res_ports; + struct resource *res_result; + struct octeon_rng *rng; + int ret; + struct hwrng ops = { + .name = "octeon", + .init = octeon_rng_init, + .cleanup = octeon_rng_cleanup, + .data_read = octeon_rng_data_read + }; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng) + return -ENOMEM; + + res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_ports) + return -ENOENT; + + res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_result) + return -ENOENT; + + + rng->control_status = devm_ioremap_nocache(&pdev->dev, + res_ports->start, + sizeof(u64)); + if (!rng->control_status) + return -ENOENT; + + rng->result = devm_ioremap_nocache(&pdev->dev, + res_result->start, + sizeof(u64)); + if (!rng->result) + return -ENOENT; + + rng->ops = ops; + + platform_set_drvdata(pdev, &rng->ops); + ret = hwrng_register(&rng->ops); + if (ret) + return -ENOENT; + + dev_info(&pdev->dev, "Octeon Random Number Generator\n"); + + return 0; +} + +static int octeon_rng_remove(struct platform_device *pdev) +{ + struct hwrng *rng = platform_get_drvdata(pdev); + + hwrng_unregister(rng); + + return 0; +} + +static struct platform_driver octeon_rng_driver = { + .driver = { + .name = "octeon_rng", + }, + .probe = octeon_rng_probe, + .remove = octeon_rng_remove, +}; + +module_platform_driver(octeon_rng_driver); + +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/omap-rng.c b/kernel/drivers/char/hw_random/omap-rng.c new file mode 100644 index 000000000..8a1432e8b --- /dev/null +++ b/kernel/drivers/char/hw_random/omap-rng.c @@ -0,0 +1,460 @@ +/* + * omap-rng.c - RNG driver for TI OMAP CPU family + * + * Author: Deepak Saxena + * + * Copyright 2005 (c) MontaVista Software, Inc. + * + * Mostly based on original driver: + * + * Copyright (C) 2005 Nokia Corporation + * Author: Juha Yrjölä + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define RNG_REG_STATUS_RDY (1 << 0) + +#define RNG_REG_INTACK_RDY_MASK (1 << 0) +#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK (1 << 1) +#define RNG_SHUTDOWN_OFLO_MASK (1 << 1) + +#define RNG_CONTROL_STARTUP_CYCLES_SHIFT 16 +#define RNG_CONTROL_STARTUP_CYCLES_MASK (0xffff << 16) +#define RNG_CONTROL_ENABLE_TRNG_SHIFT 10 +#define RNG_CONTROL_ENABLE_TRNG_MASK (1 << 10) + +#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT 16 +#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK (0xffff << 16) +#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT 0 +#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK (0xff << 0) + +#define RNG_CONTROL_STARTUP_CYCLES 0xff +#define RNG_CONFIG_MIN_REFIL_CYCLES 0x21 +#define RNG_CONFIG_MAX_REFIL_CYCLES 0x22 + +#define RNG_ALARMCNT_ALARM_TH_SHIFT 0x0 +#define RNG_ALARMCNT_ALARM_TH_MASK (0xff << 0) +#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT 16 +#define RNG_ALARMCNT_SHUTDOWN_TH_MASK (0x1f << 16) +#define RNG_ALARM_THRESHOLD 0xff +#define RNG_SHUTDOWN_THRESHOLD 0x4 + +#define RNG_REG_FROENABLE_MASK 0xffffff +#define RNG_REG_FRODETUNE_MASK 0xffffff + +#define OMAP2_RNG_OUTPUT_SIZE 0x4 +#define OMAP4_RNG_OUTPUT_SIZE 0x8 + +enum { + RNG_OUTPUT_L_REG = 0, + RNG_OUTPUT_H_REG, + RNG_STATUS_REG, + RNG_INTMASK_REG, + RNG_INTACK_REG, + RNG_CONTROL_REG, + RNG_CONFIG_REG, + RNG_ALARMCNT_REG, + RNG_FROENABLE_REG, + RNG_FRODETUNE_REG, + RNG_ALARMMASK_REG, + RNG_ALARMSTOP_REG, + RNG_REV_REG, + RNG_SYSCONFIG_REG, +}; + +static const u16 reg_map_omap2[] = { + [RNG_OUTPUT_L_REG] = 0x0, + [RNG_STATUS_REG] = 0x4, + [RNG_CONFIG_REG] = 0x28, + [RNG_REV_REG] = 0x3c, + [RNG_SYSCONFIG_REG] = 0x40, +}; + +static const u16 reg_map_omap4[] = { + [RNG_OUTPUT_L_REG] = 0x0, + [RNG_OUTPUT_H_REG] = 0x4, + [RNG_STATUS_REG] = 0x8, + [RNG_INTMASK_REG] = 0xc, + [RNG_INTACK_REG] = 0x10, + [RNG_CONTROL_REG] = 0x14, + [RNG_CONFIG_REG] = 0x18, + [RNG_ALARMCNT_REG] = 0x1c, + [RNG_FROENABLE_REG] = 0x20, + [RNG_FRODETUNE_REG] = 0x24, + [RNG_ALARMMASK_REG] = 0x28, + [RNG_ALARMSTOP_REG] = 0x2c, + [RNG_REV_REG] = 0x1FE0, + [RNG_SYSCONFIG_REG] = 0x1FE4, +}; + +struct omap_rng_dev; +/** + * struct omap_rng_pdata - RNG IP block-specific data + * @regs: Pointer to the register offsets structure. + * @data_size: No. of bytes in RNG output. + * @data_present: Callback to determine if data is available. + * @init: Callback for IP specific initialization sequence. + * @cleanup: Callback for IP specific cleanup sequence. + */ +struct omap_rng_pdata { + u16 *regs; + u32 data_size; + u32 (*data_present)(struct omap_rng_dev *priv); + int (*init)(struct omap_rng_dev *priv); + void (*cleanup)(struct omap_rng_dev *priv); +}; + +struct omap_rng_dev { + void __iomem *base; + struct device *dev; + const struct omap_rng_pdata *pdata; +}; + +static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) +{ + return __raw_readl(priv->base + priv->pdata->regs[reg]); +} + +static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg, + u32 val) +{ + __raw_writel(val, priv->base + priv->pdata->regs[reg]); +} + +static int omap_rng_data_present(struct hwrng *rng, int wait) +{ + struct omap_rng_dev *priv; + int data, i; + + priv = (struct omap_rng_dev *)rng->priv; + + for (i = 0; i < 20; i++) { + data = priv->pdata->data_present(priv); + if (data || !wait) + break; + /* RNG produces data fast enough (2+ MBit/sec, even + * during "rngtest" loads, that these delays don't + * seem to trigger. 
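+		 * (That is at most 20 polls of 10 us each, so a
+		 * caller busy-waits no more than ~200 us per call.)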
We *could* use the RNG IRQ, but + * that'd be higher overhead ... so why bother? + */ + udelay(10); + } + return data; +} + +static int omap_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct omap_rng_dev *priv; + u32 data_size, i; + + priv = (struct omap_rng_dev *)rng->priv; + data_size = priv->pdata->data_size; + + for (i = 0; i < data_size / sizeof(u32); i++) + data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i); + + if (priv->pdata->regs[RNG_INTACK_REG]) + omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK); + return data_size; +} + +static int omap_rng_init(struct hwrng *rng) +{ + struct omap_rng_dev *priv; + + priv = (struct omap_rng_dev *)rng->priv; + return priv->pdata->init(priv); +} + +static void omap_rng_cleanup(struct hwrng *rng) +{ + struct omap_rng_dev *priv; + + priv = (struct omap_rng_dev *)rng->priv; + priv->pdata->cleanup(priv); +} + +static struct hwrng omap_rng_ops = { + .name = "omap", + .data_present = omap_rng_data_present, + .data_read = omap_rng_data_read, + .init = omap_rng_init, + .cleanup = omap_rng_cleanup, +}; + +static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) +{ + return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1; +} + +static int omap2_rng_init(struct omap_rng_dev *priv) +{ + omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1); + return 0; +} + +static void omap2_rng_cleanup(struct omap_rng_dev *priv) +{ + omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0); +} + +static struct omap_rng_pdata omap2_rng_pdata = { + .regs = (u16 *)reg_map_omap2, + .data_size = OMAP2_RNG_OUTPUT_SIZE, + .data_present = omap2_rng_data_present, + .init = omap2_rng_init, + .cleanup = omap2_rng_cleanup, +}; + +#if defined(CONFIG_OF) +static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) +{ + return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; +} + +static int omap4_rng_init(struct omap_rng_dev *priv) +{ + u32 val; + + /* Return if RNG is already running. */ + if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK) + return 0; + + val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT; + val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT; + omap_rng_write(priv, RNG_CONFIG_REG, val); + + omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0); + omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK); + val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT; + val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT; + omap_rng_write(priv, RNG_ALARMCNT_REG, val); + + val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT; + val |= RNG_CONTROL_ENABLE_TRNG_MASK; + omap_rng_write(priv, RNG_CONTROL_REG, val); + + return 0; +} + +static void omap4_rng_cleanup(struct omap_rng_dev *priv) +{ + int val; + + val = omap_rng_read(priv, RNG_CONTROL_REG); + val &= ~RNG_CONTROL_ENABLE_TRNG_MASK; + omap_rng_write(priv, RNG_CONTROL_REG, val); +} + +static irqreturn_t omap4_rng_irq(int irq, void *dev_id) +{ + struct omap_rng_dev *priv = dev_id; + u32 fro_detune, fro_enable; + + /* + * Interrupt raised by a fro shutdown threshold, do the following: + * 1. Clear the alarm events. + * 2. De tune the FROs which are shutdown. + * 3. Re enable the shutdown FROs. 
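+	 * A stopped FRO shows up as a cleared bit in FROENABLE; setting
+	 * its FRODETUNE bit retunes it before all 24 FROs are switched
+	 * back on below.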
+ */ + omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0); + omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0); + + fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG); + fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK; + fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG); + fro_enable = RNG_REG_FROENABLE_MASK; + + omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune); + omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable); + + omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK); + + return IRQ_HANDLED; +} + +static struct omap_rng_pdata omap4_rng_pdata = { + .regs = (u16 *)reg_map_omap4, + .data_size = OMAP4_RNG_OUTPUT_SIZE, + .data_present = omap4_rng_data_present, + .init = omap4_rng_init, + .cleanup = omap4_rng_cleanup, +}; + +static const struct of_device_id omap_rng_of_match[] = { + { + .compatible = "ti,omap2-rng", + .data = &omap2_rng_pdata, + }, + { + .compatible = "ti,omap4-rng", + .data = &omap4_rng_pdata, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_rng_of_match); + +static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, + struct platform_device *pdev) +{ + const struct of_device_id *match; + struct device *dev = &pdev->dev; + int irq, err; + + match = of_match_device(of_match_ptr(omap_rng_of_match), dev); + if (!match) { + dev_err(dev, "no compatible OF match\n"); + return -EINVAL; + } + priv->pdata = match->data; + + if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "%s: error getting IRQ resource - %d\n", + __func__, irq); + return irq; + } + + err = devm_request_irq(dev, irq, omap4_rng_irq, + IRQF_TRIGGER_NONE, dev_name(dev), priv); + if (err) { + dev_err(dev, "unable to request irq %d, err = %d\n", + irq, err); + return err; + } + omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); + } + return 0; +} +#else +static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng, + struct platform_device *pdev) +{ + return -EINVAL; +} +#endif + +static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng) +{ + /* Only OMAP2/3 can be non-DT */ + omap_rng->pdata = &omap2_rng_pdata; + return 0; +} + +static int omap_rng_probe(struct platform_device *pdev) +{ + struct omap_rng_dev *priv; + struct resource *res; + struct device *dev = &pdev->dev; + int ret; + + priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + omap_rng_ops.priv = (unsigned long)priv; + platform_set_drvdata(pdev, priv); + priv->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto err_ioremap; + } + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : + get_omap_rng_device_details(priv); + if (ret) + goto err_ioremap; + + ret = hwrng_register(&omap_rng_ops); + if (ret) + goto err_register; + + dev_info(&pdev->dev, "OMAP Random Number Generator ver. 
%02x\n", + omap_rng_read(priv, RNG_REV_REG)); + + return 0; + +err_register: + priv->base = NULL; + pm_runtime_disable(&pdev->dev); +err_ioremap: + dev_err(dev, "initialization failed.\n"); + return ret; +} + +static int omap_rng_remove(struct platform_device *pdev) +{ + struct omap_rng_dev *priv = platform_get_drvdata(pdev); + + hwrng_unregister(&omap_rng_ops); + + priv->pdata->cleanup(priv); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static int __maybe_unused omap_rng_suspend(struct device *dev) +{ + struct omap_rng_dev *priv = dev_get_drvdata(dev); + + priv->pdata->cleanup(priv); + pm_runtime_put_sync(dev); + + return 0; +} + +static int __maybe_unused omap_rng_resume(struct device *dev) +{ + struct omap_rng_dev *priv = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + priv->pdata->init(priv); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume); + +static struct platform_driver omap_rng_driver = { + .driver = { + .name = "omap_rng", + .pm = &omap_rng_pm, + .of_match_table = of_match_ptr(omap_rng_of_match), + }, + .probe = omap_rng_probe, + .remove = omap_rng_remove, +}; + +module_platform_driver(omap_rng_driver); +MODULE_ALIAS("platform:omap_rng"); +MODULE_AUTHOR("Deepak Saxena (and others)"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/omap3-rom-rng.c b/kernel/drivers/char/hw_random/omap3-rom-rng.c new file mode 100644 index 000000000..a405cdcd8 --- /dev/null +++ b/kernel/drivers/char/hw_random/omap3-rom-rng.c @@ -0,0 +1,139 @@ +/* + * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family + * + * Copyright (C) 2009 Nokia Corporation + * Author: Juha Yrjola + * + * Copyright (C) 2013 Pali Rohár + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RNG_RESET 0x01 +#define RNG_GEN_PRNG_HW_INIT 0x02 +#define RNG_GEN_HW 0x08 + +/* param1: ptr, param2: count, param3: flag */ +static u32 (*omap3_rom_rng_call)(u32, u32, u32); + +static struct timer_list idle_timer; +static int rng_idle; +static struct clk *rng_clk; + +static void omap3_rom_rng_idle(unsigned long data) +{ + int r; + + r = omap3_rom_rng_call(0, 0, RNG_RESET); + if (r != 0) { + pr_err("reset failed: %d\n", r); + return; + } + clk_disable_unprepare(rng_clk); + rng_idle = 1; +} + +static int omap3_rom_rng_get_random(void *buf, unsigned int count) +{ + u32 r; + u32 ptr; + + del_timer_sync(&idle_timer); + if (rng_idle) { + clk_prepare_enable(rng_clk); + r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); + if (r != 0) { + clk_disable_unprepare(rng_clk); + pr_err("HW init failed: %d\n", r); + return -EIO; + } + rng_idle = 0; + } + + ptr = virt_to_phys(buf); + r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW); + mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500)); + if (r != 0) + return -EINVAL; + return 0; +} + +static int omap3_rom_rng_data_present(struct hwrng *rng, int wait) +{ + return 1; +} + +static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) +{ + int r; + + r = omap3_rom_rng_get_random(data, 4); + if (r < 0) + return r; + return 4; +} + +static struct hwrng omap3_rom_rng_ops = { + .name = "omap3-rom", + .data_present = omap3_rom_rng_data_present, + .data_read = omap3_rom_rng_data_read, +}; + +static int omap3_rom_rng_probe(struct platform_device *pdev) +{ + pr_info("initializing\n"); + + omap3_rom_rng_call = pdev->dev.platform_data; + if (!omap3_rom_rng_call) { + pr_err("omap3_rom_rng_call is NULL\n"); + return -EINVAL; + } + + setup_timer(&idle_timer, omap3_rom_rng_idle, 0); + rng_clk = devm_clk_get(&pdev->dev, "ick"); + if (IS_ERR(rng_clk)) { + pr_err("unable to get RNG clock\n"); + return PTR_ERR(rng_clk); + } + + /* Leave the RNG in reset state. */ + clk_prepare_enable(rng_clk); + omap3_rom_rng_idle(0); + + return hwrng_register(&omap3_rom_rng_ops); +} + +static int omap3_rom_rng_remove(struct platform_device *pdev) +{ + hwrng_unregister(&omap3_rom_rng_ops); + clk_disable_unprepare(rng_clk); + return 0; +} + +static struct platform_driver omap3_rom_rng_driver = { + .driver = { + .name = "omap3-rom-rng", + }, + .probe = omap3_rom_rng_probe, + .remove = omap3_rom_rng_remove, +}; + +module_platform_driver(omap3_rom_rng_driver); + +MODULE_ALIAS("platform:omap3-rom-rng"); +MODULE_AUTHOR("Juha Yrjola"); +MODULE_AUTHOR("Pali Rohár "); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/pasemi-rng.c b/kernel/drivers/char/hw_random/pasemi-rng.c new file mode 100644 index 000000000..51cb1d5cc --- /dev/null +++ b/kernel/drivers/char/hw_random/pasemi-rng.c @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2006-2007 PA Semi, Inc + * + * Maintained by: Olof Johansson + * + * Driver for the PWRficient onchip rng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define SDCRNG_CTL_REG 0x00 +#define SDCRNG_CTL_FVLD_M 0x0000f000 +#define SDCRNG_CTL_FVLD_S 12 +#define SDCRNG_CTL_KSZ 0x00000800 +#define SDCRNG_CTL_RSRC_CRG 0x00000010 +#define SDCRNG_CTL_RSRC_RRG 0x00000000 +#define SDCRNG_CTL_CE 0x00000004 +#define SDCRNG_CTL_RE 0x00000002 +#define SDCRNG_CTL_DR 0x00000001 +#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG) +#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG) +#define SDCRNG_VAL_REG 0x20 + +#define MODULE_NAME "pasemi_rng" + +static int pasemi_rng_data_present(struct hwrng *rng, int wait) +{ + void __iomem *rng_regs = (void __iomem *)rng->priv; + int data, i; + + for (i = 0; i < 20; i++) { + data = (in_le32(rng_regs + SDCRNG_CTL_REG) + & SDCRNG_CTL_FVLD_M) ? 1 : 0; + if (data || !wait) + break; + udelay(10); + } + return data; +} + +static int pasemi_rng_data_read(struct hwrng *rng, u32 *data) +{ + void __iomem *rng_regs = (void __iomem *)rng->priv; + *data = in_le32(rng_regs + SDCRNG_VAL_REG); + return 4; +} + +static int pasemi_rng_init(struct hwrng *rng) +{ + void __iomem *rng_regs = (void __iomem *)rng->priv; + u32 ctl; + + ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ; + out_le32(rng_regs + SDCRNG_CTL_REG, ctl); + out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR); + + return 0; +} + +static void pasemi_rng_cleanup(struct hwrng *rng) +{ + void __iomem *rng_regs = (void __iomem *)rng->priv; + u32 ctl; + + ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE; + out_le32(rng_regs + SDCRNG_CTL_REG, + in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl); +} + +static struct hwrng pasemi_rng = { + .name = MODULE_NAME, + .init = pasemi_rng_init, + .cleanup = pasemi_rng_cleanup, + .data_present = pasemi_rng_data_present, + .data_read = pasemi_rng_data_read, +}; + +static int rng_probe(struct platform_device *ofdev) +{ + void __iomem *rng_regs; + struct device_node *rng_np = ofdev->dev.of_node; + struct resource res; + int err = 0; + + err = of_address_to_resource(rng_np, 0, &res); + if (err) + return -ENODEV; + + rng_regs = ioremap(res.start, 0x100); + + if (!rng_regs) + return -ENOMEM; + + pasemi_rng.priv = (unsigned long)rng_regs; + + pr_info("Registering PA Semi RNG\n"); + + err = hwrng_register(&pasemi_rng); + + if (err) + iounmap(rng_regs); + + return err; +} + +static int rng_remove(struct platform_device *dev) +{ + void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv; + + hwrng_unregister(&pasemi_rng); + iounmap(rng_regs); + + return 0; +} + +static const struct of_device_id rng_match[] = { + { .compatible = "1682m-rng", }, + { .compatible = "pasemi,pwrficient-rng", }, + { }, +}; + +static struct platform_driver rng_driver = { + .driver = { + .name = "pasemi-rng", + .of_match_table = rng_match, + }, + .probe = rng_probe, + .remove = rng_remove, +}; + +module_platform_driver(rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Egor Martovetsky "); +MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor"); diff --git a/kernel/drivers/char/hw_random/powernv-rng.c b/kernel/drivers/char/hw_random/powernv-rng.c new file mode 100644 index 000000000..263a5bb8e --- /dev/null +++ b/kernel/drivers/char/hw_random/powernv-rng.c @@ -0,0 +1,81 @@ +/* + * Copyright 2013 Michael Ellerman, Guo Chao, IBM 
Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + unsigned long *buf; + int i, len; + + /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */ + len = max / sizeof(unsigned long); + + buf = (unsigned long *)data; + + for (i = 0; i < len; i++) + powernv_get_random_long(buf++); + + return len * sizeof(unsigned long); +} + +static struct hwrng powernv_hwrng = { + .name = "powernv-rng", + .read = powernv_rng_read, +}; + +static int powernv_rng_remove(struct platform_device *pdev) +{ + hwrng_unregister(&powernv_hwrng); + + return 0; +} + +static int powernv_rng_probe(struct platform_device *pdev) +{ + int rc; + + rc = hwrng_register(&powernv_hwrng); + if (rc) { + /* We only register one device, ignore any others */ + if (rc == -EEXIST) + rc = -ENODEV; + + return rc; + } + + pr_info("Registered powernv hwrng.\n"); + + return 0; +} + +static const struct of_device_id powernv_rng_match[] = { + { .compatible = "ibm,power-rng",}, + {}, +}; +MODULE_DEVICE_TABLE(of, powernv_rng_match); + +static struct platform_driver powernv_rng_driver = { + .driver = { + .name = "powernv_rng", + .of_match_table = powernv_rng_match, + }, + .probe = powernv_rng_probe, + .remove = powernv_rng_remove, +}; +module_platform_driver(powernv_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above"); diff --git a/kernel/drivers/char/hw_random/ppc4xx-rng.c b/kernel/drivers/char/hw_random/ppc4xx-rng.c new file mode 100644 index 000000000..b2cfda0fa --- /dev/null +++ b/kernel/drivers/char/hw_random/ppc4xx-rng.c @@ -0,0 +1,146 @@ +/* + * Generic PowerPC 44x RNG driver + * + * Copyright 2011 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define PPC4XX_TRNG_DEV_CTRL 0x60080 + +#define PPC4XX_TRNGE 0x00020000 +#define PPC4XX_TRNG_CTRL 0x0008 +#define PPC4XX_TRNG_CTRL_DALM 0x20 +#define PPC4XX_TRNG_STAT 0x0004 +#define PPC4XX_TRNG_STAT_B 0x1 +#define PPC4XX_TRNG_DATA 0x0000 + +#define MODULE_NAME "ppc4xx_rng" + +static int ppc4xx_rng_data_present(struct hwrng *rng, int wait) +{ + void __iomem *rng_regs = (void __iomem *) rng->priv; + int busy, i, present = 0; + + for (i = 0; i < 20; i++) { + busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B); + if (!busy || !wait) { + present = 1; + break; + } + udelay(10); + } + return present; +} + +static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data) +{ + void __iomem *rng_regs = (void __iomem *) rng->priv; + *data = in_le32(rng_regs + PPC4XX_TRNG_DATA); + return 4; +} + +static int ppc4xx_rng_enable(int enable) +{ + struct device_node *ctrl; + void __iomem *ctrl_reg; + int err = 0; + u32 val; + + /* Find the main crypto device node and map it to turn the TRNG on */ + ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto"); + if (!ctrl) + return -ENODEV; + + ctrl_reg = of_iomap(ctrl, 0); + if (!ctrl_reg) { + err = -ENODEV; + goto out; + } + + val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL); + + if (enable) + val |= PPC4XX_TRNGE; + else + val = val & ~PPC4XX_TRNGE; + + out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val); + iounmap(ctrl_reg); + +out: + of_node_put(ctrl); + + return err; +} + +static struct hwrng ppc4xx_rng = { + .name = MODULE_NAME, + .data_present = ppc4xx_rng_data_present, + .data_read = ppc4xx_rng_data_read, +}; + +static int ppc4xx_rng_probe(struct platform_device *dev) +{ + void __iomem *rng_regs; + int err = 0; + + rng_regs = of_iomap(dev->dev.of_node, 0); + if (!rng_regs) + return -ENODEV; + + err = ppc4xx_rng_enable(1); + if (err) + return err; + + out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM); + ppc4xx_rng.priv = (unsigned long) rng_regs; + + err = hwrng_register(&ppc4xx_rng); + + return err; +} + +static int ppc4xx_rng_remove(struct platform_device *dev) +{ + void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv; + + hwrng_unregister(&ppc4xx_rng); + ppc4xx_rng_enable(0); + iounmap(rng_regs); + + return 0; +} + +static const struct of_device_id ppc4xx_rng_match[] = { + { .compatible = "ppc4xx-rng", }, + { .compatible = "amcc,ppc460ex-rng", }, + { .compatible = "amcc,ppc440epx-rng", }, + {}, +}; + +static struct platform_driver ppc4xx_rng_driver = { + .driver = { + .name = MODULE_NAME, + .of_match_table = ppc4xx_rng_match, + }, + .probe = ppc4xx_rng_probe, + .remove = ppc4xx_rng_remove, +}; + +module_platform_driver(ppc4xx_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Josh Boyer "); +MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors"); diff --git a/kernel/drivers/char/hw_random/pseries-rng.c b/kernel/drivers/char/hw_random/pseries-rng.c new file mode 100644 index 000000000..63ce51d09 --- /dev/null +++ b/kernel/drivers/char/hw_random/pseries-rng.c @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2010 Michael Neuling IBM Corporation + * + * Driver for the pseries hardware RNG for POWER7+ and above + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + + +static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + u64 buffer[PLPAR_HCALL_BUFSIZE]; + size_t size = max < 8 ? max : 8; + int rc; + + rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); + if (rc != H_SUCCESS) { + pr_err_ratelimited("H_RANDOM call failed %d\n", rc); + return -EIO; + } + memcpy(data, buffer, size); + + /* The hypervisor interface returns 64 bits */ + return size; +} + +/** + * pseries_rng_get_desired_dma - Return desired DMA allocate for CMO operations + * + * This is a required function for a driver to operate in a CMO environment + * but this device does not make use of DMA allocations, return 0. + * + * Return value: + * Number of bytes of IO data the driver will need to perform well -> 0 + */ +static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) +{ + return 0; +}; + +static struct hwrng pseries_rng = { + .name = KBUILD_MODNAME, + .read = pseries_rng_read, +}; + +static int pseries_rng_probe(struct vio_dev *dev, + const struct vio_device_id *id) +{ + return hwrng_register(&pseries_rng); +} + +static int pseries_rng_remove(struct vio_dev *dev) +{ + hwrng_unregister(&pseries_rng); + return 0; +} + +static struct vio_device_id pseries_rng_driver_ids[] = { + { "ibm,random-v1", "ibm,random"}, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); + +static struct vio_driver pseries_rng_driver = { + .name = KBUILD_MODNAME, + .probe = pseries_rng_probe, + .remove = pseries_rng_remove, + .get_desired_dma = pseries_rng_get_desired_dma, + .id_table = pseries_rng_driver_ids +}; + +static int __init rng_init(void) +{ + pr_info("Registering IBM pSeries RNG driver\n"); + return vio_register_driver(&pseries_rng_driver); +} + +module_init(rng_init); + +static void __exit rng_exit(void) +{ + vio_unregister_driver(&pseries_rng_driver); +} +module_exit(rng_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michael Neuling "); +MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors"); diff --git a/kernel/drivers/char/hw_random/timeriomem-rng.c b/kernel/drivers/char/hw_random/timeriomem-rng.c new file mode 100644 index 000000000..cf37db263 --- /dev/null +++ b/kernel/drivers/char/hw_random/timeriomem-rng.c @@ -0,0 +1,213 @@ +/* + * drivers/char/hw_random/timeriomem-rng.c + * + * Copyright (C) 2009 Alexander Clouter + * + * Derived from drivers/char/hw_random/omap-rng.c + * Copyright 2005 (c) MontaVista Software, Inc. + * Author: Deepak Saxena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Overview: + * This driver is useful for platforms that have an IO range that provides + * periodic random data from a single IO memory address. All the platform + * has to do is provide the address and 'wait time' that new data becomes + * available. 
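+ *
+ * For example (values are illustrative only), a devicetree user could
+ * describe a register that yields a fresh 32-bit word once a second as:
+ *
+ *	rng@44 {
+ *		compatible = "timeriomem_rng";
+ *		reg = <0x44 0x04>;
+ *		period = <1000000>;	(in microseconds)
+ *	};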
+ * + * TODO: add support for reading sizes other than 32bits and masking + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct timeriomem_rng_private_data { + void __iomem *io_base; + unsigned int expires; + unsigned int period; + unsigned int present:1; + + struct timer_list timer; + struct completion completion; + + struct hwrng timeriomem_rng_ops; +}; + +#define to_rng_priv(rng) \ + ((struct timeriomem_rng_private_data *)rng->priv) + +/* + * have data return 1, however return 0 if we have nothing + */ +static int timeriomem_rng_data_present(struct hwrng *rng, int wait) +{ + struct timeriomem_rng_private_data *priv = to_rng_priv(rng); + + if (!wait || priv->present) + return priv->present; + + wait_for_completion(&priv->completion); + + return 1; +} + +static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct timeriomem_rng_private_data *priv = to_rng_priv(rng); + unsigned long cur; + s32 delay; + + *data = readl(priv->io_base); + + cur = jiffies; + + delay = cur - priv->expires; + delay = priv->period - (delay % priv->period); + + priv->expires = cur + delay; + priv->present = 0; + + reinit_completion(&priv->completion); + mod_timer(&priv->timer, priv->expires); + + return 4; +} + +static void timeriomem_rng_trigger(unsigned long data) +{ + struct timeriomem_rng_private_data *priv + = (struct timeriomem_rng_private_data *)data; + + priv->present = 1; + complete(&priv->completion); +} + +static int timeriomem_rng_probe(struct platform_device *pdev) +{ + struct timeriomem_rng_data *pdata = pdev->dev.platform_data; + struct timeriomem_rng_private_data *priv; + struct resource *res; + int err = 0; + int period; + + if (!pdev->dev.of_node && !pdata) { + dev_err(&pdev->dev, "timeriomem_rng_data is missing\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + if (res->start % 4 != 0 || resource_size(res) != 4) { + dev_err(&pdev->dev, + "address must be four bytes wide and aligned\n"); + return -EINVAL; + } + + /* Allocate memory for the device structure (and zero it) */ + priv = devm_kzalloc(&pdev->dev, + sizeof(struct timeriomem_rng_private_data), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + if (pdev->dev.of_node) { + int i; + + if (!of_property_read_u32(pdev->dev.of_node, + "period", &i)) + period = i; + else { + dev_err(&pdev->dev, "missing period\n"); + return -EINVAL; + } + } else { + period = pdata->period; + } + + priv->period = usecs_to_jiffies(period); + if (priv->period < 1) { + dev_err(&pdev->dev, "period is less than one jiffy\n"); + return -EINVAL; + } + + priv->expires = jiffies; + priv->present = 1; + + init_completion(&priv->completion); + complete(&priv->completion); + + setup_timer(&priv->timer, timeriomem_rng_trigger, (unsigned long)priv); + + priv->timeriomem_rng_ops.name = dev_name(&pdev->dev); + priv->timeriomem_rng_ops.data_present = timeriomem_rng_data_present; + priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read; + priv->timeriomem_rng_ops.priv = (unsigned long)priv; + + priv->io_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->io_base)) { + err = PTR_ERR(priv->io_base); + goto out_timer; + } + + err = hwrng_register(&priv->timeriomem_rng_ops); + if (err) { + dev_err(&pdev->dev, "problem registering\n"); + goto out_timer; + } + + dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", + priv->io_base, period); + + return 0; + +out_timer: + 
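+	/* the timer is armed only from data_read(), which cannot have run yet */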
del_timer_sync(&priv->timer); + return err; +} + +static int timeriomem_rng_remove(struct platform_device *pdev) +{ + struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev); + + hwrng_unregister(&priv->timeriomem_rng_ops); + + del_timer_sync(&priv->timer); + + return 0; +} + +static const struct of_device_id timeriomem_rng_match[] = { + { .compatible = "timeriomem_rng" }, + {}, +}; +MODULE_DEVICE_TABLE(of, timeriomem_rng_match); + +static struct platform_driver timeriomem_rng_driver = { + .driver = { + .name = "timeriomem_rng", + .of_match_table = timeriomem_rng_match, + }, + .probe = timeriomem_rng_probe, + .remove = timeriomem_rng_remove, +}; + +module_platform_driver(timeriomem_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alexander Clouter "); +MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver"); diff --git a/kernel/drivers/char/hw_random/tpm-rng.c b/kernel/drivers/char/hw_random/tpm-rng.c new file mode 100644 index 000000000..d6d448266 --- /dev/null +++ b/kernel/drivers/char/hw_random/tpm-rng.c @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012 Kent Yoder IBM Corporation + * + * HWRNG interfaces to pull RNG data from a TPM + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include + +#define MODULE_NAME "tpm-rng" + +static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + return tpm_get_random(TPM_ANY_NUM, data, max); +} + +static struct hwrng tpm_rng = { + .name = MODULE_NAME, + .read = tpm_rng_read, +}; + +static int __init rng_init(void) +{ + return hwrng_register(&tpm_rng); +} +module_init(rng_init); + +static void __exit rng_exit(void) +{ + hwrng_unregister(&tpm_rng); +} +module_exit(rng_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Kent Yoder "); +MODULE_DESCRIPTION("RNG driver for TPM devices"); diff --git a/kernel/drivers/char/hw_random/tx4939-rng.c b/kernel/drivers/char/hw_random/tx4939-rng.c new file mode 100644 index 000000000..a7b694913 --- /dev/null +++ b/kernel/drivers/char/hw_random/tx4939-rng.c @@ -0,0 +1,168 @@ +/* + * RNG driver for TX4939 Random Number Generators (RNG) + * + * Copyright (C) 2009 Atsushi Nemoto + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TX4939_RNG_RCSR 0x00000000 +#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8) + +#define TX4939_RNG_RCSR_INTE 0x00000008 +#define TX4939_RNG_RCSR_RST 0x00000004 +#define TX4939_RNG_RCSR_FIN 0x00000002 +#define TX4939_RNG_RCSR_ST 0x00000001 + +struct tx4939_rng { + struct hwrng rng; + void __iomem *base; + u64 databuf[3]; + unsigned int data_avail; +}; + +static void rng_io_start(void) +{ +#ifndef CONFIG_64BIT + /* + * readq is reading a 64-bit register using a 64-bit load. 
On + * a 32-bit kernel however interrupts or any other processor + * exception would clobber the upper 32-bit of the processor + * register so interrupts need to be disabled. + */ + local_irq_disable(); +#endif +} + +static void rng_io_end(void) +{ +#ifndef CONFIG_64BIT + local_irq_enable(); +#endif +} + +static u64 read_rng(void __iomem *base, unsigned int offset) +{ + return ____raw_readq(base + offset); +} + +static void write_rng(u64 val, void __iomem *base, unsigned int offset) +{ + return ____raw_writeq(val, base + offset); +} + +static int tx4939_rng_data_present(struct hwrng *rng, int wait) +{ + struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng); + int i; + + if (rngdev->data_avail) + return rngdev->data_avail; + for (i = 0; i < 20; i++) { + rng_io_start(); + if (!(read_rng(rngdev->base, TX4939_RNG_RCSR) + & TX4939_RNG_RCSR_ST)) { + rngdev->databuf[0] = + read_rng(rngdev->base, TX4939_RNG_ROR(0)); + rngdev->databuf[1] = + read_rng(rngdev->base, TX4939_RNG_ROR(1)); + rngdev->databuf[2] = + read_rng(rngdev->base, TX4939_RNG_ROR(2)); + rngdev->data_avail = + sizeof(rngdev->databuf) / sizeof(u32); + /* Start RNG */ + write_rng(TX4939_RNG_RCSR_ST, + rngdev->base, TX4939_RNG_RCSR); + wait = 0; + } + rng_io_end(); + if (!wait) + break; + /* 90 bus clock cycles by default for generation */ + ndelay(90 * 5); + } + return rngdev->data_avail; +} + +static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer) +{ + struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng); + + rngdev->data_avail--; + *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail); + return sizeof(u32); +} + +static int __init tx4939_rng_probe(struct platform_device *dev) +{ + struct tx4939_rng *rngdev; + struct resource *r; + int i; + + rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); + if (!rngdev) + return -ENOMEM; + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + rngdev->base = devm_ioremap_resource(&dev->dev, r); + if (IS_ERR(rngdev->base)) + return PTR_ERR(rngdev->base); + + rngdev->rng.name = dev_name(&dev->dev); + rngdev->rng.data_present = tx4939_rng_data_present; + rngdev->rng.data_read = tx4939_rng_data_read; + + rng_io_start(); + /* Reset RNG */ + write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR); + write_rng(0, rngdev->base, TX4939_RNG_RCSR); + /* Start RNG */ + write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR); + rng_io_end(); + /* + * Drop first two results. From the datasheet: + * The quality of the random numbers generated immediately + * after reset can be insufficient. Therefore, do not use + * random numbers obtained from the first and second + * generations; use the ones from the third or subsequent + * generation. 
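+	 * Hence the loop below reads and throws away two full 24-byte
+	 * batches (three 64-bit ROR words each) before hwrng_register().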
+ */ + for (i = 0; i < 2; i++) { + rngdev->data_avail = 0; + if (!tx4939_rng_data_present(&rngdev->rng, 1)) + return -EIO; + } + + platform_set_drvdata(dev, rngdev); + return hwrng_register(&rngdev->rng); +} + +static int __exit tx4939_rng_remove(struct platform_device *dev) +{ + struct tx4939_rng *rngdev = platform_get_drvdata(dev); + + hwrng_unregister(&rngdev->rng); + return 0; +} + +static struct platform_driver tx4939_rng_driver = { + .driver = { + .name = "tx4939-rng", + }, + .remove = tx4939_rng_remove, +}; + +module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe); + +MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/via-rng.c b/kernel/drivers/char/hw_random/via-rng.c new file mode 100644 index 000000000..a3bebef25 --- /dev/null +++ b/kernel/drivers/char/hw_random/via-rng.c @@ -0,0 +1,231 @@ +/* + * RNG driver for VIA RNGs + * + * Copyright 2005 (c) MontaVista Software, Inc. + * + * with the majority of the code coming from: + * + * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) + * (c) Copyright 2003 Red Hat Inc + * + * derived from + * + * Hardware driver for the AMD 768 Random Number Generator (RNG) + * (c) Copyright 2001 Red Hat Inc + * + * derived from + * + * Hardware driver for Intel i810 Random Number Generator (RNG) + * Copyright 2000,2001 Jeff Garzik + * Copyright 2000,2001 Philipp Rumpf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + + +enum { + VIA_STRFILT_CNT_SHIFT = 16, + VIA_STRFILT_FAIL = (1 << 15), + VIA_STRFILT_ENABLE = (1 << 14), + VIA_RAWBITS_ENABLE = (1 << 13), + VIA_RNG_ENABLE = (1 << 6), + VIA_NOISESRC1 = (1 << 8), + VIA_NOISESRC2 = (1 << 9), + VIA_XSTORE_CNT_MASK = 0x0F, + + VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ + VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ + VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF, + VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ + VIA_RNG_CHUNK_2_MASK = 0xFFFF, + VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ + VIA_RNG_CHUNK_1_MASK = 0xFF, +}; + +/* + * Investigate using the 'rep' prefix to obtain 32 bits of random data + * in one insn. The upside is potentially better performance. The + * downside is that the instruction becomes no longer atomic. Due to + * this, just like familiar issues with /dev/random itself, the worst + * case of a 'rep xstore' could potentially pause a cpu for an + * unreasonably long time. In practice, this condition would likely + * only occur when the hardware is failing. (or so we hope :)) + * + * Another possible performance boost may come from simply buffering + * until we have 4 bytes, thus returning a u32 at a time, + * instead of the current u8-at-a-time. 
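+ * (Returning a u32 per read would quarter the number of round trips
+ * through the hwrng core for the same amount of output.)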
+ * + * Padlock instructions can generate a spurious DNA fault, so + * we have to call them in the context of irq_ts_save/restore() + */ + +static inline u32 xstore(u32 *addr, u32 edx_in) +{ + u32 eax_out; + int ts_state; + + ts_state = irq_ts_save(); + + asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" + : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr)); + + irq_ts_restore(ts_state); + return eax_out; +} + +static int via_rng_data_present(struct hwrng *rng, int wait) +{ + char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + u32 bytes_out; + int i; + + /* We choose the recommended 1-byte-per-instruction RNG rate, + * for greater randomness at the expense of speed. Larger + * values 2, 4, or 8 bytes-per-instruction yield greater + * speed at lesser randomness. + * + * If you change this to another VIA_CHUNK_n, you must also + * change the ->n_bytes values in rng_vendor_ops[] tables. + * VIA_CHUNK_8 requires further code changes. + * + * A copy of MSR_VIA_RNG is placed in eax_out when xstore + * completes. + */ + + for (i = 0; i < 20; i++) { + *via_rng_datum = 0; /* paranoia, not really necessary */ + bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1); + bytes_out &= VIA_XSTORE_CNT_MASK; + if (bytes_out || !wait) + break; + udelay(10); + } + rng->priv = *via_rng_datum; + return bytes_out ? 1 : 0; +} + +static int via_rng_data_read(struct hwrng *rng, u32 *data) +{ + u32 via_rng_datum = (u32)rng->priv; + + *data = via_rng_datum; + + return 1; +} + +static int via_rng_init(struct hwrng *rng) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 lo, hi, old_lo; + + /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG + * is always enabled if CPUID rng_en is set. There is no + * RNG configuration like it used to be the case in this + * register */ + if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { + if (!cpu_has_xstore_enabled) { + pr_err(PFX "can't enable hardware RNG " + "if XSTORE is not enabled\n"); + return -ENODEV; + } + return 0; + } + + /* Control the RNG via MSR. Tread lightly and pay very close + * close attention to values written, as the reserved fields + * are documented to be "undefined and unpredictable"; but it + * does not say to write them as zero, so I make a guess that + * we restore the values we find in the register. + */ + rdmsr(MSR_VIA_RNG, lo, hi); + + old_lo = lo; + lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT); + lo &= ~VIA_XSTORE_CNT_MASK; + lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE); + lo |= VIA_RNG_ENABLE; + lo |= VIA_NOISESRC1; + + /* Enable secondary noise source on CPUs where it is present. 
*/ + + /* Nehemiah stepping 8 and higher */ + if ((c->x86_model == 9) && (c->x86_mask > 7)) + lo |= VIA_NOISESRC2; + + /* Esther */ + if (c->x86_model >= 10) + lo |= VIA_NOISESRC2; + + if (lo != old_lo) + wrmsr(MSR_VIA_RNG, lo, hi); + + /* perhaps-unnecessary sanity check; remove after testing if + unneeded */ + rdmsr(MSR_VIA_RNG, lo, hi); + if ((lo & VIA_RNG_ENABLE) == 0) { + pr_err(PFX "cannot enable VIA C3 RNG, aborting\n"); + return -ENODEV; + } + + return 0; +} + + +static struct hwrng via_rng = { + .name = "via", + .init = via_rng_init, + .data_present = via_rng_data_present, + .data_read = via_rng_data_read, +}; + + +static int __init mod_init(void) +{ + int err; + + if (!cpu_has_xstore) + return -ENODEV; + pr_info("VIA RNG detected\n"); + err = hwrng_register(&via_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", + err); + goto out; + } +out: + return err; +} + +static void __exit mod_exit(void) +{ + hwrng_unregister(&via_rng); +} + +module_init(mod_init); +module_exit(mod_exit); + +static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XSTORE), + {} +}; + +MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); diff --git a/kernel/drivers/char/hw_random/virtio-rng.c b/kernel/drivers/char/hw_random/virtio-rng.c new file mode 100644 index 000000000..3fa2f8a00 --- /dev/null +++ b/kernel/drivers/char/hw_random/virtio-rng.c @@ -0,0 +1,212 @@ +/* + * Randomness driver for virtio + * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_IDA(rng_index_ida); + +struct virtrng_info { + struct hwrng hwrng; + struct virtqueue *vq; + struct completion have_data; + char name[25]; + unsigned int data_avail; + int index; + bool busy; + bool hwrng_register_done; + bool hwrng_removed; +}; + +static void random_recv_done(struct virtqueue *vq) +{ + struct virtrng_info *vi = vq->vdev->priv; + + /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ + if (!virtqueue_get_buf(vi->vq, &vi->data_avail)) + return; + + complete(&vi->have_data); +} + +/* The host will fill any buffer we give it with sweet, sweet randomness. */ +static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size) +{ + struct scatterlist sg; + + sg_init_one(&sg, buf, size); + + /* There should always be room for one buffer. 
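+	 * vi->busy serializes requests, so at most one buffer is ever
+	 * in flight and virtqueue_add_inbuf() cannot run out of
+	 * descriptors here.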
*/ + virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL); + + virtqueue_kick(vi->vq); +} + +static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) +{ + int ret; + struct virtrng_info *vi = (struct virtrng_info *)rng->priv; + + if (vi->hwrng_removed) + return -ENODEV; + + if (!vi->busy) { + vi->busy = true; + init_completion(&vi->have_data); + register_buffer(vi, buf, size); + } + + if (!wait) + return 0; + + ret = wait_for_completion_killable(&vi->have_data); + if (ret < 0) + return ret; + + vi->busy = false; + + return vi->data_avail; +} + +static void virtio_cleanup(struct hwrng *rng) +{ + struct virtrng_info *vi = (struct virtrng_info *)rng->priv; + + if (vi->busy) + wait_for_completion(&vi->have_data); +} + +static int probe_common(struct virtio_device *vdev) +{ + int err, index; + struct virtrng_info *vi = NULL; + + vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL); + if (!vi) + return -ENOMEM; + + vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL); + if (index < 0) { + err = index; + goto err_ida; + } + sprintf(vi->name, "virtio_rng.%d", index); + init_completion(&vi->have_data); + + vi->hwrng = (struct hwrng) { + .read = virtio_read, + .cleanup = virtio_cleanup, + .priv = (unsigned long)vi, + .name = vi->name, + .quality = 1000, + }; + vdev->priv = vi; + + /* We expect a single virtqueue. */ + vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input"); + if (IS_ERR(vi->vq)) { + err = PTR_ERR(vi->vq); + goto err_find; + } + + return 0; + +err_find: + ida_simple_remove(&rng_index_ida, index); +err_ida: + kfree(vi); + return err; +} + +static void remove_common(struct virtio_device *vdev) +{ + struct virtrng_info *vi = vdev->priv; + + vi->hwrng_removed = true; + vi->data_avail = 0; + complete(&vi->have_data); + vdev->config->reset(vdev); + vi->busy = false; + if (vi->hwrng_register_done) + hwrng_unregister(&vi->hwrng); + vdev->config->del_vqs(vdev); + ida_simple_remove(&rng_index_ida, vi->index); + kfree(vi); +} + +static int virtrng_probe(struct virtio_device *vdev) +{ + return probe_common(vdev); +} + +static void virtrng_remove(struct virtio_device *vdev) +{ + remove_common(vdev); +} + +static void virtrng_scan(struct virtio_device *vdev) +{ + struct virtrng_info *vi = vdev->priv; + int err; + + err = hwrng_register(&vi->hwrng); + if (!err) + vi->hwrng_register_done = true; +} + +#ifdef CONFIG_PM_SLEEP +static int virtrng_freeze(struct virtio_device *vdev) +{ + remove_common(vdev); + return 0; +} + +static int virtrng_restore(struct virtio_device *vdev) +{ + return probe_common(vdev); +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_rng_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtrng_probe, + .remove = virtrng_remove, + .scan = virtrng_scan, +#ifdef CONFIG_PM_SLEEP + .freeze = virtrng_freeze, + .restore = virtrng_restore, +#endif +}; + +module_virtio_driver(virtio_rng_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio random number driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/hw_random/xgene-rng.c b/kernel/drivers/char/hw_random/xgene-rng.c new file mode 100644 index 000000000..c37cf754a --- /dev/null +++ b/kernel/drivers/char/hw_random/xgene-rng.c @@ -0,0 +1,433 @@ +/* + * APM X-Gene SoC RNG Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Rameshwar Prasad Sahu + * Shamal Winchurkar + 
* Feng Kan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RNG_MAX_DATUM 4 +#define MAX_TRY 100 +#define XGENE_RNG_RETRY_COUNT 20 +#define XGENE_RNG_RETRY_INTERVAL 10 + +/* RNG Registers */ +#define RNG_INOUT_0 0x00 +#define RNG_INTR_STS_ACK 0x10 +#define RNG_CONTROL 0x14 +#define RNG_CONFIG 0x18 +#define RNG_ALARMCNT 0x1c +#define RNG_FROENABLE 0x20 +#define RNG_FRODETUNE 0x24 +#define RNG_ALARMMASK 0x28 +#define RNG_ALARMSTOP 0x2c +#define RNG_OPTIONS 0x78 +#define RNG_EIP_REV 0x7c + +#define MONOBIT_FAIL_MASK BIT(7) +#define POKER_FAIL_MASK BIT(6) +#define LONG_RUN_FAIL_MASK BIT(5) +#define RUN_FAIL_MASK BIT(4) +#define NOISE_FAIL_MASK BIT(3) +#define STUCK_OUT_MASK BIT(2) +#define SHUTDOWN_OFLO_MASK BIT(1) +#define READY_MASK BIT(0) + +#define MAJOR_HW_REV_RD(src) (((src) & 0x0f000000) >> 24) +#define MINOR_HW_REV_RD(src) (((src) & 0x00f00000) >> 20) +#define HW_PATCH_LEVEL_RD(src) (((src) & 0x000f0000) >> 16) +#define MAX_REFILL_CYCLES_SET(dst, src) \ + ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000)) +#define MIN_REFILL_CYCLES_SET(dst, src) \ + ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff)) +#define ALARM_THRESHOLD_SET(dst, src) \ + ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff)) +#define ENABLE_RNG_SET(dst, src) \ + ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10))) +#define REGSPEC_TEST_MODE_SET(dst, src) \ + ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8))) +#define MONOBIT_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7))) +#define POKER_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6))) +#define LONG_RUN_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5))) +#define RUN_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4))) +#define NOISE_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3))) +#define STUCK_OUT_MASK_SET(dst, src) \ + ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2))) +#define SHUTDOWN_OFLO_MASK_SET(dst, src) \ + ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1))) + +struct xgene_rng_dev { + u32 irq; + void __iomem *csr_base; + u32 revision; + u32 datum_size; + u32 failure_cnt; /* Failure count last minute */ + unsigned long failure_ts;/* First failure timestamp */ + struct timer_list failure_timer; + struct device *dev; + struct clk *clk; +}; + +static void xgene_rng_expired_timer(unsigned long arg) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg; + + /* Clear failure counter as timer expired */ + disable_irq(ctx->irq); + ctx->failure_cnt = 0; + del_timer(&ctx->failure_timer); + enable_irq(ctx->irq); +} + +static void xgene_rng_start_timer(struct xgene_rng_dev *ctx) +{ + ctx->failure_timer.data = (unsigned long) ctx; + ctx->failure_timer.function = xgene_rng_expired_timer; + ctx->failure_timer.expires = jiffies + 120 * HZ; + 
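+	/* the handler clears failure_cnt when the 120 s window expires */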
add_timer(&ctx->failure_timer); +} + +/* + * Initialize or reinit free running oscillators (FROs) + */ +static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val) +{ + writel(fro_val, ctx->csr_base + RNG_FRODETUNE); + writel(0x00000000, ctx->csr_base + RNG_ALARMMASK); + writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP); + writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE); +} + +static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx) +{ + u32 val; + + val = readl(ctx->csr_base + RNG_INTR_STS_ACK); + if (val & MONOBIT_FAIL_MASK) + /* + * LFSR detected an out-of-bounds number of 1s after + * checking 20,000 bits (test T1 as specified in the + * AIS-31 standard) + */ + dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val); + if (val & POKER_FAIL_MASK) + /* + * LFSR detected an out-of-bounds value in at least one + * of the 16 poker_count_X counters or an out of bounds sum + * of squares value after checking 20,000 bits (test T2 as + * specified in the AIS-31 standard) + */ + dev_err(ctx->dev, "test poker failure error 0x%08X\n", val); + if (val & LONG_RUN_FAIL_MASK) + /* + * LFSR detected a sequence of 34 identical bits + * (test T4 as specified in the AIS-31 standard) + */ + dev_err(ctx->dev, "test long run failure error 0x%08X\n", val); + if (val & RUN_FAIL_MASK) + /* + * LFSR detected an outof-bounds value for at least one + * of the running counters after checking 20,000 bits + * (test T3 as specified in the AIS-31 standard) + */ + dev_err(ctx->dev, "test run failure error 0x%08X\n", val); + if (val & NOISE_FAIL_MASK) + /* LFSR detected a sequence of 48 identical bits */ + dev_err(ctx->dev, "noise failure error 0x%08X\n", val); + if (val & STUCK_OUT_MASK) + /* + * Detected output data registers generated same value twice + * in a row + */ + dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val); + + if (val & SHUTDOWN_OFLO_MASK) { + u32 frostopped; + + /* FROs shut down after a second error event. Try recover. */ + if (++ctx->failure_cnt == 1) { + /* 1st time, just recover */ + ctx->failure_ts = jiffies; + frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); + xgene_rng_init_fro(ctx, frostopped); + + /* + * We must start a timer to clear out this error + * in case the system timer wrap around + */ + xgene_rng_start_timer(ctx); + } else { + /* 2nd time failure in lesser than 1 minute? 
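+			 * If the previous alarm was less than 60 s ago,
+			 * report it; otherwise treat this as a fresh first
+			 * failure and re-arm the timer.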
*/ + if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) { + dev_err(ctx->dev, + "FRO shutdown failure error 0x%08X\n", + val); + } else { + /* 2nd time failure after 1 minutes, recover */ + ctx->failure_ts = jiffies; + ctx->failure_cnt = 1; + /* + * We must start a timer to clear out this + * error in case the system timer wrap + * around + */ + xgene_rng_start_timer(ctx); + } + frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); + xgene_rng_init_fro(ctx, frostopped); + } + } + /* Clear them all */ + writel(val, ctx->csr_base + RNG_INTR_STS_ACK); +} + +static irqreturn_t xgene_rng_irq_handler(int irq, void *id) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id; + + /* RNG Alarm Counter overflow */ + xgene_rng_chk_overflow(ctx); + + return IRQ_HANDLED; +} + +static int xgene_rng_data_present(struct hwrng *rng, int wait) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; + u32 i, val = 0; + + for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) { + val = readl(ctx->csr_base + RNG_INTR_STS_ACK); + if ((val & READY_MASK) || !wait) + break; + udelay(XGENE_RNG_RETRY_INTERVAL); + } + + return (val & READY_MASK); +} + +static int xgene_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; + int i; + + for (i = 0; i < ctx->datum_size; i++) + data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4); + + /* Clear ready bit to start next transaction */ + writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); + + return ctx->datum_size << 2; +} + +static void xgene_rng_init_internal(struct xgene_rng_dev *ctx) +{ + u32 val; + + writel(0x00000000, ctx->csr_base + RNG_CONTROL); + + val = MAX_REFILL_CYCLES_SET(0, 10); + val = MIN_REFILL_CYCLES_SET(val, 10); + writel(val, ctx->csr_base + RNG_CONFIG); + + val = ALARM_THRESHOLD_SET(0, 0xFF); + writel(val, ctx->csr_base + RNG_ALARMCNT); + + xgene_rng_init_fro(ctx, 0); + + writel(MONOBIT_FAIL_MASK | + POKER_FAIL_MASK | + LONG_RUN_FAIL_MASK | + RUN_FAIL_MASK | + NOISE_FAIL_MASK | + STUCK_OUT_MASK | + SHUTDOWN_OFLO_MASK | + READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); + + val = ENABLE_RNG_SET(0, 1); + val = MONOBIT_FAIL_MASK_SET(val, 1); + val = POKER_FAIL_MASK_SET(val, 1); + val = LONG_RUN_FAIL_MASK_SET(val, 1); + val = RUN_FAIL_MASK_SET(val, 1); + val = NOISE_FAIL_MASK_SET(val, 1); + val = STUCK_OUT_MASK_SET(val, 1); + val = SHUTDOWN_OFLO_MASK_SET(val, 1); + writel(val, ctx->csr_base + RNG_CONTROL); +} + +static int xgene_rng_init(struct hwrng *rng) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; + + ctx->failure_cnt = 0; + init_timer(&ctx->failure_timer); + + ctx->revision = readl(ctx->csr_base + RNG_EIP_REV); + + dev_dbg(ctx->dev, "Rev %d.%d.%d\n", + MAJOR_HW_REV_RD(ctx->revision), + MINOR_HW_REV_RD(ctx->revision), + HW_PATCH_LEVEL_RD(ctx->revision)); + + dev_dbg(ctx->dev, "Options 0x%08X", + readl(ctx->csr_base + RNG_OPTIONS)); + + xgene_rng_init_internal(ctx); + + ctx->datum_size = RNG_MAX_DATUM; + + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_rng_acpi_match[] = { + { "APMC0D18", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, xgene_rng_acpi_match); +#endif + +static struct hwrng xgene_rng_func = { + .name = "xgene-rng", + .init = xgene_rng_init, + .data_present = xgene_rng_data_present, + .data_read = xgene_rng_data_read, +}; + +static int xgene_rng_probe(struct platform_device *pdev) +{ + struct resource *res; + struct xgene_rng_dev *ctx; + int rc = 0; + + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return 
-ENOMEM; + + ctx->dev = &pdev->dev; + platform_set_drvdata(pdev, ctx); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctx->csr_base)) + return PTR_ERR(ctx->csr_base); + + ctx->irq = platform_get_irq(pdev, 0); + if (ctx->irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return ctx->irq; + } + + dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", + ctx->csr_base, ctx->irq); + + rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0, + dev_name(&pdev->dev), ctx); + if (rc) { + dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n"); + return rc; + } + + /* Enable IP clock */ + ctx->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(ctx->clk)) { + dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n"); + } else { + rc = clk_prepare_enable(ctx->clk); + if (rc) { + dev_warn(&pdev->dev, + "clock prepare enable failed for RNG"); + return rc; + } + } + + xgene_rng_func.priv = (unsigned long) ctx; + + rc = hwrng_register(&xgene_rng_func); + if (rc) { + dev_err(&pdev->dev, "RNG registering failed error %d\n", rc); + if (!IS_ERR(ctx->clk)) + clk_disable_unprepare(ctx->clk); + return rc; + } + + rc = device_init_wakeup(&pdev->dev, 1); + if (rc) { + dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n", + rc); + if (!IS_ERR(ctx->clk)) + clk_disable_unprepare(ctx->clk); + hwrng_unregister(&xgene_rng_func); + return rc; + } + + return 0; +} + +static int xgene_rng_remove(struct platform_device *pdev) +{ + struct xgene_rng_dev *ctx = platform_get_drvdata(pdev); + int rc; + + rc = device_init_wakeup(&pdev->dev, 0); + if (rc) + dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc); + if (!IS_ERR(ctx->clk)) + clk_disable_unprepare(ctx->clk); + hwrng_unregister(&xgene_rng_func); + + return rc; +} + +static const struct of_device_id xgene_rng_of_match[] = { + { .compatible = "apm,xgene-rng" }, + { } +}; + +MODULE_DEVICE_TABLE(of, xgene_rng_of_match); + +static struct platform_driver xgene_rng_driver = { + .probe = xgene_rng_probe, + .remove = xgene_rng_remove, + .driver = { + .name = "xgene-rng", + .of_match_table = xgene_rng_of_match, + .acpi_match_table = ACPI_PTR(xgene_rng_acpi_match), + }, +}; + +module_platform_driver(xgene_rng_driver); +MODULE_DESCRIPTION("APM X-Gene RNG driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/i8k.c b/kernel/drivers/char/i8k.c new file mode 100644 index 000000000..a43048b5b --- /dev/null +++ b/kernel/drivers/char/i8k.c @@ -0,0 +1,1007 @@ +/* + * i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops. + * + * Copyright (C) 2001 Massimo Dal Zotto + * + * Hwmon integration: + * Copyright (C) 2011 Jean Delvare + * Copyright (C) 2013, 2014 Guenter Roeck + * Copyright (C) 2014 Pali Rohár + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/dmi.h>
+#include <linux/capability.h>
+#include <linux/mutex.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#include <linux/i8k.h>
+
+#define I8K_SMM_FN_STATUS 0x0025
+#define I8K_SMM_POWER_STATUS 0x0069
+#define I8K_SMM_SET_FAN 0x01a3
+#define I8K_SMM_GET_FAN 0x00a3
+#define I8K_SMM_GET_SPEED 0x02a3
+#define I8K_SMM_GET_FAN_TYPE 0x03a3
+#define I8K_SMM_GET_NOM_SPEED 0x04a3
+#define I8K_SMM_GET_TEMP 0x10a3
+#define I8K_SMM_GET_TEMP_TYPE 0x11a3
+#define I8K_SMM_GET_DELL_SIG1 0xfea3
+#define I8K_SMM_GET_DELL_SIG2 0xffa3
+
+#define I8K_FAN_MULT 30
+#define I8K_FAN_MAX_RPM 30000
+#define I8K_MAX_TEMP 127
+
+#define I8K_FN_NONE 0x00
+#define I8K_FN_UP 0x01
+#define I8K_FN_DOWN 0x02
+#define I8K_FN_MUTE 0x04
+#define I8K_FN_MASK 0x07
+#define I8K_FN_SHIFT 8
+
+#define I8K_POWER_AC 0x05
+#define I8K_POWER_BATTERY 0x01
+
+static DEFINE_MUTEX(i8k_mutex);
+static char bios_version[4];
+static struct device *i8k_hwmon_dev;
+static u32 i8k_hwmon_flags;
+static uint i8k_fan_mult = I8K_FAN_MULT;
+static uint i8k_pwm_mult;
+static uint i8k_fan_max = I8K_FAN_HIGH;
+
+#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
+#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
+#define I8K_HWMON_HAVE_TEMP3 (1 << 2)
+#define I8K_HWMON_HAVE_TEMP4 (1 << 3)
+#define I8K_HWMON_HAVE_FAN1 (1 << 4)
+#define I8K_HWMON_HAVE_FAN2 (1 << 5)
+
+MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
+MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
+MODULE_LICENSE("GPL");
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported models");
+
+static bool ignore_dmi;
+module_param(ignore_dmi, bool, 0);
+MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
+
+static bool restricted;
+module_param(restricted, bool, 0);
+MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+
+static bool power_status;
+module_param(power_status, bool, 0600);
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+
+static uint fan_mult;
+module_param(fan_mult, uint, 0);
+MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with (default: autodetect)");
+
+static uint fan_max;
+module_param(fan_max, uint, 0);
+MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)");
+
+static int i8k_open_fs(struct inode *inode, struct file *file);
+static long i8k_ioctl(struct file *, unsigned int, unsigned long);
+
+static const struct file_operations i8k_fops = {
+	.owner = THIS_MODULE,
+	.open = i8k_open_fs,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.unlocked_ioctl = i8k_ioctl,
+};
+
+struct smm_regs {
+	unsigned int eax;
+	unsigned int ebx __packed;
+	unsigned int ecx __packed;
+	unsigned int edx __packed;
+	unsigned int esi __packed;
+	unsigned int edi __packed;
+};
+
+static inline const char *i8k_get_dmi_data(int field)
+{
+	const char *dmi_data = dmi_get_system_info(field);
+
+	return dmi_data && *dmi_data ? dmi_data : "?";
+}
+
+/*
+ * Call the System Management Mode BIOS. Code provided by Jonathan Buzzard.
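+ *
+ * The request is passed in memory through the smm_regs structure; the
+ * low byte of %eax written to I/O ports 0xb2 and 0x84 raises the SMI,
+ * and the saved carry flag (bit 0 of the flags) reports failure.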
+ */
+static int i8k_smm(struct smm_regs *regs)
+{
+	int rc;
+	int eax = regs->eax;
+	cpumask_var_t old_mask;
+
+	/* SMM requires CPU 0 */
+	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_copy(old_mask, &current->cpus_allowed);
+	rc = set_cpus_allowed_ptr(current, cpumask_of(0));
+	if (rc)
+		goto out;
+	if (smp_processor_id() != 0) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+#if defined(CONFIG_X86_64)
+	asm volatile("pushq %%rax\n\t"
+		"movl 0(%%rax),%%edx\n\t"
+		"pushq %%rdx\n\t"
+		"movl 4(%%rax),%%ebx\n\t"
+		"movl 8(%%rax),%%ecx\n\t"
+		"movl 12(%%rax),%%edx\n\t"
+		"movl 16(%%rax),%%esi\n\t"
+		"movl 20(%%rax),%%edi\n\t"
+		"popq %%rax\n\t"
+		"out %%al,$0xb2\n\t"
+		"out %%al,$0x84\n\t"
+		"xchgq %%rax,(%%rsp)\n\t"
+		"movl %%ebx,4(%%rax)\n\t"
+		"movl %%ecx,8(%%rax)\n\t"
+		"movl %%edx,12(%%rax)\n\t"
+		"movl %%esi,16(%%rax)\n\t"
+		"movl %%edi,20(%%rax)\n\t"
+		"popq %%rdx\n\t"
+		"movl %%edx,0(%%rax)\n\t"
+		"pushfq\n\t"
+		"popq %%rax\n\t"
+		"andl $1,%%eax\n"
+		: "=a"(rc)
+		: "a"(regs)
+		: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+#else
+	asm volatile("pushl %%eax\n\t"
+		"movl 0(%%eax),%%edx\n\t"
+		"push %%edx\n\t"
+		"movl 4(%%eax),%%ebx\n\t"
+		"movl 8(%%eax),%%ecx\n\t"
+		"movl 12(%%eax),%%edx\n\t"
+		"movl 16(%%eax),%%esi\n\t"
+		"movl 20(%%eax),%%edi\n\t"
+		"popl %%eax\n\t"
+		"out %%al,$0xb2\n\t"
+		"out %%al,$0x84\n\t"
+		"xchgl %%eax,(%%esp)\n\t"
+		"movl %%ebx,4(%%eax)\n\t"
+		"movl %%ecx,8(%%eax)\n\t"
+		"movl %%edx,12(%%eax)\n\t"
+		"movl %%esi,16(%%eax)\n\t"
+		"movl %%edi,20(%%eax)\n\t"
+		"popl %%edx\n\t"
+		"movl %%edx,0(%%eax)\n\t"
+		"lahf\n\t"
+		"shrl $8,%%eax\n\t"
+		"andl $1,%%eax\n"
+		: "=a"(rc)
+		: "a"(regs)
+		: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+#endif
+	if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
+		rc = -EINVAL;
+
+out:
+	set_cpus_allowed_ptr(current, old_mask);
+	free_cpumask_var(old_mask);
+	return rc;
+}
+
+/*
+ * Read the Fn key status.
+ */
+static int i8k_get_fn_status(void)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
+	int rc;
+
+	rc = i8k_smm(&regs);
+	if (rc < 0)
+		return rc;
+
+	switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
+	case I8K_FN_UP:
+		return I8K_VOL_UP;
+	case I8K_FN_DOWN:
+		return I8K_VOL_DOWN;
+	case I8K_FN_MUTE:
+		return I8K_VOL_MUTE;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Read the power status.
+ */
+static int i8k_get_power_status(void)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
+	int rc;
+
+	rc = i8k_smm(&regs);
+	if (rc < 0)
+		return rc;
+
+	return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
+}
+
+/*
+ * Read the fan status.
+ */
+static int i8k_get_fan_status(int fan)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+
+	regs.ebx = fan & 0xff;
+	return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
+/*
+ * Read the fan speed in RPM.
+ */
+static int i8k_get_fan_speed(int fan)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+
+	regs.ebx = fan & 0xff;
+	return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
+}
+
+/*
+ * Read the fan type.
+ */
+static int i8k_get_fan_type(int fan)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+
+	regs.ebx = fan & 0xff;
+	return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
+/*
+ * Read the fan nominal rpm for specific fan speed.
+ */
+static int i8k_get_fan_nominal_speed(int fan, int speed)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+
+	regs.ebx = (fan & 0xff) | (speed << 8);
+	return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
+}
+
+/*
+ * Set the fan speed (off, low, high). Returns the new fan status.
+ */
+static int i8k_set_fan(int fan, int speed)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+
+	speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
+	regs.ebx = (fan & 0xff) | (speed << 8);
+
+	return i8k_smm(&regs) ? : i8k_get_fan_status(fan);
+}
+
+static int i8k_get_temp_type(int sensor)
+{
+	struct smm_regs regs = { .eax = I8K_SMM_GET_TEMP_TYPE, };
+
+	regs.ebx = sensor & 0xff;
+	return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
+/*
+ * Read the cpu temperature.
+ */
+static int _i8k_get_temp(int sensor)
+{
+	struct smm_regs regs = {
+		.eax = I8K_SMM_GET_TEMP,
+		.ebx = sensor & 0xff,
+	};
+
+	return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
+static int i8k_get_temp(int sensor)
+{
+	int temp = _i8k_get_temp(sensor);
+
+	/*
+	 * Sometimes the temperature sensor returns 0x99, which is out of range.
+	 * In this case we retry (once) before returning an error.
+	 # 1003655137 00000058 00005a4b
+	 # 1003655138 00000099 00003a80 <--- 0x99 = 153 degrees
+	 # 1003655139 00000054 00005c52
+	 */
+	if (temp == 0x99) {
+		msleep(100);
+		temp = _i8k_get_temp(sensor);
+	}
+	/*
+	 * Return -ENODATA for all invalid temperatures.
+	 *
+	 * Known instances are the 0x99 value as seen above as well as
+	 * 0xc1 (193), which may be returned when trying to read the GPU
+	 * temperature if the system supports a GPU and it is currently
+	 * turned off.
+	 */
+	if (temp > I8K_MAX_TEMP)
+		return -ENODATA;
+
+	return temp;
+}
+
+static int i8k_get_dell_signature(int req_fn)
+{
+	struct smm_regs regs = { .eax = req_fn, };
+	int rc;
+
+	rc = i8k_smm(&regs);
+	if (rc < 0)
+		return rc;
+
+	return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
+}
+
+static int
+i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	int val = 0;
+	int speed;
+	unsigned char buff[16];
+	int __user *argp = (int __user *)arg;
+
+	if (!argp)
+		return -EINVAL;
+
+	switch (cmd) {
+	case I8K_BIOS_VERSION:
+		val = (bios_version[0] << 16) |
+				(bios_version[1] << 8) | bios_version[2];
+		break;
+
+	case I8K_MACHINE_ID:
+		memset(buff, 0, 16);
+		strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+			sizeof(buff));
+		break;
+
+	case I8K_FN_STATUS:
+		val = i8k_get_fn_status();
+		break;
+
+	case I8K_POWER_STATUS:
+		val = i8k_get_power_status();
+		break;
+
+	case I8K_GET_TEMP:
+		val = i8k_get_temp(0);
+		break;
+
+	case I8K_GET_SPEED:
+		if (copy_from_user(&val, argp, sizeof(int)))
+			return -EFAULT;
+
+		val = i8k_get_fan_speed(val);
+		break;
+
+	case I8K_GET_FAN:
+		if (copy_from_user(&val, argp, sizeof(int)))
+			return -EFAULT;
+
+		val = i8k_get_fan_status(val);
+		break;
+
+	case I8K_SET_FAN:
+		if (restricted && !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&val, argp, sizeof(int)))
+			return -EFAULT;
+
+		if (copy_from_user(&speed, argp + 1, sizeof(int)))
+			return -EFAULT;
+
+		val = i8k_set_fan(val, speed);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (val < 0)
+		return val;
+
+	switch (cmd) {
+	case I8K_BIOS_VERSION:
+		if (copy_to_user(argp, &val, 4))
+			return -EFAULT;
+
+		break;
+	case I8K_MACHINE_ID:
+		if (copy_to_user(argp, buff, 16))
+			return -EFAULT;
+
+		break;
+	default:
+		if (copy_to_user(argp, &val, sizeof(int)))
+			return -EFAULT;
+
+		break;
+	}
+
+	return 0;
+}
+
+static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	mutex_lock(&i8k_mutex);
+	ret = i8k_ioctl_unlocked(fp, cmd, arg);
+	mutex_unlock(&i8k_mutex);
+
+	return ret;
+}
+
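+/*
+ * Illustrative sketch (not part of the original source): user space
+ * reaches these handlers through ioctl() on the /proc/i8k file, e.g.
+ *
+ *	int fd = open("/proc/i8k", O_RDONLY);
+ *	int temp;
+ *
+ *	if (fd >= 0 && ioctl(fd, I8K_GET_TEMP, &temp) == 0)
+ *		printf("CPU temperature: %d\n", temp);
+ */
+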
+/* + * Print the information for /proc/i8k. + */ +static int i8k_proc_show(struct seq_file *seq, void *offset) +{ + int fn_key, cpu_temp, ac_power; + int left_fan, right_fan, left_speed, right_speed; + + cpu_temp = i8k_get_temp(0); /* 11100 µs */ + left_fan = i8k_get_fan_status(I8K_FAN_LEFT); /* 580 µs */ + right_fan = i8k_get_fan_status(I8K_FAN_RIGHT); /* 580 µs */ + left_speed = i8k_get_fan_speed(I8K_FAN_LEFT); /* 580 µs */ + right_speed = i8k_get_fan_speed(I8K_FAN_RIGHT); /* 580 µs */ + fn_key = i8k_get_fn_status(); /* 750 µs */ + if (power_status) + ac_power = i8k_get_power_status(); /* 14700 µs */ + else + ac_power = -1; + + /* + * Info: + * + * 1) Format version (this will change if format changes) + * 2) BIOS version + * 3) BIOS machine ID + * 4) Cpu temperature + * 5) Left fan status + * 6) Right fan status + * 7) Left fan speed + * 8) Right fan speed + * 9) AC power + * 10) Fn Key status + */ + seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", + I8K_PROC_FMT, + bios_version, + i8k_get_dmi_data(DMI_PRODUCT_SERIAL), + cpu_temp, + left_fan, right_fan, left_speed, right_speed, + ac_power, fn_key); + + return 0; +} + +static int i8k_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, i8k_proc_show, NULL); +} + + +/* + * Hwmon interface + */ + +static ssize_t i8k_hwmon_show_temp_label(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + static const char * const labels[] = { + "CPU", + "GPU", + "SODIMM", + "Other", + "Ambient", + "Other", + }; + int index = to_sensor_dev_attr(devattr)->index; + int type; + + type = i8k_get_temp_type(index); + if (type < 0) + return type; + if (type >= ARRAY_SIZE(labels)) + type = ARRAY_SIZE(labels) - 1; + return sprintf(buf, "%s\n", labels[type]); +} + +static ssize_t i8k_hwmon_show_temp(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int index = to_sensor_dev_attr(devattr)->index; + int temp; + + temp = i8k_get_temp(index); + if (temp < 0) + return temp; + return sprintf(buf, "%d\n", temp * 1000); +} + +static ssize_t i8k_hwmon_show_fan_label(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + static const char * const labels[] = { + "Processor Fan", + "Motherboard Fan", + "Video Fan", + "Power Supply Fan", + "Chipset Fan", + "Other Fan", + }; + int index = to_sensor_dev_attr(devattr)->index; + bool dock = false; + int type; + + type = i8k_get_fan_type(index); + if (type < 0) + return type; + + if (type & 0x10) { + dock = true; + type &= 0x0F; + } + + if (type >= ARRAY_SIZE(labels)) + type = (ARRAY_SIZE(labels) - 1); + + return sprintf(buf, "%s%s\n", (dock ? 
"Docking " : ""), labels[type]); +} + +static ssize_t i8k_hwmon_show_fan(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int index = to_sensor_dev_attr(devattr)->index; + int fan_speed; + + fan_speed = i8k_get_fan_speed(index); + if (fan_speed < 0) + return fan_speed; + return sprintf(buf, "%d\n", fan_speed); +} + +static ssize_t i8k_hwmon_show_pwm(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int index = to_sensor_dev_attr(devattr)->index; + int status; + + status = i8k_get_fan_status(index); + if (status < 0) + return -EIO; + return sprintf(buf, "%d\n", clamp_val(status * i8k_pwm_mult, 0, 255)); +} + +static ssize_t i8k_hwmon_set_pwm(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int index = to_sensor_dev_attr(attr)->index; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + val = clamp_val(DIV_ROUND_CLOSEST(val, i8k_pwm_mult), 0, i8k_fan_max); + + mutex_lock(&i8k_mutex); + err = i8k_set_fan(index, val); + mutex_unlock(&i8k_mutex); + + return err < 0 ? -EIO : count; +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL, + 0); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL, + 1); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL, + 2); +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL, + 3); +static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL, 0); +static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL, + 0); +static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, + i8k_hwmon_set_pwm, 0); +static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL, + 1); +static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL, + 1); +static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, + i8k_hwmon_set_pwm, 1); + +static struct attribute *i8k_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */ + &sensor_dev_attr_temp1_label.dev_attr.attr, /* 1 */ + &sensor_dev_attr_temp2_input.dev_attr.attr, /* 2 */ + &sensor_dev_attr_temp2_label.dev_attr.attr, /* 3 */ + &sensor_dev_attr_temp3_input.dev_attr.attr, /* 4 */ + &sensor_dev_attr_temp3_label.dev_attr.attr, /* 5 */ + &sensor_dev_attr_temp4_input.dev_attr.attr, /* 6 */ + &sensor_dev_attr_temp4_label.dev_attr.attr, /* 7 */ + &sensor_dev_attr_fan1_input.dev_attr.attr, /* 8 */ + &sensor_dev_attr_fan1_label.dev_attr.attr, /* 9 */ + &sensor_dev_attr_pwm1.dev_attr.attr, /* 10 */ + &sensor_dev_attr_fan2_input.dev_attr.attr, /* 11 */ + &sensor_dev_attr_fan2_label.dev_attr.attr, /* 12 */ + &sensor_dev_attr_pwm2.dev_attr.attr, /* 13 */ + NULL +}; + +static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, + int index) +{ + if (index >= 0 && index <= 1 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) + return 0; + if (index >= 2 && index <= 3 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2)) + return 0; + if (index >= 4 && index <= 5 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3)) + return 0; + if (index >= 6 && index <= 7 && + !(i8k_hwmon_flags & 
I8K_HWMON_HAVE_TEMP4)) + return 0; + if (index >= 8 && index <= 10 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1)) + return 0; + if (index >= 11 && index <= 13 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2)) + return 0; + + return attr->mode; +} + +static const struct attribute_group i8k_group = { + .attrs = i8k_attrs, + .is_visible = i8k_is_visible, +}; +__ATTRIBUTE_GROUPS(i8k); + +static int __init i8k_init_hwmon(void) +{ + int err; + + i8k_hwmon_flags = 0; + + /* CPU temperature attributes, if temperature type is OK */ + err = i8k_get_temp_type(0); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1; + /* check for additional temperature sensors */ + err = i8k_get_temp_type(1); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2; + err = i8k_get_temp_type(2); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3; + err = i8k_get_temp_type(3); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; + + /* First fan attributes, if fan type is OK */ + err = i8k_get_fan_type(0); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; + + /* Second fan attributes, if fan type is OK */ + err = i8k_get_fan_type(1); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; + + i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL, + i8k_groups); + if (IS_ERR(i8k_hwmon_dev)) { + err = PTR_ERR(i8k_hwmon_dev); + i8k_hwmon_dev = NULL; + pr_err("hwmon registration failed (%d)\n", err); + return err; + } + return 0; +} + +struct i8k_config_data { + uint fan_mult; + uint fan_max; +}; + +enum i8k_configs { + DELL_LATITUDE_D520, + DELL_PRECISION_490, + DELL_STUDIO, + DELL_XPS, +}; + +static const struct i8k_config_data i8k_config_data[] = { + [DELL_LATITUDE_D520] = { + .fan_mult = 1, + .fan_max = I8K_FAN_TURBO, + }, + [DELL_PRECISION_490] = { + .fan_mult = 1, + .fan_max = I8K_FAN_TURBO, + }, + [DELL_STUDIO] = { + .fan_mult = 1, + .fan_max = I8K_FAN_HIGH, + }, + [DELL_XPS] = { + .fan_mult = 1, + .fan_max = I8K_FAN_HIGH, + }, +}; + +static struct dmi_system_id i8k_dmi_table[] __initdata = { + { + .ident = "Dell Inspiron", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron"), + }, + }, + { + .ident = "Dell Latitude", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"), + }, + }, + { + .ident = "Dell Inspiron 2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron"), + }, + }, + { + .ident = "Dell Latitude D520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D520"), + }, + .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520], + }, + { + .ident = "Dell Latitude 2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"), + }, + }, + { /* UK Inspiron 6400 */ + .ident = "Dell Inspiron 3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MM061"), + }, + }, + { + .ident = "Dell Inspiron 3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MP061"), + }, + }, + { + .ident = "Dell Precision 490", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, + "Precision WorkStation 490"), + }, + .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490], + }, + { + .ident = "Dell Precision", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision"), + }, + }, + { + .ident = "Dell Vostro", + 
.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
+		},
+	},
+	{
+		.ident = "Dell XPS421",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
+		},
+	},
+	{
+		.ident = "Dell Studio",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
+		},
+		.driver_data = (void *)&i8k_config_data[DELL_STUDIO],
+	},
+	{
+		.ident = "Dell XPS 13",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS13"),
+		},
+		.driver_data = (void *)&i8k_config_data[DELL_XPS],
+	},
+	{
+		.ident = "Dell XPS M140",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
+		},
+		.driver_data = (void *)&i8k_config_data[DELL_XPS],
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+
+/*
+ * Probe for the presence of a supported laptop.
+ */
+static int __init i8k_probe(void)
+{
+	const struct dmi_system_id *id;
+	int fan, ret;
+
+	/*
+	 * Get DMI information
+	 */
+	if (!dmi_check_system(i8k_dmi_table)) {
+		if (!ignore_dmi && !force)
+			return -ENODEV;
+
+		pr_info("not running on a supported Dell system.\n");
+		pr_info("vendor=%s, model=%s, version=%s\n",
+			i8k_get_dmi_data(DMI_SYS_VENDOR),
+			i8k_get_dmi_data(DMI_PRODUCT_NAME),
+			i8k_get_dmi_data(DMI_BIOS_VERSION));
+	}
+
+	strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
+		sizeof(bios_version));
+
+	/*
+	 * Get SMM Dell signature
+	 */
+	if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) &&
+	    i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) {
+		pr_err("unable to get SMM Dell signature\n");
+		if (!force)
+			return -ENODEV;
+	}
+
+	/*
+	 * Set fan multiplier and maximal fan speed from dmi config
+	 * Values specified in module parameters override values from dmi
+	 */
+	id = dmi_first_match(i8k_dmi_table);
+	if (id && id->driver_data) {
+		const struct i8k_config_data *conf = id->driver_data;
+		if (!fan_mult && conf->fan_mult)
+			fan_mult = conf->fan_mult;
+		if (!fan_max && conf->fan_max)
+			fan_max = conf->fan_max;
+	}
+
+	i8k_fan_max = fan_max ? : I8K_FAN_HIGH;	/* Must not be 0 */
+	i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
+
+	if (!fan_mult) {
+		/*
+		 * Autodetect fan multiplier based on nominal rpm
+		 * If fan reports rpm value too high then set multiplier to 1
+		 */
+		for (fan = 0; fan < 2; ++fan) {
+			ret = i8k_get_fan_nominal_speed(fan, i8k_fan_max);
+			if (ret < 0)
+				continue;
+			if (ret > I8K_FAN_MAX_RPM)
+				i8k_fan_mult = 1;
+			break;
+		}
+	} else {
+		/* Fan multiplier was specified in module param or in dmi */
+		i8k_fan_mult = fan_mult;
+	}
+
+	return 0;
+}
+
+static int __init i8k_init(void)
+{
+	struct proc_dir_entry *proc_i8k;
+	int err;
+
+	/*
+	 * Are we running on a supported laptop?
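+	 * (i8k_probe() verifies the DMI data and the SMM Dell signature.)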
+	 */
+	if (i8k_probe())
+		return -ENODEV;
+
+	/* Register the proc entry */
+	proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
+	if (!proc_i8k)
+		return -ENOENT;
+
+	err = i8k_init_hwmon();
+	if (err)
+		goto exit_remove_proc;
+
+	return 0;
+
+ exit_remove_proc:
+	remove_proc_entry("i8k", NULL);
+	return err;
+}
+
+static void __exit i8k_exit(void)
+{
+	hwmon_device_unregister(i8k_hwmon_dev);
+	remove_proc_entry("i8k", NULL);
+}
+
+module_init(i8k_init);
+module_exit(i8k_exit);
diff --git a/kernel/drivers/char/ipmi/Kconfig b/kernel/drivers/char/ipmi/Kconfig
new file mode 100644
index 000000000..6ed9e9fe5
--- /dev/null
+++ b/kernel/drivers/char/ipmi/Kconfig
@@ -0,0 +1,90 @@
+#
+# IPMI device configuration
+#
+
+menuconfig IPMI_HANDLER
+	tristate 'IPMI top-level message handler'
+	depends on HAS_IOMEM
+	help
+	  This enables the central IPMI message handler, required for IPMI
+	  to work.
+
+	  IPMI is a standard for managing sensors (temperature,
+	  voltage, etc.) in a system.
+
+	  See <file:Documentation/IPMI.txt> for more details on the driver.
+
+	  If unsure, say N.
+
+if IPMI_HANDLER
+
+config IPMI_PANIC_EVENT
+	bool 'Generate a panic event to all BMCs on a panic'
+	help
+	  When a panic occurs, this will cause the IPMI message handler to
+	  generate an IPMI event describing the panic to each interface
+	  registered with the message handler.
+
+config IPMI_PANIC_STRING
+	bool 'Generate OEM events containing the panic string'
+	depends on IPMI_PANIC_EVENT
+	help
+	  When a panic occurs, this will cause the IPMI message handler to
+	  generate IPMI OEM type f0 events holding the IPMB address of the
+	  panic generator (byte 4 of the event), a sequence number for the
+	  string (byte 5 of the event) and part of the string (the rest of the
+	  event). Bytes 1, 2, and 3 are the normal usage for an OEM event.
+	  You can fetch these events and use the sequence numbers to piece the
+	  string together.
+
+config IPMI_DEVICE_INTERFACE
+	tristate 'Device interface for IPMI'
+	help
+	  This provides an IOCTL interface to the IPMI message handler so
+	  userland processes may use IPMI. It supports poll() and select().
+
+config IPMI_SI
+	tristate 'IPMI System Interface handler'
+	help
+	  Provides a driver for System Interfaces (KCS, SMIC, BT).
+	  Currently, only KCS and SMIC are supported. If
+	  you are using IPMI, you should probably say "y" here.
+
+config IPMI_SI_PROBE_DEFAULTS
+	bool 'Probe for all possible IPMI system interfaces by default'
+	default n
+	depends on IPMI_SI
+	help
+	  Modern systems will usually expose IPMI interfaces via a discoverable
+	  firmware mechanism such as ACPI or DMI. Older systems do not, and so
+	  the driver is forced to probe hardware manually. This may cause boot
+	  delays. Say "n" here to disable this manual probing. IPMI will then
+	  only be available on older systems if the "ipmi_si_intf.trydefaults=1"
+	  boot argument is passed.
+
+config IPMI_SSIF
+	tristate 'IPMI SMBus handler (SSIF)'
+	select I2C
+	help
+	  Provides a driver for an SMBus interface to a BMC, meaning that you
+	  have a driver that must be accessed over an I2C bus instead of a
+	  standard interface. This module requires I2C support.
+
+config IPMI_POWERNV
+	depends on PPC_POWERNV
+	tristate 'POWERNV (OPAL firmware) IPMI interface'
+	help
+	  Provides a driver for OPAL firmware-based IPMI interfaces.
+
+config IPMI_WATCHDOG
+	tristate 'IPMI Watchdog Timer'
+	help
+	  This enables the IPMI watchdog timer.
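+	  The timer is driven through the standard Linux watchdog device
+	  interface, /dev/watchdog.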
+
+config IPMI_POWEROFF
+	tristate 'IPMI Poweroff'
+	help
+	  This enables a function to power off the system with IPMI if
+	  the IPMI management controller is capable of this.
+
+endif # IPMI_HANDLER
diff --git a/kernel/drivers/char/ipmi/Makefile b/kernel/drivers/char/ipmi/Makefile
new file mode 100644
index 000000000..f3ffde1f5
--- /dev/null
+++ b/kernel/drivers/char/ipmi/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/kernel/drivers/char/ipmi/ipmi_bt_sm.c b/kernel/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 000000000..61e716166
--- /dev/null
+++ b/kernel/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,705 @@
+/*
+ * ipmi_bt_sm.c
+ *
+ * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ * of the driver architecture at http://sourceforge.net/projects/openipmi
+ *
+ * Author: Rocky Craig
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <linux/kernel.h>	/* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h>	/* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define BT_DEBUG_OFF	0	/* Used in production */
+#define BT_DEBUG_ENABLE	1	/* Generic messages */
+#define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
+#define BT_DEBUG_STATES	4	/* Verbose look at state changes */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
+
+static int bt_debug; /* 0 == BT_DEBUG_OFF */
+
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers. However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
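+ * (Hence the write_data[] and read_data[] buffers in struct si_sm_data
+ * below are simply sized to IPMI_MAX_MSG_LENGTH rather than to whatever
+ * a given BMC advertises.)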
+ */ + +#define BT_NORMAL_TIMEOUT 5 /* seconds */ +#define BT_NORMAL_RETRY_LIMIT 2 +#define BT_RESET_DELAY 6 /* seconds after warm reset */ + +/* + * States are written in chronological order and usually cover + * multiple rows of the state table discussion in the IPMI spec. + */ + +enum bt_states { + BT_STATE_IDLE = 0, /* Order is critical in this list */ + BT_STATE_XACTION_START, + BT_STATE_WRITE_BYTES, + BT_STATE_WRITE_CONSUME, + BT_STATE_READ_WAIT, + BT_STATE_CLEAR_B2H, + BT_STATE_READ_BYTES, + BT_STATE_RESET1, /* These must come last */ + BT_STATE_RESET2, + BT_STATE_RESET3, + BT_STATE_RESTART, + BT_STATE_PRINTME, + BT_STATE_CAPABILITIES_BEGIN, + BT_STATE_CAPABILITIES_END, + BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ +}; + +/* + * Macros seen at the end of state "case" blocks. They help with legibility + * and debugging. + */ + +#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } + +#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } + +struct si_sm_data { + enum bt_states state; + unsigned char seq; /* BT sequence number */ + struct si_sm_io *io; + unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ + int write_count; + unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ + int read_count; + int truncated; + long timeout; /* microseconds countdown */ + int error_retries; /* end of "common" fields */ + int nonzero_status; /* hung BMCs stay all 0 */ + enum bt_states complete; /* to divert the state machine */ + int BT_CAP_outreqs; + long BT_CAP_req2rsp; + int BT_CAP_retries; /* Recommended retries */ +}; + +#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */ +#define BT_CLR_RD_PTR 0x02 +#define BT_H2B_ATN 0x04 +#define BT_B2H_ATN 0x08 +#define BT_SMS_ATN 0x10 +#define BT_OEM0 0x20 +#define BT_H_BUSY 0x40 +#define BT_B_BUSY 0x80 + +/* + * Some bits are toggled on each write: write once to set it, once + * more to clear it; writing a zero does nothing. To absolutely + * clear it, check its state and write if set. This avoids the "get + * current then use as mask" scheme to modify one bit. Note that the + * variable "bt" is hardcoded into these macros. + */ + +#define BT_STATUS bt->io->inputb(bt->io, 0) +#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) + +#define BMC2HOST bt->io->inputb(bt->io, 1) +#define HOST2BMC(x) bt->io->outputb(bt->io, 1, x) + +#define BT_INTMASK_R bt->io->inputb(bt->io, 2) +#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) + +/* + * Convenience routines for debugging. These are not multi-open safe! + * Note the macros have hardcoded variables in them. 
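+ * STATE2TXT expects a local "bt" and STATUS2TXT a local "status" to be
+ * in scope at the point of use.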
+ */ + +static char *state2txt(unsigned char state) +{ + switch (state) { + case BT_STATE_IDLE: return("IDLE"); + case BT_STATE_XACTION_START: return("XACTION"); + case BT_STATE_WRITE_BYTES: return("WR_BYTES"); + case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); + case BT_STATE_READ_WAIT: return("RD_WAIT"); + case BT_STATE_CLEAR_B2H: return("CLEAR_B2H"); + case BT_STATE_READ_BYTES: return("RD_BYTES"); + case BT_STATE_RESET1: return("RESET1"); + case BT_STATE_RESET2: return("RESET2"); + case BT_STATE_RESET3: return("RESET3"); + case BT_STATE_RESTART: return("RESTART"); + case BT_STATE_LONG_BUSY: return("LONG_BUSY"); + case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); + case BT_STATE_CAPABILITIES_END: return("CAP_END"); + } + return("BAD STATE"); +} +#define STATE2TXT state2txt(bt->state) + +static char *status2txt(unsigned char status) +{ + /* + * This cannot be called by two threads at the same time and + * the buffer is always consumed immediately, so the static is + * safe to use. + */ + static char buf[40]; + + strcpy(buf, "[ "); + if (status & BT_B_BUSY) + strcat(buf, "B_BUSY "); + if (status & BT_H_BUSY) + strcat(buf, "H_BUSY "); + if (status & BT_OEM0) + strcat(buf, "OEM0 "); + if (status & BT_SMS_ATN) + strcat(buf, "SMS "); + if (status & BT_B2H_ATN) + strcat(buf, "B2H "); + if (status & BT_H2B_ATN) + strcat(buf, "H2B "); + strcat(buf, "]"); + return buf; +} +#define STATUS2TXT status2txt(status) + +/* called externally at insmod time, and internally on cleanup */ + +static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) +{ + memset(bt, 0, sizeof(struct si_sm_data)); + if (bt->io != io) { + /* external: one-time only things */ + bt->io = io; + bt->seq = 0; + } + bt->state = BT_STATE_IDLE; /* start here */ + bt->complete = BT_STATE_IDLE; /* end here */ + bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; + bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; + /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ + return 3; /* We claim 3 bytes of space; ought to check SPMI table */ +} + +/* Jam a completion code (probably an error) into a response */ + +static void force_result(struct si_sm_data *bt, unsigned char completion_code) +{ + bt->read_data[0] = 4; /* # following bytes */ + bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */ + bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */ + bt->read_data[3] = bt->write_data[3]; /* Command */ + bt->read_data[4] = completion_code; + bt->read_count = 5; +} + +/* The upper state machine starts here */ + +static int bt_start_transaction(struct si_sm_data *bt, + unsigned char *data, + unsigned int size) +{ + unsigned int i; + + if (size < 2) + return IPMI_REQ_LEN_INVALID_ERR; + if (size > IPMI_MAX_MSG_LENGTH) + return IPMI_REQ_LEN_EXCEEDED_ERR; + + if (bt->state == BT_STATE_LONG_BUSY) + return IPMI_NODE_BUSY_ERR; + + if (bt->state != BT_STATE_IDLE) + return IPMI_NOT_IN_MY_STATE_ERR; + + if (bt_debug & BT_DEBUG_MSG) { + printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); + printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); + for (i = 0; i < size; i ++) + printk(" %02x", data[i]); + printk("\n"); + } + bt->write_data[0] = size + 1; /* all data plus seq byte */ + bt->write_data[1] = *data; /* NetFn/LUN */ + bt->write_data[2] = bt->seq++; + memcpy(bt->write_data + 3, data + 1, size - 1); + bt->write_count = size + 2; + bt->error_retries = 0; + bt->nonzero_status = 0; + bt->truncated = 0; + bt->state = BT_STATE_XACTION_START; + bt->timeout = bt->BT_CAP_req2rsp; + 
force_result(bt, IPMI_ERR_UNSPECIFIED); + return 0; +} + +/* + * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE + * it calls this. Strip out the length and seq bytes. + */ + +static int bt_get_result(struct si_sm_data *bt, + unsigned char *data, + unsigned int length) +{ + int i, msg_len; + + msg_len = bt->read_count - 2; /* account for length & seq */ + if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { + force_result(bt, IPMI_ERR_UNSPECIFIED); + msg_len = 3; + } + data[0] = bt->read_data[1]; + data[1] = bt->read_data[3]; + if (length < msg_len || bt->truncated) { + data[2] = IPMI_ERR_MSG_TRUNCATED; + msg_len = 3; + } else + memcpy(data + 2, bt->read_data + 4, msg_len - 2); + + if (bt_debug & BT_DEBUG_MSG) { + printk(KERN_WARNING "BT: result %d bytes:", msg_len); + for (i = 0; i < msg_len; i++) + printk(" %02x", data[i]); + printk("\n"); + } + return msg_len; +} + +/* This bit's functionality is optional */ +#define BT_BMC_HWRST 0x80 + +static void reset_flags(struct si_sm_data *bt) +{ + if (bt_debug) + printk(KERN_WARNING "IPMI BT: flag reset %s\n", + status2txt(BT_STATUS)); + if (BT_STATUS & BT_H_BUSY) + BT_CONTROL(BT_H_BUSY); /* force clear */ + BT_CONTROL(BT_CLR_WR_PTR); /* always reset */ + BT_CONTROL(BT_SMS_ATN); /* always clear */ + BT_INTMASK_W(BT_BMC_HWRST); +} + +/* + * Get rid of an unwanted/stale response. This should only be needed for + * BMCs that support multiple outstanding requests. + */ + +static void drain_BMC2HOST(struct si_sm_data *bt) +{ + int i, size; + + if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */ + return; + + BT_CONTROL(BT_H_BUSY); /* now set */ + BT_CONTROL(BT_B2H_ATN); /* always clear */ + BT_STATUS; /* pause */ + BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */ + BT_CONTROL(BT_CLR_RD_PTR); /* always reset */ + if (bt_debug) + printk(KERN_WARNING "IPMI BT: stale response %s; ", + status2txt(BT_STATUS)); + size = BMC2HOST; + for (i = 0; i < size ; i++) + BMC2HOST; + BT_CONTROL(BT_H_BUSY); /* now clear */ + if (bt_debug) + printk("drained %d bytes\n", size + 1); +} + +static inline void write_all_bytes(struct si_sm_data *bt) +{ + int i; + + if (bt_debug & BT_DEBUG_MSG) { + printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", + bt->write_count, bt->seq); + for (i = 0; i < bt->write_count; i++) + printk(" %02x", bt->write_data[i]); + printk("\n"); + } + for (i = 0; i < bt->write_count; i++) + HOST2BMC(bt->write_data[i]); +} + +static inline int read_all_bytes(struct si_sm_data *bt) +{ + unsigned int i; + + /* + * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. + * Keep layout of first four bytes aligned with write_data[] + */ + + bt->read_data[0] = BMC2HOST; + bt->read_count = bt->read_data[0]; + + if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { + if (bt_debug & BT_DEBUG_MSG) + printk(KERN_WARNING "BT: bad raw rsp len=%d\n", + bt->read_count); + bt->truncated = 1; + return 1; /* let next XACTION START clean it up */ + } + for (i = 1; i <= bt->read_count; i++) + bt->read_data[i] = BMC2HOST; + bt->read_count++; /* Account internally for length byte */ + + if (bt_debug & BT_DEBUG_MSG) { + int max = bt->read_count; + + printk(KERN_WARNING "BT: got %d bytes seq=0x%02X", + max, bt->read_data[2]); + if (max > 16) + max = 16; + for (i = 0; i < max; i++) + printk(KERN_CONT " %02x", bt->read_data[i]); + printk(KERN_CONT "%s\n", bt->read_count == max ? 
"" : " ..."); + } + + /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ + if ((bt->read_data[3] == bt->write_data[3]) && + (bt->read_data[2] == bt->write_data[2]) && + ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) + return 1; + + if (bt_debug & BT_DEBUG_MSG) + printk(KERN_WARNING "IPMI BT: bad packet: " + "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", + bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], + bt->read_data[1], bt->read_data[2], bt->read_data[3]); + return 0; +} + +/* Restart if retries are left, or return an error completion code */ + +static enum si_sm_result error_recovery(struct si_sm_data *bt, + unsigned char status, + unsigned char cCode) +{ + char *reason; + + bt->timeout = bt->BT_CAP_req2rsp; + + switch (cCode) { + case IPMI_TIMEOUT_ERR: + reason = "timeout"; + break; + default: + reason = "internal error"; + break; + } + + printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ + reason, STATE2TXT, STATUS2TXT); + + /* + * Per the IPMI spec, retries are based on the sequence number + * known only to this module, so manage a restart here. + */ + (bt->error_retries)++; + if (bt->error_retries < bt->BT_CAP_retries) { + printk("%d retries left\n", + bt->BT_CAP_retries - bt->error_retries); + bt->state = BT_STATE_RESTART; + return SI_SM_CALL_WITHOUT_DELAY; + } + + printk(KERN_WARNING "failed %d retries, sending error response\n", + bt->BT_CAP_retries); + if (!bt->nonzero_status) + printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); + + /* this is most likely during insmod */ + else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { + printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); + bt->state = BT_STATE_RESET1; + return SI_SM_CALL_WITHOUT_DELAY; + } + + /* + * Concoct a useful error message, set up the next state, and + * be done with this sequence. + */ + + bt->state = BT_STATE_IDLE; + switch (cCode) { + case IPMI_TIMEOUT_ERR: + if (status & BT_B_BUSY) { + cCode = IPMI_NODE_BUSY_ERR; + bt->state = BT_STATE_LONG_BUSY; + } + break; + default: + break; + } + force_result(bt, cCode); + return SI_SM_TRANSACTION_COMPLETE; +} + +/* Check status and (usually) take action and change this state machine. */ + +static enum si_sm_result bt_event(struct si_sm_data *bt, long time) +{ + unsigned char status, BT_CAP[8]; + static enum bt_states last_printed = BT_STATE_PRINTME; + int i; + + status = BT_STATUS; + bt->nonzero_status |= status; + if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) { + printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n", + STATE2TXT, + STATUS2TXT, + bt->timeout, + time); + last_printed = bt->state; + } + + /* + * Commands that time out may still (eventually) provide a response. + * This stale response will get in the way of a new response so remove + * it if possible (hopefully during IDLE). Even if it comes up later + * it will be rejected by its (now-forgotten) seq number. + */ + + if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { + drain_BMC2HOST(bt); + BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); + } + + if ((bt->state != BT_STATE_IDLE) && + (bt->state < BT_STATE_PRINTME)) { + /* check timeout */ + bt->timeout -= time; + if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) + return error_recovery(bt, + status, + IPMI_TIMEOUT_ERR); + } + + switch (bt->state) { + + /* + * Idle state first checks for asynchronous messages from another + * channel, then does some opportunistic housekeeping. 
+ */ + + case BT_STATE_IDLE: + if (status & BT_SMS_ATN) { + BT_CONTROL(BT_SMS_ATN); /* clear it */ + return SI_SM_ATTN; + } + + if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ + BT_CONTROL(BT_H_BUSY); + + /* Read BT capabilities if it hasn't been done yet */ + if (!bt->BT_CAP_outreqs) + BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, + SI_SM_CALL_WITHOUT_DELAY); + bt->timeout = bt->BT_CAP_req2rsp; + BT_SI_SM_RETURN(SI_SM_IDLE); + + case BT_STATE_XACTION_START: + if (status & (BT_B_BUSY | BT_H2B_ATN)) + BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); + if (BT_STATUS & BT_H_BUSY) + BT_CONTROL(BT_H_BUSY); /* force clear */ + BT_STATE_CHANGE(BT_STATE_WRITE_BYTES, + SI_SM_CALL_WITHOUT_DELAY); + + case BT_STATE_WRITE_BYTES: + if (status & BT_H_BUSY) + BT_CONTROL(BT_H_BUSY); /* clear */ + BT_CONTROL(BT_CLR_WR_PTR); + write_all_bytes(bt); + BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */ + BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME, + SI_SM_CALL_WITHOUT_DELAY); + + case BT_STATE_WRITE_CONSUME: + if (status & (BT_B_BUSY | BT_H2B_ATN)) + BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); + BT_STATE_CHANGE(BT_STATE_READ_WAIT, + SI_SM_CALL_WITHOUT_DELAY); + + /* Spinning hard can suppress B2H_ATN and force a timeout */ + + case BT_STATE_READ_WAIT: + if (!(status & BT_B2H_ATN)) + BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); + BT_CONTROL(BT_H_BUSY); /* set */ + + /* + * Uncached, ordered writes should just proceed serially but + * some BMCs don't clear B2H_ATN with one hit. Fast-path a + * workaround without too much penalty to the general case. + */ + + BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ + BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, + SI_SM_CALL_WITHOUT_DELAY); + + case BT_STATE_CLEAR_B2H: + if (status & BT_B2H_ATN) { + /* keep hitting it */ + BT_CONTROL(BT_B2H_ATN); + BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); + } + BT_STATE_CHANGE(BT_STATE_READ_BYTES, + SI_SM_CALL_WITHOUT_DELAY); + + case BT_STATE_READ_BYTES: + if (!(status & BT_H_BUSY)) + /* check in case of retry */ + BT_CONTROL(BT_H_BUSY); + BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ + i = read_all_bytes(bt); /* true == packet seq match */ + BT_CONTROL(BT_H_BUSY); /* NOW clear */ + if (!i) /* Not my message */ + BT_STATE_CHANGE(BT_STATE_READ_WAIT, + SI_SM_CALL_WITHOUT_DELAY); + bt->state = bt->complete; + return bt->state == BT_STATE_IDLE ? /* where to next? */ + SI_SM_TRANSACTION_COMPLETE : /* normal */ + SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */ + + case BT_STATE_LONG_BUSY: /* For example: after FW update */ + if (!(status & BT_B_BUSY)) { + reset_flags(bt); /* next state is now IDLE */ + bt_init_data(bt, bt->io); + } + return SI_SM_CALL_WITH_DELAY; /* No repeat printing */ + + case BT_STATE_RESET1: + reset_flags(bt); + drain_BMC2HOST(bt); + BT_STATE_CHANGE(BT_STATE_RESET2, + SI_SM_CALL_WITH_DELAY); + + case BT_STATE_RESET2: /* Send a soft reset */ + BT_CONTROL(BT_CLR_WR_PTR); + HOST2BMC(3); /* number of bytes following */ + HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */ + HOST2BMC(42); /* Sequence number */ + HOST2BMC(3); /* Cmd == Soft reset */ + BT_CONTROL(BT_H2B_ATN); + bt->timeout = BT_RESET_DELAY * USEC_PER_SEC; + BT_STATE_CHANGE(BT_STATE_RESET3, + SI_SM_CALL_WITH_DELAY); + + case BT_STATE_RESET3: /* Hold off everything for a bit */ + if (bt->timeout > 0) + return SI_SM_CALL_WITH_DELAY; + drain_BMC2HOST(bt); + BT_STATE_CHANGE(BT_STATE_RESTART, + SI_SM_CALL_WITH_DELAY); + + case BT_STATE_RESTART: /* don't reset retries or seq! 
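+			 * A retried transaction must keep its sequence
+			 * number so the eventual response can still be
+			 * matched.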
*/ + bt->read_count = 0; + bt->nonzero_status = 0; + bt->timeout = bt->BT_CAP_req2rsp; + BT_STATE_CHANGE(BT_STATE_XACTION_START, + SI_SM_CALL_WITH_DELAY); + + /* + * Get BT Capabilities, using timing of upper level state machine. + * Set outreqs to prevent infinite loop on timeout. + */ + case BT_STATE_CAPABILITIES_BEGIN: + bt->BT_CAP_outreqs = 1; + { + unsigned char GetBT_CAP[] = { 0x18, 0x36 }; + bt->state = BT_STATE_IDLE; + bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); + } + bt->complete = BT_STATE_CAPABILITIES_END; + BT_STATE_CHANGE(BT_STATE_XACTION_START, + SI_SM_CALL_WITH_DELAY); + + case BT_STATE_CAPABILITIES_END: + i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); + bt_init_data(bt, bt->io); + if ((i == 8) && !BT_CAP[2]) { + bt->BT_CAP_outreqs = BT_CAP[3]; + bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; + bt->BT_CAP_retries = BT_CAP[7]; + } else + printk(KERN_WARNING "IPMI BT: using default values\n"); + if (!bt->BT_CAP_outreqs) + bt->BT_CAP_outreqs = 1; + printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", + bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); + bt->timeout = bt->BT_CAP_req2rsp; + return SI_SM_CALL_WITHOUT_DELAY; + + default: /* should never occur */ + return error_recovery(bt, + status, + IPMI_ERR_UNSPECIFIED); + } + return SI_SM_CALL_WITH_DELAY; +} + +static int bt_detect(struct si_sm_data *bt) +{ + /* + * It's impossible for the BT status and interrupt registers to be + * all 1's, (assuming a properly functioning, self-initialized BMC) + * but that's what you get from reading a bogus address, so we + * test that first. The calling routine uses negative logic. + */ + + if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) + return 1; + reset_flags(bt); + return 0; +} + +static void bt_cleanup(struct si_sm_data *bt) +{ +} + +static int bt_size(void) +{ + return sizeof(struct si_sm_data); +} + +struct si_sm_handlers bt_smi_handlers = { + .init_data = bt_init_data, + .start_transaction = bt_start_transaction, + .get_result = bt_get_result, + .event = bt_event, + .detect = bt_detect, + .cleanup = bt_cleanup, + .size = bt_size, +}; diff --git a/kernel/drivers/char/ipmi/ipmi_devintf.c b/kernel/drivers/char/ipmi/ipmi_devintf.c new file mode 100644 index 000000000..178657453 --- /dev/null +++ b/kernel/drivers/char/ipmi/ipmi_devintf.c @@ -0,0 +1,992 @@ +/* + * ipmi_devintf.c + * + * Linux device interface for the IPMI message handler. + * + * Author: MontaVista Software, Inc. + * Corey Minyard + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/compat.h>
+
+struct ipmi_file_private
+{
+	ipmi_user_t user;
+	spinlock_t recv_msg_lock;
+	struct list_head recv_msgs;
+	struct file *file;
+	struct fasync_struct *fasync_queue;
+	wait_queue_head_t wait;
+	struct mutex recv_mutex;
+	int default_retries;
+	unsigned int default_retry_time_ms;
+};
+
+static DEFINE_MUTEX(ipmi_mutex);
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+				 void *handler_data)
+{
+	struct ipmi_file_private *priv = handler_data;
+	int was_empty;
+	unsigned long flags;
+
+	spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+
+	was_empty = list_empty(&(priv->recv_msgs));
+	list_add_tail(&(msg->link), &(priv->recv_msgs));
+
+	if (was_empty) {
+		wake_up_interruptible(&priv->wait);
+		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+	}
+
+	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+}
+
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	poll_wait(file, &priv->wait, wait);
+
+	spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+	if (!list_empty(&(priv->recv_msgs)))
+		mask |= (POLLIN | POLLRDNORM);
+
+	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+	return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	int result;
+
+	mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
+	result = fasync_helper(fd, file, on, &priv->fasync_queue);
+	mutex_unlock(&ipmi_mutex);
+
+	return (result);
+}
+
+static struct ipmi_user_hndl ipmi_hndlrs =
+{
+	.ipmi_recv_hndl	= file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+	int if_num = iminor(inode);
+	int rv;
+	struct ipmi_file_private *priv;
+
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_lock(&ipmi_mutex);
+	priv->file = file;
+
+	rv = ipmi_create_user(if_num,
+			      &ipmi_hndlrs,
+			      priv,
+			      &(priv->user));
+	if (rv) {
+		kfree(priv);
+		goto out;
+	}
+
+	file->private_data = priv;
+
+	spin_lock_init(&(priv->recv_msg_lock));
+	INIT_LIST_HEAD(&(priv->recv_msgs));
+	init_waitqueue_head(&priv->wait);
+	priv->fasync_queue = NULL;
+	mutex_init(&priv->recv_mutex);
+
+	/*
+	 * Use the low-level defaults.
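+	 * A retry count of -1 and a retry time of 0 tell the message
+	 * handler to fall back to its own built-in defaults.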
*/ + priv->default_retries = -1; + priv->default_retry_time_ms = 0; + +out: + mutex_unlock(&ipmi_mutex); + return rv; +} + +static int ipmi_release(struct inode *inode, struct file *file) +{ + struct ipmi_file_private *priv = file->private_data; + int rv; + struct ipmi_recv_msg *msg, *next; + + rv = ipmi_destroy_user(priv->user); + if (rv) + return rv; + + list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) + ipmi_free_recv_msg(msg); + + + kfree(priv); + + return 0; +} + +static int handle_send_req(ipmi_user_t user, + struct ipmi_req *req, + int retries, + unsigned int retry_time_ms) +{ + int rv; + struct ipmi_addr addr; + struct kernel_ipmi_msg msg; + + if (req->addr_len > sizeof(struct ipmi_addr)) + return -EINVAL; + + if (copy_from_user(&addr, req->addr, req->addr_len)) + return -EFAULT; + + msg.netfn = req->msg.netfn; + msg.cmd = req->msg.cmd; + msg.data_len = req->msg.data_len; + msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!msg.data) + return -ENOMEM; + + /* From here out we cannot return, we must jump to "out" for + error exits to free msgdata. */ + + rv = ipmi_validate_addr(&addr, req->addr_len); + if (rv) + goto out; + + if (req->msg.data != NULL) { + if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) { + rv = -EMSGSIZE; + goto out; + } + + if (copy_from_user(msg.data, + req->msg.data, + req->msg.data_len)) + { + rv = -EFAULT; + goto out; + } + } else { + msg.data_len = 0; + } + + rv = ipmi_request_settime(user, + &addr, + req->msgid, + &msg, + NULL, + 0, + retries, + retry_time_ms); + out: + kfree(msg.data); + return rv; +} + +static int ipmi_ioctl(struct file *file, + unsigned int cmd, + unsigned long data) +{ + int rv = -EINVAL; + struct ipmi_file_private *priv = file->private_data; + void __user *arg = (void __user *)data; + + switch (cmd) + { + case IPMICTL_SEND_COMMAND: + { + struct ipmi_req req; + + if (copy_from_user(&req, arg, sizeof(req))) { + rv = -EFAULT; + break; + } + + rv = handle_send_req(priv->user, + &req, + priv->default_retries, + priv->default_retry_time_ms); + break; + } + + case IPMICTL_SEND_COMMAND_SETTIME: + { + struct ipmi_req_settime req; + + if (copy_from_user(&req, arg, sizeof(req))) { + rv = -EFAULT; + break; + } + + rv = handle_send_req(priv->user, + &req.req, + req.retries, + req.retry_time_ms); + break; + } + + case IPMICTL_RECEIVE_MSG: + case IPMICTL_RECEIVE_MSG_TRUNC: + { + struct ipmi_recv rsp; + int addr_len; + struct list_head *entry; + struct ipmi_recv_msg *msg; + unsigned long flags; + + + rv = 0; + if (copy_from_user(&rsp, arg, sizeof(rsp))) { + rv = -EFAULT; + break; + } + + /* We claim a mutex because we don't want two + users getting something from the queue at a time. + Since we have to release the spinlock before we can + copy the data to the user, it's possible another + user will grab something from the queue, too. Then + the messages might get out of order if something + fails and the message gets put back onto the + queue. This mutex prevents that problem. */ + mutex_lock(&priv->recv_mutex); + + /* Grab the message off the list. 
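+		 * Only the list manipulation happens under the spinlock;
+		 * the copies to user space below run after it is dropped,
+		 * serialized by recv_mutex.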
*/ + spin_lock_irqsave(&(priv->recv_msg_lock), flags); + if (list_empty(&(priv->recv_msgs))) { + spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); + rv = -EAGAIN; + goto recv_err; + } + entry = priv->recv_msgs.next; + msg = list_entry(entry, struct ipmi_recv_msg, link); + list_del(entry); + spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); + + addr_len = ipmi_addr_length(msg->addr.addr_type); + if (rsp.addr_len < addr_len) + { + rv = -EINVAL; + goto recv_putback_on_err; + } + + if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) { + rv = -EFAULT; + goto recv_putback_on_err; + } + rsp.addr_len = addr_len; + + rsp.recv_type = msg->recv_type; + rsp.msgid = msg->msgid; + rsp.msg.netfn = msg->msg.netfn; + rsp.msg.cmd = msg->msg.cmd; + + if (msg->msg.data_len > 0) { + if (rsp.msg.data_len < msg->msg.data_len) { + rv = -EMSGSIZE; + if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) { + msg->msg.data_len = rsp.msg.data_len; + } else { + goto recv_putback_on_err; + } + } + + if (copy_to_user(rsp.msg.data, + msg->msg.data, + msg->msg.data_len)) + { + rv = -EFAULT; + goto recv_putback_on_err; + } + rsp.msg.data_len = msg->msg.data_len; + } else { + rsp.msg.data_len = 0; + } + + if (copy_to_user(arg, &rsp, sizeof(rsp))) { + rv = -EFAULT; + goto recv_putback_on_err; + } + + mutex_unlock(&priv->recv_mutex); + ipmi_free_recv_msg(msg); + break; + + recv_putback_on_err: + /* If we got an error, put the message back onto + the head of the queue. */ + spin_lock_irqsave(&(priv->recv_msg_lock), flags); + list_add(entry, &(priv->recv_msgs)); + spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); + mutex_unlock(&priv->recv_mutex); + break; + + recv_err: + mutex_unlock(&priv->recv_mutex); + break; + } + + case IPMICTL_REGISTER_FOR_CMD: + { + struct ipmi_cmdspec val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd, + IPMI_CHAN_ALL); + break; + } + + case IPMICTL_UNREGISTER_FOR_CMD: + { + struct ipmi_cmdspec val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd, + IPMI_CHAN_ALL); + break; + } + + case IPMICTL_REGISTER_FOR_CMD_CHANS: + { + struct ipmi_cmdspec_chans val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd, + val.chans); + break; + } + + case IPMICTL_UNREGISTER_FOR_CMD_CHANS: + { + struct ipmi_cmdspec_chans val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd, + val.chans); + break; + } + + case IPMICTL_SET_GETS_EVENTS_CMD: + { + int val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_set_gets_events(priv->user, val); + break; + } + + /* The next four are legacy, not per-channel. 
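+	 * They implicitly act on channel 0; the *_CHANNEL_* variants
+	 * below take an explicit channel in struct
+	 * ipmi_channel_lun_address_set.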
*/ + case IPMICTL_SET_MY_ADDRESS_CMD: + { + unsigned int val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_set_my_address(priv->user, 0, val); + break; + } + + case IPMICTL_GET_MY_ADDRESS_CMD: + { + unsigned int val; + unsigned char rval; + + rv = ipmi_get_my_address(priv->user, 0, &rval); + if (rv) + break; + + val = rval; + + if (copy_to_user(arg, &val, sizeof(val))) { + rv = -EFAULT; + break; + } + break; + } + + case IPMICTL_SET_MY_LUN_CMD: + { + unsigned int val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_set_my_LUN(priv->user, 0, val); + break; + } + + case IPMICTL_GET_MY_LUN_CMD: + { + unsigned int val; + unsigned char rval; + + rv = ipmi_get_my_LUN(priv->user, 0, &rval); + if (rv) + break; + + val = rval; + + if (copy_to_user(arg, &val, sizeof(val))) { + rv = -EFAULT; + break; + } + break; + } + + case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD: + { + struct ipmi_channel_lun_address_set val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + return ipmi_set_my_address(priv->user, val.channel, val.value); + break; + } + + case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD: + { + struct ipmi_channel_lun_address_set val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_get_my_address(priv->user, val.channel, &val.value); + if (rv) + break; + + if (copy_to_user(arg, &val, sizeof(val))) { + rv = -EFAULT; + break; + } + break; + } + + case IPMICTL_SET_MY_CHANNEL_LUN_CMD: + { + struct ipmi_channel_lun_address_set val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_set_my_LUN(priv->user, val.channel, val.value); + break; + } + + case IPMICTL_GET_MY_CHANNEL_LUN_CMD: + { + struct ipmi_channel_lun_address_set val; + + if (copy_from_user(&val, arg, sizeof(val))) { + rv = -EFAULT; + break; + } + + rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value); + if (rv) + break; + + if (copy_to_user(arg, &val, sizeof(val))) { + rv = -EFAULT; + break; + } + break; + } + + case IPMICTL_SET_TIMING_PARMS_CMD: + { + struct ipmi_timing_parms parms; + + if (copy_from_user(&parms, arg, sizeof(parms))) { + rv = -EFAULT; + break; + } + + priv->default_retries = parms.retries; + priv->default_retry_time_ms = parms.retry_time_ms; + rv = 0; + break; + } + + case IPMICTL_GET_TIMING_PARMS_CMD: + { + struct ipmi_timing_parms parms; + + parms.retries = priv->default_retries; + parms.retry_time_ms = priv->default_retry_time_ms; + + if (copy_to_user(arg, &parms, sizeof(parms))) { + rv = -EFAULT; + break; + } + + rv = 0; + break; + } + + case IPMICTL_GET_MAINTENANCE_MODE_CMD: + { + int mode; + + mode = ipmi_get_maintenance_mode(priv->user); + if (copy_to_user(arg, &mode, sizeof(mode))) { + rv = -EFAULT; + break; + } + rv = 0; + break; + } + + case IPMICTL_SET_MAINTENANCE_MODE_CMD: + { + int mode; + + if (copy_from_user(&mode, arg, sizeof(mode))) { + rv = -EFAULT; + break; + } + rv = ipmi_set_maintenance_mode(priv->user, mode); + break; + } + } + + return rv; +} + +/* + * Note: it doesn't make sense to take the BKL here but + * not in compat_ipmi_ioctl. -arnd + */ +static long ipmi_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long data) +{ + int ret; + + mutex_lock(&ipmi_mutex); + ret = ipmi_ioctl(file, cmd, data); + mutex_unlock(&ipmi_mutex); + + return ret; +} + +#ifdef CONFIG_COMPAT + +/* + * The following code contains code for supporting 32-bit compatible + * ioctls on 64-bit kernels. 
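+ * (Only pointer and long fields change width between the two ABIs, so
+ * each user-visible struct is mirrored with compat_* members below and
+ * converted field by field through compat_ptr().)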
This allows running 32-bit apps on the + * 64-bit kernel + */ +#define COMPAT_IPMICTL_SEND_COMMAND \ + _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req) +#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \ + _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime) +#define COMPAT_IPMICTL_RECEIVE_MSG \ + _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv) +#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \ + _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv) + +struct compat_ipmi_msg { + u8 netfn; + u8 cmd; + u16 data_len; + compat_uptr_t data; +}; + +struct compat_ipmi_req { + compat_uptr_t addr; + compat_uint_t addr_len; + compat_long_t msgid; + struct compat_ipmi_msg msg; +}; + +struct compat_ipmi_recv { + compat_int_t recv_type; + compat_uptr_t addr; + compat_uint_t addr_len; + compat_long_t msgid; + struct compat_ipmi_msg msg; +}; + +struct compat_ipmi_req_settime { + struct compat_ipmi_req req; + compat_int_t retries; + compat_uint_t retry_time_ms; +}; + +/* + * Define some helper functions for copying IPMI data + */ +static long get_compat_ipmi_msg(struct ipmi_msg *p64, + struct compat_ipmi_msg __user *p32) +{ + compat_uptr_t tmp; + + if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + __get_user(p64->netfn, &p32->netfn) || + __get_user(p64->cmd, &p32->cmd) || + __get_user(p64->data_len, &p32->data_len) || + __get_user(tmp, &p32->data)) + return -EFAULT; + p64->data = compat_ptr(tmp); + return 0; +} + +static long put_compat_ipmi_msg(struct ipmi_msg *p64, + struct compat_ipmi_msg __user *p32) +{ + if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + __put_user(p64->netfn, &p32->netfn) || + __put_user(p64->cmd, &p32->cmd) || + __put_user(p64->data_len, &p32->data_len)) + return -EFAULT; + return 0; +} + +static long get_compat_ipmi_req(struct ipmi_req *p64, + struct compat_ipmi_req __user *p32) +{ + + compat_uptr_t tmp; + + if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + __get_user(tmp, &p32->addr) || + __get_user(p64->addr_len, &p32->addr_len) || + __get_user(p64->msgid, &p32->msgid) || + get_compat_ipmi_msg(&p64->msg, &p32->msg)) + return -EFAULT; + p64->addr = compat_ptr(tmp); + return 0; +} + +static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64, + struct compat_ipmi_req_settime __user *p32) +{ + if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + get_compat_ipmi_req(&p64->req, &p32->req) || + __get_user(p64->retries, &p32->retries) || + __get_user(p64->retry_time_ms, &p32->retry_time_ms)) + return -EFAULT; + return 0; +} + +static long get_compat_ipmi_recv(struct ipmi_recv *p64, + struct compat_ipmi_recv __user *p32) +{ + compat_uptr_t tmp; + + if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + __get_user(p64->recv_type, &p32->recv_type) || + __get_user(tmp, &p32->addr) || + __get_user(p64->addr_len, &p32->addr_len) || + __get_user(p64->msgid, &p32->msgid) || + get_compat_ipmi_msg(&p64->msg, &p32->msg)) + return -EFAULT; + p64->addr = compat_ptr(tmp); + return 0; +} + +static long put_compat_ipmi_recv(struct ipmi_recv *p64, + struct compat_ipmi_recv __user *p32) +{ + if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + __put_user(p64->recv_type, &p32->recv_type) || + __put_user(p64->addr_len, &p32->addr_len) || + __put_user(p64->msgid, &p32->msgid) || + put_compat_ipmi_msg(&p64->msg, &p32->msg)) + return -EFAULT; + return 0; +} + +/* + * Handle compatibility ioctls + */ +static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int rc; + struct ipmi_file_private *priv = filep->private_data; + + switch(cmd) { + case 
COMPAT_IPMICTL_SEND_COMMAND: + { + struct ipmi_req rp; + + if (get_compat_ipmi_req(&rp, compat_ptr(arg))) + return -EFAULT; + + return handle_send_req(priv->user, &rp, + priv->default_retries, + priv->default_retry_time_ms); + } + case COMPAT_IPMICTL_SEND_COMMAND_SETTIME: + { + struct ipmi_req_settime sp; + + if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg))) + return -EFAULT; + + return handle_send_req(priv->user, &sp.req, + sp.retries, sp.retry_time_ms); + } + case COMPAT_IPMICTL_RECEIVE_MSG: + case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC: + { + struct ipmi_recv __user *precv64; + struct ipmi_recv recv64; + + memset(&recv64, 0, sizeof(recv64)); + if (get_compat_ipmi_recv(&recv64, compat_ptr(arg))) + return -EFAULT; + + precv64 = compat_alloc_user_space(sizeof(recv64)); + if (copy_to_user(precv64, &recv64, sizeof(recv64))) + return -EFAULT; + + rc = ipmi_ioctl(filep, + ((cmd == COMPAT_IPMICTL_RECEIVE_MSG) + ? IPMICTL_RECEIVE_MSG + : IPMICTL_RECEIVE_MSG_TRUNC), + (unsigned long) precv64); + if (rc != 0) + return rc; + + if (copy_from_user(&recv64, precv64, sizeof(recv64))) + return -EFAULT; + + if (put_compat_ipmi_recv(&recv64, compat_ptr(arg))) + return -EFAULT; + + return rc; + } + default: + return ipmi_ioctl(filep, cmd, arg); + } +} + +static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&ipmi_mutex); + ret = compat_ipmi_ioctl(filep, cmd, arg); + mutex_unlock(&ipmi_mutex); + + return ret; +} +#endif + +static const struct file_operations ipmi_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ipmi_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unlocked_compat_ipmi_ioctl, +#endif + .open = ipmi_open, + .release = ipmi_release, + .fasync = ipmi_fasync, + .poll = ipmi_poll, + .llseek = noop_llseek, +}; + +#define DEVICE_NAME "ipmidev" + +static int ipmi_major; +module_param(ipmi_major, int, 0); +MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By" + " default, or if you set it to zero, it will choose the next" + " available device. Setting it to -1 will disable the" + " interface. Other values will set the major device number" + " to that value."); + +/* Keep track of the devices that are registered. 
+ */
+struct ipmi_reg_list {
+	dev_t dev;
+	struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
+static struct class *ipmi_class;
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+	dev_t dev = MKDEV(ipmi_major, if_num);
+	struct ipmi_reg_list *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		printk(KERN_ERR "ipmi_devintf: Unable to create the"
+		       " ipmi class device link\n");
+		return;
+	}
+	entry->dev = dev;
+
+	mutex_lock(&reg_list_mutex);
+	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
+	list_add(&entry->link, &reg_list);
+	mutex_unlock(&reg_list_mutex);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+	dev_t dev = MKDEV(ipmi_major, if_num);
+	struct ipmi_reg_list *entry;
+
+	mutex_lock(&reg_list_mutex);
+	list_for_each_entry(entry, &reg_list, link) {
+		if (entry->dev == dev) {
+			list_del(&entry->link);
+			kfree(entry);
+			break;
+		}
+	}
+	device_destroy(ipmi_class, dev);
+	mutex_unlock(&reg_list_mutex);
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+	.owner = THIS_MODULE,
+	.new_smi = ipmi_new_smi,
+	.smi_gone = ipmi_smi_gone,
+};
+
+static int __init init_ipmi_devintf(void)
+{
+	int rv;
+
+	if (ipmi_major < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "ipmi device interface\n");
+
+	ipmi_class = class_create(THIS_MODULE, "ipmi");
+	if (IS_ERR(ipmi_class)) {
+		printk(KERN_ERR "ipmi: can't register device class\n");
+		return PTR_ERR(ipmi_class);
+	}
+
+	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+	if (rv < 0) {
+		class_destroy(ipmi_class);
+		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
+		return rv;
+	}
+
+	if (ipmi_major == 0) {
+		ipmi_major = rv;
+	}
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+	if (rv) {
+		unregister_chrdev(ipmi_major, DEVICE_NAME);
+		class_destroy(ipmi_class);
+		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
+		return rv;
+	}
+
+	return 0;
+}
+module_init(init_ipmi_devintf);
+
+static void __exit cleanup_ipmi(void)
+{
+	struct ipmi_reg_list *entry, *entry2;
+	mutex_lock(&reg_list_mutex);
+	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+		list_del(&entry->link);
+		device_destroy(ipmi_class, entry->dev);
+		kfree(entry);
+	}
+	mutex_unlock(&reg_list_mutex);
+	class_destroy(ipmi_class);
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
+MODULE_ALIAS("platform:ipmi_si");
diff --git a/kernel/drivers/char/ipmi/ipmi_kcs_sm.c b/kernel/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 000000000..8c25f5968
--- /dev/null
+++ b/kernel/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,551 @@
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim.  If you have questions about the states, see
+ * that document.
+ */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* kcs_debug is a bit-field
+ *	KCS_DEBUG_ENABLE -	turned on for now
+ *	KCS_DEBUG_MSG -		commands and their responses
+ *	KCS_DEBUG_STATES -	state machine
+ */
+#define KCS_DEBUG_STATES 4
+#define KCS_DEBUG_MSG 2
+#define KCS_DEBUG_ENABLE 1
+
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+	/* The KCS interface is currently doing nothing. */
+	KCS_IDLE,
+
+	/*
+	 * We are starting an operation.  The data is in the output
+	 * buffer, but nothing has been done to the interface yet.  This
+	 * was added to the state machine in the spec to wait for the
+	 * initial IBF.
+	 */
+	KCS_START_OP,
+
+	/* We have written a write cmd to the interface. */
+	KCS_WAIT_WRITE_START,
+
+	/* We are writing bytes to the interface. */
+	KCS_WAIT_WRITE,
+
+	/*
+	 * We have written the write end cmd to the interface, and
+	 * still need to write the last byte.
+	 */
+	KCS_WAIT_WRITE_END,
+
+	/* We are waiting to read data from the interface. */
+	KCS_WAIT_READ,
+
+	/*
+	 * State to transition to the error handler, this was added to
+	 * the state machine in the spec to be sure IBF was there.
+	 */
+	KCS_ERROR0,
+
+	/*
+	 * First stage error handler, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR1,
+
+	/*
+	 * The abort cmd has been written, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR2,
+
+	/*
+	 * We wrote some data to the interface, wait for it to switch
+	 * to read mode.
+	 */
+	KCS_ERROR3,
+
+	/* The hardware failed to follow the state machine. */
+	KCS_HOSED
+};
+
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
+
+/*
+ * Timeouts in microseconds.
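+ * The "time" argument passed to the state machine is the elapsed time
+ * in microseconds, so the IBF/OBF budgets below come to five seconds
+ * each; ERROR0_OBF_WAIT_JIFFIES is the one jiffies-based value.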
*/ +#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC) +#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC) +#define MAX_ERROR_RETRIES 10 +#define ERROR0_OBF_WAIT_JIFFIES (2*HZ) + +struct si_sm_data { + enum kcs_states state; + struct si_sm_io *io; + unsigned char write_data[MAX_KCS_WRITE_SIZE]; + int write_pos; + int write_count; + int orig_write_count; + unsigned char read_data[MAX_KCS_READ_SIZE]; + int read_pos; + int truncated; + + unsigned int error_retries; + long ibf_timeout; + long obf_timeout; + unsigned long error0_timeout; +}; + +static unsigned int init_kcs_data(struct si_sm_data *kcs, + struct si_sm_io *io) +{ + kcs->state = KCS_IDLE; + kcs->io = io; + kcs->write_pos = 0; + kcs->write_count = 0; + kcs->orig_write_count = 0; + kcs->read_pos = 0; + kcs->error_retries = 0; + kcs->truncated = 0; + kcs->ibf_timeout = IBF_RETRY_TIMEOUT; + kcs->obf_timeout = OBF_RETRY_TIMEOUT; + + /* Reserve 2 I/O bytes. */ + return 2; +} + +static inline unsigned char read_status(struct si_sm_data *kcs) +{ + return kcs->io->inputb(kcs->io, 1); +} + +static inline unsigned char read_data(struct si_sm_data *kcs) +{ + return kcs->io->inputb(kcs->io, 0); +} + +static inline void write_cmd(struct si_sm_data *kcs, unsigned char data) +{ + kcs->io->outputb(kcs->io, 1, data); +} + +static inline void write_data(struct si_sm_data *kcs, unsigned char data) +{ + kcs->io->outputb(kcs->io, 0, data); +} + +/* Control codes. */ +#define KCS_GET_STATUS_ABORT 0x60 +#define KCS_WRITE_START 0x61 +#define KCS_WRITE_END 0x62 +#define KCS_READ_BYTE 0x68 + +/* Status bits. */ +#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03) +#define KCS_IDLE_STATE 0 +#define KCS_READ_STATE 1 +#define KCS_WRITE_STATE 2 +#define KCS_ERROR_STATE 3 +#define GET_STATUS_ATN(status) ((status) & 0x04) +#define GET_STATUS_IBF(status) ((status) & 0x02) +#define GET_STATUS_OBF(status) ((status) & 0x01) + + +static inline void write_next_byte(struct si_sm_data *kcs) +{ + write_data(kcs, kcs->write_data[kcs->write_pos]); + (kcs->write_pos)++; + (kcs->write_count)--; +} + +static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) +{ + (kcs->error_retries)++; + if (kcs->error_retries > MAX_ERROR_RETRIES) { + if (kcs_debug & KCS_DEBUG_ENABLE) + printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", + reason); + kcs->state = KCS_HOSED; + } else { + kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; + kcs->state = KCS_ERROR0; + } +} + +static inline void read_next_byte(struct si_sm_data *kcs) +{ + if (kcs->read_pos >= MAX_KCS_READ_SIZE) { + /* Throw the data away and mark it truncated. 
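+		 * The data register still has to be read so OBF clears
+		 * and the BMC can present the next byte.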
*/ + read_data(kcs); + kcs->truncated = 1; + } else { + kcs->read_data[kcs->read_pos] = read_data(kcs); + (kcs->read_pos)++; + } + write_data(kcs, KCS_READ_BYTE); +} + +static inline int check_ibf(struct si_sm_data *kcs, unsigned char status, + long time) +{ + if (GET_STATUS_IBF(status)) { + kcs->ibf_timeout -= time; + if (kcs->ibf_timeout < 0) { + start_error_recovery(kcs, "IBF not ready in time"); + kcs->ibf_timeout = IBF_RETRY_TIMEOUT; + return 1; + } + return 0; + } + kcs->ibf_timeout = IBF_RETRY_TIMEOUT; + return 1; +} + +static inline int check_obf(struct si_sm_data *kcs, unsigned char status, + long time) +{ + if (!GET_STATUS_OBF(status)) { + kcs->obf_timeout -= time; + if (kcs->obf_timeout < 0) { + kcs->obf_timeout = OBF_RETRY_TIMEOUT; + start_error_recovery(kcs, "OBF not ready in time"); + return 1; + } + return 0; + } + kcs->obf_timeout = OBF_RETRY_TIMEOUT; + return 1; +} + +static void clear_obf(struct si_sm_data *kcs, unsigned char status) +{ + if (GET_STATUS_OBF(status)) + read_data(kcs); +} + +static void restart_kcs_transaction(struct si_sm_data *kcs) +{ + kcs->write_count = kcs->orig_write_count; + kcs->write_pos = 0; + kcs->read_pos = 0; + kcs->state = KCS_WAIT_WRITE_START; + kcs->ibf_timeout = IBF_RETRY_TIMEOUT; + kcs->obf_timeout = OBF_RETRY_TIMEOUT; + write_cmd(kcs, KCS_WRITE_START); +} + +static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, + unsigned int size) +{ + unsigned int i; + + if (size < 2) + return IPMI_REQ_LEN_INVALID_ERR; + if (size > MAX_KCS_WRITE_SIZE) + return IPMI_REQ_LEN_EXCEEDED_ERR; + + if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) + return IPMI_NOT_IN_MY_STATE_ERR; + + if (kcs_debug & KCS_DEBUG_MSG) { + printk(KERN_DEBUG "start_kcs_transaction -"); + for (i = 0; i < size; i++) + printk(" %02x", (unsigned char) (data [i])); + printk("\n"); + } + kcs->error_retries = 0; + memcpy(kcs->write_data, data, size); + kcs->write_count = size; + kcs->orig_write_count = size; + kcs->write_pos = 0; + kcs->read_pos = 0; + kcs->state = KCS_START_OP; + kcs->ibf_timeout = IBF_RETRY_TIMEOUT; + kcs->obf_timeout = OBF_RETRY_TIMEOUT; + return 0; +} + +static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, + unsigned int length) +{ + if (length < kcs->read_pos) { + kcs->read_pos = length; + kcs->truncated = 1; + } + + memcpy(data, kcs->read_data, kcs->read_pos); + + if ((length >= 3) && (kcs->read_pos < 3)) { + /* Guarantee that we return at least 3 bytes, with an + error in the third byte if it is too short. */ + data[2] = IPMI_ERR_UNSPECIFIED; + kcs->read_pos = 3; + } + if (kcs->truncated) { + /* + * Report a truncated error. We might overwrite + * another error, but that's too bad, the user needs + * to know it was truncated. + */ + data[2] = IPMI_ERR_MSG_TRUNCATED; + kcs->truncated = 0; + } + + return kcs->read_pos; +} + +/* + * This implements the state machine defined in the IPMI manual, see + * that for details on how this works. Divide that flowchart into + * sections delimited by "Wait for IBF" and this will become clear. + */ +static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) +{ + unsigned char status; + unsigned char state; + + status = read_status(kcs); + + if (kcs_debug & KCS_DEBUG_STATES) + printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status); + + /* All states wait for ibf, so just do it here. */ + if (!check_ibf(kcs, status, time)) + return SI_SM_CALL_WITH_DELAY; + + /* Just about everything looks at the KCS state, so grab that, too. 
+	 */
+	state = GET_STATUS_STATE(status);
+
+	switch (kcs->state) {
+	case KCS_IDLE:
+		/* If there's an interrupt source, turn it off. */
+		clear_obf(kcs, status);
+
+		if (GET_STATUS_ATN(status))
+			return SI_SM_ATTN;
+		else
+			return SI_SM_IDLE;
+
+	case KCS_START_OP:
+		if (state != KCS_IDLE_STATE) {
+			start_error_recovery(kcs,
+					     "State machine not idle at start");
+			break;
+		}
+
+		clear_obf(kcs, status);
+		write_cmd(kcs, KCS_WRITE_START);
+		kcs->state = KCS_WAIT_WRITE_START;
+		break;
+
+	case KCS_WAIT_WRITE_START:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(
+				kcs,
+				"Not in write state at write start");
+			break;
+		}
+		read_data(kcs);
+		if (kcs->write_count == 1) {
+			write_cmd(kcs, KCS_WRITE_END);
+			kcs->state = KCS_WAIT_WRITE_END;
+		} else {
+			write_next_byte(kcs);
+			kcs->state = KCS_WAIT_WRITE;
+		}
+		break;
+
+	case KCS_WAIT_WRITE:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in write state for write");
+			break;
+		}
+		clear_obf(kcs, status);
+		if (kcs->write_count == 1) {
+			write_cmd(kcs, KCS_WRITE_END);
+			kcs->state = KCS_WAIT_WRITE_END;
+		} else {
+			write_next_byte(kcs);
+		}
+		break;
+
+	case KCS_WAIT_WRITE_END:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in write state"
+					     " for write end");
+			break;
+		}
+		clear_obf(kcs, status);
+		write_next_byte(kcs);
+		kcs->state = KCS_WAIT_READ;
+		break;
+
+	case KCS_WAIT_READ:
+		if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+			start_error_recovery(
+				kcs,
+				"Not in read or idle in read state");
+			break;
+		}
+
+		if (state == KCS_READ_STATE) {
+			if (!check_obf(kcs, status, time))
+				return SI_SM_CALL_WITH_DELAY;
+			read_next_byte(kcs);
+		} else {
+			/*
+			 * We don't implement this exactly like the state
+			 * machine in the spec.  Some broken hardware
+			 * does not write the final dummy byte to the
+			 * read register.  Thus obf will never go high
+			 * here.  We just go straight to idle, and we
+			 * handle clearing out obf in idle state if it
+			 * happens to come in.
+			 */
+			clear_obf(kcs, status);
+			kcs->orig_write_count = 0;
+			kcs->state = KCS_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+		break;
+
+	case KCS_ERROR0:
+		clear_obf(kcs, status);
+		status = read_status(kcs);
+		if (GET_STATUS_OBF(status))
+			/* controller isn't responding */
+			if (time_before(jiffies, kcs->error0_timeout))
+				return SI_SM_CALL_WITH_TICK_DELAY;
+		write_cmd(kcs, KCS_GET_STATUS_ABORT);
+		kcs->state = KCS_ERROR1;
+		break;
+
+	case KCS_ERROR1:
+		clear_obf(kcs, status);
+		write_data(kcs, 0);
+		kcs->state = KCS_ERROR2;
+		break;
+
+	case KCS_ERROR2:
+		if (state != KCS_READ_STATE) {
+			start_error_recovery(kcs,
+					     "Not in read state for error2");
+			break;
+		}
+		if (!check_obf(kcs, status, time))
+			return SI_SM_CALL_WITH_DELAY;
+
+		clear_obf(kcs, status);
+		write_data(kcs, KCS_READ_BYTE);
+		kcs->state = KCS_ERROR3;
+		break;
+
+	case KCS_ERROR3:
+		if (state != KCS_IDLE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in idle state for error3");
+			break;
+		}
+
+		if (!check_obf(kcs, status, time))
+			return SI_SM_CALL_WITH_DELAY;
+
+		clear_obf(kcs, status);
+		if (kcs->orig_write_count) {
+			restart_kcs_transaction(kcs);
+		} else {
+			kcs->state = KCS_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+		break;
+
+	case KCS_HOSED:
+		break;
+	}
+
+	if (kcs->state == KCS_HOSED) {
+		init_kcs_data(kcs, kcs->io);
+		return SI_SM_HOSED;
+	}
+
+	return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+	/*
+	 * It's impossible for the KCS status register to be all 1's,
+	 * (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
+	if (read_status(kcs) == 0xff)
+		return 1;
+
+	return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+struct si_sm_handlers kcs_smi_handlers = {
+	.init_data = init_kcs_data,
+	.start_transaction = start_kcs_transaction,
+	.get_result = get_kcs_result,
+	.event = kcs_event,
+	.detect = kcs_detect,
+	.cleanup = kcs_cleanup,
+	.size = kcs_size,
+};
diff --git a/kernel/drivers/char/ipmi/ipmi_msghandler.c b/kernel/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 000000000..bf75f6361
--- /dev/null
+++ b/kernel/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,4615 @@
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+
+#define PFX "IPMI message handler: "
+
+#define IPMI_DRIVER_VERSION "39.2"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
+static int handle_one_recv_msg(ipmi_smi_t intf,
+			       struct ipmi_smi_msg *msg);
+
+static int initialized;
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *proc_ipmi_root;
+#endif /* CONFIG_PROC_FS */
+
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
+#define MAX_EVENTS_IN_QUEUE 25
+
+/*
+ * Don't let a message sit in a queue forever, always time it with at least
+ * the max message timer.  This is in milliseconds.
+ */
+#define MAX_MSG_TIMEOUT 60000
+
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+
+/*
+ * The main "user" data structure.
+ */
+struct ipmi_user {
+	struct list_head link;
+
+	/* Set to false when the user is destroyed. */
+	bool valid;
+
+	struct kref refcount;
+
+	/* The upper layer that handles receive messages. */
+	struct ipmi_user_hndl *handler;
+	void *handler_data;
+
+	/* The interface this user is bound to. */
+	ipmi_smi_t intf;
+
+	/* Does this interface receive IPMI events? */
+	bool gets_events;
+};
+
+struct cmd_rcvr {
+	struct list_head link;
+
+	ipmi_user_t user;
+	unsigned char netfn;
+	unsigned char cmd;
+	unsigned int chans;
+
+	/*
+	 * This is used to form a linked list during mass deletion.
+	 * Since this is in an RCU list, we cannot use the link above
+	 * or change any data until the RCU period completes.  So we
+	 * use this next variable during mass deletion so we can have
+	 * a list and don't have to wait and restart the search on
+	 * every individual deletion of a command.
+	 */
+	struct cmd_rcvr *next;
+};
+
+struct seq_table {
+	unsigned int inuse : 1;
+	unsigned int broadcast : 1;
+
+	unsigned long timeout;
+	unsigned long orig_timeout;
+	unsigned int retries_left;
+
+	/*
+	 * To verify on an incoming send message response that this is
+	 * the message that the response is for, we keep a sequence id
+	 * and increment it every time we send a message.
+	 */
+	long seqid;
+
+	/*
+	 * This is held so we can properly respond to the message on a
+	 * timeout, and it is used to hold the temporary data for
+	 * retransmission, too.
+	 */
+	struct ipmi_recv_msg *recv_msg;
+};
+
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
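+ * The (masked) sequence number lives in bits 26..31 and the slot's
+ * seqid in the low 26 bits; for example, seq 5 with seqid 0x123 is
+ * stored as (5 << 26) | 0x123 == 0x14000123.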
+ */
+#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+	do {								\
+		seq = ((msgid >> 26) & 0x3f);				\
+		seqid = (msgid & 0x3ffffff);				\
+	} while (0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
+
+struct ipmi_channel {
+	unsigned char medium;
+	unsigned char protocol;
+
+	/*
+	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
+	 * but may be changed by the user.
+	 */
+	unsigned char address;
+
+	/*
+	 * My LUN.  This should generally stay the SMS LUN, but just in
+	 * case...
+	 */
+	unsigned char lun;
+};
+
+#ifdef CONFIG_PROC_FS
+struct ipmi_proc_entry {
+	char *name;
+	struct ipmi_proc_entry *next;
+};
+#endif
+
+struct bmc_device {
+	struct platform_device pdev;
+	struct ipmi_device_id id;
+	unsigned char guid[16];
+	int guid_set;
+	char name[16];
+	struct kref usecount;
+};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+	/* Commands we got from the user that were invalid. */
+	IPMI_STAT_sent_invalid_commands = 0,
+
+	/* Commands we sent to the MC. */
+	IPMI_STAT_sent_local_commands,
+
+	/* Responses from the MC that were delivered to a user. */
+	IPMI_STAT_handled_local_responses,
+
+	/* Responses from the MC that were not delivered to a user. */
+	IPMI_STAT_unhandled_local_responses,
+
+	/* Commands we sent out to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_commands,
+
+	/* Commands sent on the IPMB that had errors on the SEND CMD */
+	IPMI_STAT_sent_ipmb_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_ipmb_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_ipmb_commands,
+
+	/*
+	 * This is like above, but for broadcasts.  Broadcasts are
+	 * *not* included in the above count (they are expected to
+	 * time out).
+	 */
+	IPMI_STAT_timed_out_ipmb_broadcasts,
+
+	/* Responses I have sent to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_ipmb_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_ipmb_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_ipmb_responses,
+
+	/* Commands we sent out to the IPMB bus. */
+	IPMI_STAT_sent_lan_commands,
+
+	/* Commands sent on the IPMB that had errors on the SEND CMD */
+	IPMI_STAT_sent_lan_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_lan_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_lan_commands,
+
+	/* Responses I have sent to the IPMB bus. */
+	IPMI_STAT_sent_lan_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_lan_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_lan_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_lan_responses,
+
+	/* The command was delivered to the user. */
+	IPMI_STAT_handled_commands,
+
+	/* The command had invalid data in it. */
+	IPMI_STAT_invalid_commands,
+
+	/* The command didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_commands,
+
+	/* Invalid data in an event. */
+	IPMI_STAT_invalid_events,
+
+	/* Events that were received with the proper format.
+	 */
+	IPMI_STAT_events,
+
+	/* Retransmissions on IPMB that failed. */
+	IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+	/* Retransmissions on LAN that failed. */
+	IPMI_STAT_dropped_rexmit_lan_commands,
+
+	/* This *must* remain last, add new values above this. */
+	IPMI_NUM_STATS
+};
+
+
+#define IPMI_IPMB_NUM_SEQ 64
+#define IPMI_MAX_CHANNELS 16
+struct ipmi_smi {
+	/* What interface number are we? */
+	int intf_num;
+
+	struct kref refcount;
+
+	/* Set when the interface is being unregistered. */
+	bool in_shutdown;
+
+	/* Used for a list of interfaces. */
+	struct list_head link;
+
+	/*
+	 * The list of upper layers that are using me.  seq_lock
+	 * protects this.
+	 */
+	struct list_head users;
+
+	/* Information to supply to users. */
+	unsigned char ipmi_version_major;
+	unsigned char ipmi_version_minor;
+
+	/* Used for wake ups at startup. */
+	wait_queue_head_t waitq;
+
+	struct bmc_device *bmc;
+	char *my_dev_name;
+
+	/*
+	 * This is the lower-layer's sender routine.  Note that you
+	 * must either be holding the ipmi_interfaces_mutex or be in
+	 * an unpreemptible region to use this.  You must fetch the
+	 * value into a local variable and make sure it is not NULL.
+	 */
+	struct ipmi_smi_handlers *handlers;
+	void *send_info;
+
+#ifdef CONFIG_PROC_FS
+	/* A list of proc entries for this interface. */
+	struct mutex proc_entry_lock;
+	struct ipmi_proc_entry *proc_entries;
+#endif
+
+	/* Driver-model device for the system interface. */
+	struct device *si_dev;
+
+	/*
+	 * A table of sequence numbers for this interface.  We use the
+	 * sequence numbers for IPMB messages that go out of the
+	 * interface to match them up with their responses.  A routine
+	 * is called periodically to time the items in this list.
+	 */
+	spinlock_t seq_lock;
+	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+	int curr_seq;
+
+	/*
+	 * Messages queued for delivery.  If delivery fails (out of memory
+	 * for instance), they will stay in here to be processed later in a
+	 * periodic timer interrupt.  The tasklet is for handling received
+	 * messages directly from the handler.
+	 */
+	spinlock_t waiting_rcv_msgs_lock;
+	struct list_head waiting_rcv_msgs;
+	atomic_t watchdog_pretimeouts_to_deliver;
+	struct tasklet_struct recv_tasklet;
+
+	spinlock_t xmit_msgs_lock;
+	struct list_head xmit_msgs;
+	struct ipmi_smi_msg *curr_msg;
+	struct list_head hp_xmit_msgs;
+
+	/*
+	 * The list of command receivers that are registered for commands
+	 * on this interface.
+	 */
+	struct mutex cmd_rcvrs_mutex;
+	struct list_head cmd_rcvrs;
+
+	/*
+	 * Events that were queued because no one was there to receive
+	 * them.
+	 */
+	spinlock_t events_lock; /* For dealing with event stuff. */
+	struct list_head waiting_events;
+	unsigned int waiting_events_count; /* How many events in queue? */
+	char delivering_events;
+	char event_msg_printed;
+	atomic_t event_waiters;
+	unsigned int ticks_to_req_ev;
+	int last_needs_timer;
+
+	/*
+	 * The event receiver for my BMC, only really used at panic
+	 * shutdown as a place to store this.
+	 */
+	unsigned char event_receiver;
+	unsigned char event_receiver_lun;
+	unsigned char local_sel_device;
+	unsigned char local_event_generator;
+
+	/* For handling of maintenance mode. */
+	int maintenance_mode;
+	bool maintenance_mode_enable;
+	int auto_maintenance_timeout;
+	spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
+	/*
+	 * A cheap hack, if this is non-null and a message to an
+	 * interface comes in with a NULL user, call this routine with
+	 * it.  Note that the message will still be freed by the
+	 * caller.
This only works on the system interface. + */ + void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); + + /* + * When we are scanning the channels for an SMI, this will + * tell which channel we are scanning. + */ + int curr_channel; + + /* Channel information */ + struct ipmi_channel channels[IPMI_MAX_CHANNELS]; + + /* Proc FS stuff. */ + struct proc_dir_entry *proc_dir; + char proc_dir_name[10]; + + atomic_t stats[IPMI_NUM_STATS]; + + /* + * run_to_completion duplicate of smb_info, smi_info + * and ipmi_serial_info structures. Used to decrease numbers of + * parameters passed by "low" level IPMI code. + */ + int run_to_completion; +}; +#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) + +/** + * The driver model view of the IPMI messaging driver. + */ +static struct platform_driver ipmidriver = { + .driver = { + .name = "ipmi", + .bus = &platform_bus_type + } +}; +static DEFINE_MUTEX(ipmidriver_mutex); + +static LIST_HEAD(ipmi_interfaces); +static DEFINE_MUTEX(ipmi_interfaces_mutex); + +/* + * List of watchers that want to know when smi's are added and deleted. + */ +static LIST_HEAD(smi_watchers); +static DEFINE_MUTEX(smi_watchers_mutex); + +#define ipmi_inc_stat(intf, stat) \ + atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) +#define ipmi_get_stat(intf, stat) \ + ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) + +static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI", + "ACPI", "SMBIOS", "PCI", + "device-tree", "default" }; + +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) +{ + if (src > SI_DEFAULT) + src = 0; /* Invalid */ + return addr_src_to_str[src]; +} +EXPORT_SYMBOL(ipmi_addr_src_to_str); + +static int is_lan_addr(struct ipmi_addr *addr) +{ + return addr->addr_type == IPMI_LAN_ADDR_TYPE; +} + +static int is_ipmb_addr(struct ipmi_addr *addr) +{ + return addr->addr_type == IPMI_IPMB_ADDR_TYPE; +} + +static int is_ipmb_bcast_addr(struct ipmi_addr *addr) +{ + return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE; +} + +static void free_recv_msg_list(struct list_head *q) +{ + struct ipmi_recv_msg *msg, *msg2; + + list_for_each_entry_safe(msg, msg2, q, link) { + list_del(&msg->link); + ipmi_free_recv_msg(msg); + } +} + +static void free_smi_msg_list(struct list_head *q) +{ + struct ipmi_smi_msg *msg, *msg2; + + list_for_each_entry_safe(msg, msg2, q, link) { + list_del(&msg->link); + ipmi_free_smi_msg(msg); + } +} + +static void clean_up_interface_data(ipmi_smi_t intf) +{ + int i; + struct cmd_rcvr *rcvr, *rcvr2; + struct list_head list; + + tasklet_kill(&intf->recv_tasklet); + + free_smi_msg_list(&intf->waiting_rcv_msgs); + free_recv_msg_list(&intf->waiting_events); + + /* + * Wholesale remove all the entries from the list in the + * interface and wait for RCU to know that none are in use. 
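+	 * (list_splice_init_rcu() below hands the whole list over and
+	 * runs synchronize_rcu() before the entries are freed.)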
+ */ + mutex_lock(&intf->cmd_rcvrs_mutex); + INIT_LIST_HEAD(&list); + list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); + mutex_unlock(&intf->cmd_rcvrs_mutex); + + list_for_each_entry_safe(rcvr, rcvr2, &list, link) + kfree(rcvr); + + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { + if ((intf->seq_table[i].inuse) + && (intf->seq_table[i].recv_msg)) + ipmi_free_recv_msg(intf->seq_table[i].recv_msg); + } +} + +static void intf_free(struct kref *ref) +{ + ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount); + + clean_up_interface_data(intf); + kfree(intf); +} + +struct watcher_entry { + int intf_num; + ipmi_smi_t intf; + struct list_head link; +}; + +int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) +{ + ipmi_smi_t intf; + LIST_HEAD(to_deliver); + struct watcher_entry *e, *e2; + + mutex_lock(&smi_watchers_mutex); + + mutex_lock(&ipmi_interfaces_mutex); + + /* Build a list of things to deliver. */ + list_for_each_entry(intf, &ipmi_interfaces, link) { + if (intf->intf_num == -1) + continue; + e = kmalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto out_err; + kref_get(&intf->refcount); + e->intf = intf; + e->intf_num = intf->intf_num; + list_add_tail(&e->link, &to_deliver); + } + + /* We will succeed, so add it to the list. */ + list_add(&watcher->link, &smi_watchers); + + mutex_unlock(&ipmi_interfaces_mutex); + + list_for_each_entry_safe(e, e2, &to_deliver, link) { + list_del(&e->link); + watcher->new_smi(e->intf_num, e->intf->si_dev); + kref_put(&e->intf->refcount, intf_free); + kfree(e); + } + + mutex_unlock(&smi_watchers_mutex); + + return 0; + + out_err: + mutex_unlock(&ipmi_interfaces_mutex); + mutex_unlock(&smi_watchers_mutex); + list_for_each_entry_safe(e, e2, &to_deliver, link) { + list_del(&e->link); + kref_put(&e->intf->refcount, intf_free); + kfree(e); + } + return -ENOMEM; +} +EXPORT_SYMBOL(ipmi_smi_watcher_register); + +int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) +{ + mutex_lock(&smi_watchers_mutex); + list_del(&(watcher->link)); + mutex_unlock(&smi_watchers_mutex); + return 0; +} +EXPORT_SYMBOL(ipmi_smi_watcher_unregister); + +/* + * Must be called with smi_watchers_mutex held. 
+ */ +static void +call_smi_watchers(int i, struct device *dev) +{ + struct ipmi_smi_watcher *w; + + list_for_each_entry(w, &smi_watchers, link) { + if (try_module_get(w->owner)) { + w->new_smi(i, dev); + module_put(w->owner); + } + } +} + +static int +ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) +{ + if (addr1->addr_type != addr2->addr_type) + return 0; + + if (addr1->channel != addr2->channel) + return 0; + + if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { + struct ipmi_system_interface_addr *smi_addr1 + = (struct ipmi_system_interface_addr *) addr1; + struct ipmi_system_interface_addr *smi_addr2 + = (struct ipmi_system_interface_addr *) addr2; + return (smi_addr1->lun == smi_addr2->lun); + } + + if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { + struct ipmi_ipmb_addr *ipmb_addr1 + = (struct ipmi_ipmb_addr *) addr1; + struct ipmi_ipmb_addr *ipmb_addr2 + = (struct ipmi_ipmb_addr *) addr2; + + return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) + && (ipmb_addr1->lun == ipmb_addr2->lun)); + } + + if (is_lan_addr(addr1)) { + struct ipmi_lan_addr *lan_addr1 + = (struct ipmi_lan_addr *) addr1; + struct ipmi_lan_addr *lan_addr2 + = (struct ipmi_lan_addr *) addr2; + + return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) + && (lan_addr1->local_SWID == lan_addr2->local_SWID) + && (lan_addr1->session_handle + == lan_addr2->session_handle) + && (lan_addr1->lun == lan_addr2->lun)); + } + + return 1; +} + +int ipmi_validate_addr(struct ipmi_addr *addr, int len) +{ + if (len < sizeof(struct ipmi_system_interface_addr)) + return -EINVAL; + + if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { + if (addr->channel != IPMI_BMC_CHANNEL) + return -EINVAL; + return 0; + } + + if ((addr->channel == IPMI_BMC_CHANNEL) + || (addr->channel >= IPMI_MAX_CHANNELS) + || (addr->channel < 0)) + return -EINVAL; + + if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { + if (len < sizeof(struct ipmi_ipmb_addr)) + return -EINVAL; + return 0; + } + + if (is_lan_addr(addr)) { + if (len < sizeof(struct ipmi_lan_addr)) + return -EINVAL; + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL(ipmi_validate_addr); + +unsigned int ipmi_addr_length(int addr_type) +{ + if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) + return sizeof(struct ipmi_system_interface_addr); + + if ((addr_type == IPMI_IPMB_ADDR_TYPE) + || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) + return sizeof(struct ipmi_ipmb_addr); + + if (addr_type == IPMI_LAN_ADDR_TYPE) + return sizeof(struct ipmi_lan_addr); + + return 0; +} +EXPORT_SYMBOL(ipmi_addr_length); + +static void deliver_response(struct ipmi_recv_msg *msg) +{ + if (!msg->user) { + ipmi_smi_t intf = msg->user_msg_data; + + /* Special handling for NULL users. */ + if (intf->null_user_handler) { + intf->null_user_handler(intf, msg); + ipmi_inc_stat(intf, handled_local_responses); + } else { + /* No handler, so give up. */ + ipmi_inc_stat(intf, unhandled_local_responses); + } + ipmi_free_recv_msg(msg); + } else { + ipmi_user_t user = msg->user; + user->handler->ipmi_recv_hndl(msg, user->handler_data); + } +} + +static void +deliver_err_response(struct ipmi_recv_msg *msg, int err) +{ + msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + msg->msg_data[0] = err; + msg->msg.netfn |= 1; /* Convert to a response. */ + msg->msg.data_len = 1; + msg->msg.data = msg->msg_data; + deliver_response(msg); +} + +/* + * Find the next sequence number not being used and add the given + * message with the given timeout to the sequence table. 
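+ * (The table is scanned circularly starting at curr_seq; if all
+ * IPMI_IPMB_NUM_SEQ slots are in use the caller gets -EAGAIN.)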
This must be + * called with the interface's seq_lock held. + */ +static int intf_next_seq(ipmi_smi_t intf, + struct ipmi_recv_msg *recv_msg, + unsigned long timeout, + int retries, + int broadcast, + unsigned char *seq, + long *seqid) +{ + int rv = 0; + unsigned int i; + + for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; + i = (i+1)%IPMI_IPMB_NUM_SEQ) { + if (!intf->seq_table[i].inuse) + break; + } + + if (!intf->seq_table[i].inuse) { + intf->seq_table[i].recv_msg = recv_msg; + + /* + * Start with the maximum timeout, when the send response + * comes in we will start the real timer. + */ + intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; + intf->seq_table[i].orig_timeout = timeout; + intf->seq_table[i].retries_left = retries; + intf->seq_table[i].broadcast = broadcast; + intf->seq_table[i].inuse = 1; + intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); + *seq = i; + *seqid = intf->seq_table[i].seqid; + intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; + need_waiter(intf); + } else { + rv = -EAGAIN; + } + + return rv; +} + +/* + * Return the receive message for the given sequence number and + * release the sequence number so it can be reused. Some other data + * is passed in to be sure the message matches up correctly (to help + * guard against message coming in after their timeout and the + * sequence number being reused). + */ +static int intf_find_seq(ipmi_smi_t intf, + unsigned char seq, + short channel, + unsigned char cmd, + unsigned char netfn, + struct ipmi_addr *addr, + struct ipmi_recv_msg **recv_msg) +{ + int rv = -ENODEV; + unsigned long flags; + + if (seq >= IPMI_IPMB_NUM_SEQ) + return -EINVAL; + + spin_lock_irqsave(&(intf->seq_lock), flags); + if (intf->seq_table[seq].inuse) { + struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; + + if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) + && (msg->msg.netfn == netfn) + && (ipmi_addr_equal(addr, &(msg->addr)))) { + *recv_msg = msg; + intf->seq_table[seq].inuse = 0; + rv = 0; + } + } + spin_unlock_irqrestore(&(intf->seq_lock), flags); + + return rv; +} + + +/* Start the timer for a specific sequence table entry. */ +static int intf_start_seq_timer(ipmi_smi_t intf, + long msgid) +{ + int rv = -ENODEV; + unsigned long flags; + unsigned char seq; + unsigned long seqid; + + + GET_SEQ_FROM_MSGID(msgid, seq, seqid); + + spin_lock_irqsave(&(intf->seq_lock), flags); + /* + * We do this verification because the user can be deleted + * while a message is outstanding. + */ + if ((intf->seq_table[seq].inuse) + && (intf->seq_table[seq].seqid == seqid)) { + struct seq_table *ent = &(intf->seq_table[seq]); + ent->timeout = ent->orig_timeout; + rv = 0; + } + spin_unlock_irqrestore(&(intf->seq_lock), flags); + + return rv; +} + +/* Got an error for the send message for a specific sequence number. */ +static int intf_err_seq(ipmi_smi_t intf, + long msgid, + unsigned int err) +{ + int rv = -ENODEV; + unsigned long flags; + unsigned char seq; + unsigned long seqid; + struct ipmi_recv_msg *msg = NULL; + + + GET_SEQ_FROM_MSGID(msgid, seq, seqid); + + spin_lock_irqsave(&(intf->seq_lock), flags); + /* + * We do this verification because the user can be deleted + * while a message is outstanding. 
+ */ + if ((intf->seq_table[seq].inuse) + && (intf->seq_table[seq].seqid == seqid)) { + struct seq_table *ent = &(intf->seq_table[seq]); + + ent->inuse = 0; + msg = ent->recv_msg; + rv = 0; + } + spin_unlock_irqrestore(&(intf->seq_lock), flags); + + if (msg) + deliver_err_response(msg, err); + + return rv; +} + + +int ipmi_create_user(unsigned int if_num, + struct ipmi_user_hndl *handler, + void *handler_data, + ipmi_user_t *user) +{ + unsigned long flags; + ipmi_user_t new_user; + int rv = 0; + ipmi_smi_t intf; + + /* + * There is no module usecount here, because it's not + * required. Since this can only be used by and called from + * other modules, they will implicitly use this module, and + * thus this can't be removed unless the other modules are + * removed. + */ + + if (handler == NULL) + return -EINVAL; + + /* + * Make sure the driver is actually initialized, this handles + * problems with initialization order. + */ + if (!initialized) { + rv = ipmi_init_msghandler(); + if (rv) + return rv; + + /* + * The init code doesn't return an error if it was turned + * off, but it won't initialize. Check that. + */ + if (!initialized) + return -ENODEV; + } + + new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); + if (!new_user) + return -ENOMEM; + + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + if (intf->intf_num == if_num) + goto found; + } + /* Not found, return an error */ + rv = -EINVAL; + goto out_kfree; + + found: + /* Note that each existing user holds a refcount to the interface. */ + kref_get(&intf->refcount); + + kref_init(&new_user->refcount); + new_user->handler = handler; + new_user->handler_data = handler_data; + new_user->intf = intf; + new_user->gets_events = false; + + if (!try_module_get(intf->handlers->owner)) { + rv = -ENODEV; + goto out_kref; + } + + if (intf->handlers->inc_usecount) { + rv = intf->handlers->inc_usecount(intf->send_info); + if (rv) { + module_put(intf->handlers->owner); + goto out_kref; + } + } + + /* + * Hold the lock so intf->handlers is guaranteed to be good + * until now + */ + mutex_unlock(&ipmi_interfaces_mutex); + + new_user->valid = true; + spin_lock_irqsave(&intf->seq_lock, flags); + list_add_rcu(&new_user->link, &intf->users); + spin_unlock_irqrestore(&intf->seq_lock, flags); + if (handler->ipmi_watchdog_pretimeout) { + /* User wants pretimeouts, so make sure to watch for them. 
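+		 * The first waiter (event_waiters going from zero to one)
+		 * arms the periodic timer via need_waiter().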
*/ + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + } + *user = new_user; + return 0; + +out_kref: + kref_put(&intf->refcount, intf_free); +out_kfree: + mutex_unlock(&ipmi_interfaces_mutex); + kfree(new_user); + return rv; +} +EXPORT_SYMBOL(ipmi_create_user); + +int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) +{ + int rv = 0; + ipmi_smi_t intf; + struct ipmi_smi_handlers *handlers; + + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + if (intf->intf_num == if_num) + goto found; + } + /* Not found, return an error */ + rv = -EINVAL; + mutex_unlock(&ipmi_interfaces_mutex); + return rv; + +found: + handlers = intf->handlers; + rv = -ENOSYS; + if (handlers->get_smi_info) + rv = handlers->get_smi_info(intf->send_info, data); + mutex_unlock(&ipmi_interfaces_mutex); + + return rv; +} +EXPORT_SYMBOL(ipmi_get_smi_info); + +static void free_user(struct kref *ref) +{ + ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); + kfree(user); +} + +int ipmi_destroy_user(ipmi_user_t user) +{ + ipmi_smi_t intf = user->intf; + int i; + unsigned long flags; + struct cmd_rcvr *rcvr; + struct cmd_rcvr *rcvrs = NULL; + + user->valid = false; + + if (user->handler->ipmi_watchdog_pretimeout) + atomic_dec(&intf->event_waiters); + + if (user->gets_events) + atomic_dec(&intf->event_waiters); + + /* Remove the user from the interface's sequence table. */ + spin_lock_irqsave(&intf->seq_lock, flags); + list_del_rcu(&user->link); + + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { + if (intf->seq_table[i].inuse + && (intf->seq_table[i].recv_msg->user == user)) { + intf->seq_table[i].inuse = 0; + ipmi_free_recv_msg(intf->seq_table[i].recv_msg); + } + } + spin_unlock_irqrestore(&intf->seq_lock, flags); + + /* + * Remove the user from the command receiver's table. First + * we build a list of everything (not using the standard link, + * since other things may be using it till we do + * synchronize_rcu()) then free everything in that list. 
+ */ + mutex_lock(&intf->cmd_rcvrs_mutex); + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + if (rcvr->user == user) { + list_del_rcu(&rcvr->link); + rcvr->next = rcvrs; + rcvrs = rcvr; + } + } + mutex_unlock(&intf->cmd_rcvrs_mutex); + synchronize_rcu(); + while (rcvrs) { + rcvr = rcvrs; + rcvrs = rcvr->next; + kfree(rcvr); + } + + mutex_lock(&ipmi_interfaces_mutex); + if (intf->handlers) { + module_put(intf->handlers->owner); + if (intf->handlers->dec_usecount) + intf->handlers->dec_usecount(intf->send_info); + } + mutex_unlock(&ipmi_interfaces_mutex); + + kref_put(&intf->refcount, intf_free); + + kref_put(&user->refcount, free_user); + + return 0; +} +EXPORT_SYMBOL(ipmi_destroy_user); + +void ipmi_get_version(ipmi_user_t user, + unsigned char *major, + unsigned char *minor) +{ + *major = user->intf->ipmi_version_major; + *minor = user->intf->ipmi_version_minor; +} +EXPORT_SYMBOL(ipmi_get_version); + +int ipmi_set_my_address(ipmi_user_t user, + unsigned int channel, + unsigned char address) +{ + if (channel >= IPMI_MAX_CHANNELS) + return -EINVAL; + user->intf->channels[channel].address = address; + return 0; +} +EXPORT_SYMBOL(ipmi_set_my_address); + +int ipmi_get_my_address(ipmi_user_t user, + unsigned int channel, + unsigned char *address) +{ + if (channel >= IPMI_MAX_CHANNELS) + return -EINVAL; + *address = user->intf->channels[channel].address; + return 0; +} +EXPORT_SYMBOL(ipmi_get_my_address); + +int ipmi_set_my_LUN(ipmi_user_t user, + unsigned int channel, + unsigned char LUN) +{ + if (channel >= IPMI_MAX_CHANNELS) + return -EINVAL; + user->intf->channels[channel].lun = LUN & 0x3; + return 0; +} +EXPORT_SYMBOL(ipmi_set_my_LUN); + +int ipmi_get_my_LUN(ipmi_user_t user, + unsigned int channel, + unsigned char *address) +{ + if (channel >= IPMI_MAX_CHANNELS) + return -EINVAL; + *address = user->intf->channels[channel].lun; + return 0; +} +EXPORT_SYMBOL(ipmi_get_my_LUN); + +int ipmi_get_maintenance_mode(ipmi_user_t user) +{ + int mode; + unsigned long flags; + + spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); + mode = user->intf->maintenance_mode; + spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); + + return mode; +} +EXPORT_SYMBOL(ipmi_get_maintenance_mode); + +static void maintenance_mode_update(ipmi_smi_t intf) +{ + if (intf->handlers->set_maintenance_mode) + intf->handlers->set_maintenance_mode( + intf->send_info, intf->maintenance_mode_enable); +} + +int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) +{ + int rv = 0; + unsigned long flags; + ipmi_smi_t intf = user->intf; + + spin_lock_irqsave(&intf->maintenance_mode_lock, flags); + if (intf->maintenance_mode != mode) { + switch (mode) { + case IPMI_MAINTENANCE_MODE_AUTO: + intf->maintenance_mode_enable + = (intf->auto_maintenance_timeout > 0); + break; + + case IPMI_MAINTENANCE_MODE_OFF: + intf->maintenance_mode_enable = false; + break; + + case IPMI_MAINTENANCE_MODE_ON: + intf->maintenance_mode_enable = true; + break; + + default: + rv = -EINVAL; + goto out_unlock; + } + intf->maintenance_mode = mode; + + maintenance_mode_update(intf); + } + out_unlock: + spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); + + return rv; +} +EXPORT_SYMBOL(ipmi_set_maintenance_mode); + +int ipmi_set_gets_events(ipmi_user_t user, bool val) +{ + unsigned long flags; + ipmi_smi_t intf = user->intf; + struct ipmi_recv_msg *msg, *msg2; + struct list_head msgs; + + INIT_LIST_HEAD(&msgs); + + spin_lock_irqsave(&intf->events_lock, flags); + if (user->gets_events == val) + goto out; + + 
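+	/* Flip the flag while events_lock is held so the delivery
+	   loop below sees a stable value. */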
user->gets_events = val; + + if (val) { + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + } else { + atomic_dec(&intf->event_waiters); + } + + if (intf->delivering_events) + /* + * Another thread is delivering events for this, so + * let it handle any new events. + */ + goto out; + + /* Deliver any queued events. */ + while (user->gets_events && !list_empty(&intf->waiting_events)) { + list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) + list_move_tail(&msg->link, &msgs); + intf->waiting_events_count = 0; + if (intf->event_msg_printed) { + printk(KERN_WARNING PFX "Event queue no longer" + " full\n"); + intf->event_msg_printed = 0; + } + + intf->delivering_events = 1; + spin_unlock_irqrestore(&intf->events_lock, flags); + + list_for_each_entry_safe(msg, msg2, &msgs, link) { + msg->user = user; + kref_get(&user->refcount); + deliver_response(msg); + } + + spin_lock_irqsave(&intf->events_lock, flags); + intf->delivering_events = 0; + } + + out: + spin_unlock_irqrestore(&intf->events_lock, flags); + + return 0; +} +EXPORT_SYMBOL(ipmi_set_gets_events); + +static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, + unsigned char netfn, + unsigned char cmd, + unsigned char chan) +{ + struct cmd_rcvr *rcvr; + + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) + && (rcvr->chans & (1 << chan))) + return rcvr; + } + return NULL; +} + +static int is_cmd_rcvr_exclusive(ipmi_smi_t intf, + unsigned char netfn, + unsigned char cmd, + unsigned int chans) +{ + struct cmd_rcvr *rcvr; + + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) + && (rcvr->chans & chans)) + return 0; + } + return 1; +} + +int ipmi_register_for_cmd(ipmi_user_t user, + unsigned char netfn, + unsigned char cmd, + unsigned int chans) +{ + ipmi_smi_t intf = user->intf; + struct cmd_rcvr *rcvr; + int rv = 0; + + + rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); + if (!rcvr) + return -ENOMEM; + rcvr->cmd = cmd; + rcvr->netfn = netfn; + rcvr->chans = chans; + rcvr->user = user; + + mutex_lock(&intf->cmd_rcvrs_mutex); + /* Make sure the command/netfn is not already registered. 
*/ + if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) { + rv = -EBUSY; + goto out_unlock; + } + + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + + list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); + + out_unlock: + mutex_unlock(&intf->cmd_rcvrs_mutex); + if (rv) + kfree(rcvr); + + return rv; +} +EXPORT_SYMBOL(ipmi_register_for_cmd); + +int ipmi_unregister_for_cmd(ipmi_user_t user, + unsigned char netfn, + unsigned char cmd, + unsigned int chans) +{ + ipmi_smi_t intf = user->intf; + struct cmd_rcvr *rcvr; + struct cmd_rcvr *rcvrs = NULL; + int i, rv = -ENOENT; + + mutex_lock(&intf->cmd_rcvrs_mutex); + for (i = 0; i < IPMI_NUM_CHANNELS; i++) { + if (((1 << i) & chans) == 0) + continue; + rcvr = find_cmd_rcvr(intf, netfn, cmd, i); + if (rcvr == NULL) + continue; + if (rcvr->user == user) { + rv = 0; + rcvr->chans &= ~chans; + if (rcvr->chans == 0) { + list_del_rcu(&rcvr->link); + rcvr->next = rcvrs; + rcvrs = rcvr; + } + } + } + mutex_unlock(&intf->cmd_rcvrs_mutex); + synchronize_rcu(); + while (rcvrs) { + atomic_dec(&intf->event_waiters); + rcvr = rcvrs; + rcvrs = rcvr->next; + kfree(rcvr); + } + return rv; +} +EXPORT_SYMBOL(ipmi_unregister_for_cmd); + +static unsigned char +ipmb_checksum(unsigned char *data, int size) +{ + unsigned char csum = 0; + + for (; size > 0; size--, data++) + csum += *data; + + return -csum; +} + +static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, + struct kernel_ipmi_msg *msg, + struct ipmi_ipmb_addr *ipmb_addr, + long msgid, + unsigned char ipmb_seq, + int broadcast, + unsigned char source_address, + unsigned char source_lun) +{ + int i = broadcast; + + /* Format the IPMB header data. */ + smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_msg->data[1] = IPMI_SEND_MSG_CMD; + smi_msg->data[2] = ipmb_addr->channel; + if (broadcast) + smi_msg->data[3] = 0; + smi_msg->data[i+3] = ipmb_addr->slave_addr; + smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); + smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2); + smi_msg->data[i+6] = source_address; + smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; + smi_msg->data[i+8] = msg->cmd; + + /* Now tack on the data to the message. */ + if (msg->data_len > 0) + memcpy(&(smi_msg->data[i+9]), msg->data, + msg->data_len); + smi_msg->data_size = msg->data_len + 9; + + /* Now calculate the checksum and tack it on. */ + smi_msg->data[i+smi_msg->data_size] + = ipmb_checksum(&(smi_msg->data[i+6]), + smi_msg->data_size-6); + + /* + * Add on the checksum size and the offset from the + * broadcast. + */ + smi_msg->data_size += 1 + i; + + smi_msg->msgid = msgid; +} + +static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, + struct kernel_ipmi_msg *msg, + struct ipmi_lan_addr *lan_addr, + long msgid, + unsigned char ipmb_seq, + unsigned char source_lun) +{ + /* Format the IPMB header data. */ + smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_msg->data[1] = IPMI_SEND_MSG_CMD; + smi_msg->data[2] = lan_addr->channel; + smi_msg->data[3] = lan_addr->session_handle; + smi_msg->data[4] = lan_addr->remote_SWID; + smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); + smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2); + smi_msg->data[7] = lan_addr->local_SWID; + smi_msg->data[8] = (ipmb_seq << 2) | source_lun; + smi_msg->data[9] = msg->cmd; + + /* Now tack on the data to the message. 
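ipmb_checksum() above returns the two's complement of the byte sum, so the covered bytes plus the checksum sum to zero modulo 256, which is exactly the property an IPMB receiver verifies. A small userspace illustration of the arithmetic, with made-up header bytes:

#include <assert.h>
#include <stdio.h>

/* Same algorithm as ipmb_checksum(): two's complement of the byte sum. */
static unsigned char csum(const unsigned char *data, int size)
{
	unsigned char c = 0;

	for (; size > 0; size--, data++)
		c += *data;
	return -c;
}

int main(void)
{
	/* Hypothetical header bytes: slave address, then netfn/lun. */
	unsigned char hdr[2] = { 0x20, 0x18 };
	unsigned char check = csum(hdr, 2);

	/* Receiver check: covered bytes plus checksum == 0 (mod 256). */
	assert((unsigned char)(hdr[0] + hdr[1] + check) == 0);
	printf("checksum = 0x%02x\n", check);	/* prints 0xc8 */
	return 0;
}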
*/ + if (msg->data_len > 0) + memcpy(&(smi_msg->data[10]), msg->data, + msg->data_len); + smi_msg->data_size = msg->data_len + 10; + + /* Now calculate the checksum and tack it on. */ + smi_msg->data[smi_msg->data_size] + = ipmb_checksum(&(smi_msg->data[7]), + smi_msg->data_size-7); + + /* + * Add on the checksum size and the offset from the + * broadcast. + */ + smi_msg->data_size += 1; + + smi_msg->msgid = msgid; +} + +static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf, + struct ipmi_smi_msg *smi_msg, + int priority) +{ + if (intf->curr_msg) { + if (priority > 0) + list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); + else + list_add_tail(&smi_msg->link, &intf->xmit_msgs); + smi_msg = NULL; + } else { + intf->curr_msg = smi_msg; + } + + return smi_msg; +} + + +static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, + struct ipmi_smi_msg *smi_msg, int priority) +{ + int run_to_completion = intf->run_to_completion; + + if (run_to_completion) { + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + } else { + unsigned long flags; + + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + } + + if (smi_msg) + handlers->sender(intf->send_info, smi_msg); +} + +/* + * Separate from ipmi_request so that the user does not have to be + * supplied in certain circumstances (mainly at panic time). If + * messages are supplied, they will be freed, even if an error + * occurs. + */ +static int i_ipmi_request(ipmi_user_t user, + ipmi_smi_t intf, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + void *supplied_smi, + struct ipmi_recv_msg *supplied_recv, + int priority, + unsigned char source_address, + unsigned char source_lun, + int retries, + unsigned int retry_time_ms) +{ + int rv = 0; + struct ipmi_smi_msg *smi_msg; + struct ipmi_recv_msg *recv_msg; + unsigned long flags; + + + if (supplied_recv) + recv_msg = supplied_recv; + else { + recv_msg = ipmi_alloc_recv_msg(); + if (recv_msg == NULL) + return -ENOMEM; + } + recv_msg->user_msg_data = user_msg_data; + + if (supplied_smi) + smi_msg = (struct ipmi_smi_msg *) supplied_smi; + else { + smi_msg = ipmi_alloc_smi_msg(); + if (smi_msg == NULL) { + ipmi_free_recv_msg(recv_msg); + return -ENOMEM; + } + } + + rcu_read_lock(); + if (intf->in_shutdown) { + rv = -ENODEV; + goto out_err; + } + + recv_msg->user = user; + if (user) + kref_get(&user->refcount); + recv_msg->msgid = msgid; + /* + * Store the message to send in the receive message so timeout + * responses can get the proper response data. + */ + recv_msg->msg = *msg; + + if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { + struct ipmi_system_interface_addr *smi_addr; + + if (msg->netfn & 1) { + /* Responses are not allowed to the SMI. */ + rv = -EINVAL; + goto out_err; + } + + smi_addr = (struct ipmi_system_interface_addr *) addr; + if (smi_addr->lun > 3) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); + + if ((msg->netfn == IPMI_NETFN_APP_REQUEST) + && ((msg->cmd == IPMI_SEND_MSG_CMD) + || (msg->cmd == IPMI_GET_MSG_CMD) + || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { + /* + * We don't let the user do these, since we manage + * the sequence numbers. 
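smi_send() above takes xmit_msgs_lock only when run_to_completion is clear: in run-to-completion mode, used at panic time, the rest of the system is stopped, so the lock is unnecessary and could even deadlock on a lock the dying CPU already holds. A reduced sketch of that conditional-locking shape, with a hypothetical guarded_op() standing in for the queue manipulation:

static void guarded_op(ipmi_smi_t intf)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);

	/* ... manipulate intf->xmit_msgs / intf->curr_msg here ... */

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
}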
+ */ + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + if (((msg->netfn == IPMI_NETFN_APP_REQUEST) + && ((msg->cmd == IPMI_COLD_RESET_CMD) + || (msg->cmd == IPMI_WARM_RESET_CMD))) + || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { + spin_lock_irqsave(&intf->maintenance_mode_lock, flags); + intf->auto_maintenance_timeout + = IPMI_MAINTENANCE_MODE_TIMEOUT; + if (!intf->maintenance_mode + && !intf->maintenance_mode_enable) { + intf->maintenance_mode_enable = true; + maintenance_mode_update(intf); + } + spin_unlock_irqrestore(&intf->maintenance_mode_lock, + flags); + } + + if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EMSGSIZE; + goto out_err; + } + + smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); + smi_msg->data[1] = msg->cmd; + smi_msg->msgid = msgid; + smi_msg->user_data = recv_msg; + if (msg->data_len > 0) + memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); + smi_msg->data_size = msg->data_len + 2; + ipmi_inc_stat(intf, sent_local_commands); + } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { + struct ipmi_ipmb_addr *ipmb_addr; + unsigned char ipmb_seq; + long seqid; + int broadcast = 0; + + if (addr->channel >= IPMI_MAX_CHANNELS) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + if (intf->channels[addr->channel].medium + != IPMI_CHANNEL_MEDIUM_IPMB) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + if (retries < 0) { + if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) + retries = 0; /* Don't retry broadcasts. */ + else + retries = 4; + } + if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { + /* + * Broadcasts add a zero at the beginning of the + * message, but otherwise is the same as an IPMB + * address. + */ + addr->addr_type = IPMI_IPMB_ADDR_TYPE; + broadcast = 1; + } + + + /* Default to 1 second retries. */ + if (retry_time_ms == 0) + retry_time_ms = 1000; + + /* + * 9 for the header and 1 for the checksum, plus + * possibly one for the broadcast. + */ + if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EMSGSIZE; + goto out_err; + } + + ipmb_addr = (struct ipmi_ipmb_addr *) addr; + if (ipmb_addr->lun > 3) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); + + if (recv_msg->msg.netfn & 0x1) { + /* + * It's a response, so use the user's sequence + * from msgid. + */ + ipmi_inc_stat(intf, sent_ipmb_responses); + format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, + msgid, broadcast, + source_address, source_lun); + + /* + * Save the receive message so we can use it + * to deliver the response. + */ + smi_msg->user_data = recv_msg; + } else { + /* It's a command, so get a sequence for it. */ + + spin_lock_irqsave(&(intf->seq_lock), flags); + + /* + * Create a sequence number with a 1 second + * timeout and 4 retries. + */ + rv = intf_next_seq(intf, + recv_msg, + retry_time_ms, + retries, + broadcast, + &ipmb_seq, + &seqid); + if (rv) { + /* + * We have used up all the sequence numbers, + * probably, so abort. + */ + spin_unlock_irqrestore(&(intf->seq_lock), + flags); + goto out_err; + } + + ipmi_inc_stat(intf, sent_ipmb_commands); + + /* + * Store the sequence number in the message, + * so that when the send message response + * comes back we can start the timer. 
+ */ + format_ipmb_msg(smi_msg, msg, ipmb_addr, + STORE_SEQ_IN_MSGID(ipmb_seq, seqid), + ipmb_seq, broadcast, + source_address, source_lun); + + /* + * Copy the message into the recv message data, so we + * can retransmit it later if necessary. + */ + memcpy(recv_msg->msg_data, smi_msg->data, + smi_msg->data_size); + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = smi_msg->data_size; + + /* + * We don't unlock until here, because we need + * to copy the completed message into the + * recv_msg before we release the lock. + * Otherwise, race conditions may bite us. I + * know that's pretty paranoid, but I prefer + * to be correct. + */ + spin_unlock_irqrestore(&(intf->seq_lock), flags); + } + } else if (is_lan_addr(addr)) { + struct ipmi_lan_addr *lan_addr; + unsigned char ipmb_seq; + long seqid; + + if (addr->channel >= IPMI_MAX_CHANNELS) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + if ((intf->channels[addr->channel].medium + != IPMI_CHANNEL_MEDIUM_8023LAN) + && (intf->channels[addr->channel].medium + != IPMI_CHANNEL_MEDIUM_ASYNC)) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + retries = 4; + + /* Default to 1 second retries. */ + if (retry_time_ms == 0) + retry_time_ms = 1000; + + /* 11 for the header and 1 for the checksum. */ + if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EMSGSIZE; + goto out_err; + } + + lan_addr = (struct ipmi_lan_addr *) addr; + if (lan_addr->lun > 3) { + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + + memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); + + if (recv_msg->msg.netfn & 0x1) { + /* + * It's a response, so use the user's sequence + * from msgid. + */ + ipmi_inc_stat(intf, sent_lan_responses); + format_lan_msg(smi_msg, msg, lan_addr, msgid, + msgid, source_lun); + + /* + * Save the receive message so we can use it + * to deliver the response. + */ + smi_msg->user_data = recv_msg; + } else { + /* It's a command, so get a sequence for it. */ + + spin_lock_irqsave(&(intf->seq_lock), flags); + + /* + * Create a sequence number with a 1 second + * timeout and 4 retries. + */ + rv = intf_next_seq(intf, + recv_msg, + retry_time_ms, + retries, + 0, + &ipmb_seq, + &seqid); + if (rv) { + /* + * We have used up all the sequence numbers, + * probably, so abort. + */ + spin_unlock_irqrestore(&(intf->seq_lock), + flags); + goto out_err; + } + + ipmi_inc_stat(intf, sent_lan_commands); + + /* + * Store the sequence number in the message, + * so that when the send message response + * comes back we can start the timer. + */ + format_lan_msg(smi_msg, msg, lan_addr, + STORE_SEQ_IN_MSGID(ipmb_seq, seqid), + ipmb_seq, source_lun); + + /* + * Copy the message into the recv message data, so we + * can retransmit it later if necessary. + */ + memcpy(recv_msg->msg_data, smi_msg->data, + smi_msg->data_size); + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = smi_msg->data_size; + + /* + * We don't unlock until here, because we need + * to copy the completed message into the + * recv_msg before we release the lock. + * Otherwise, race conditions may bite us. I + * know that's pretty paranoid, but I prefer + * to be correct. + */ + spin_unlock_irqrestore(&(intf->seq_lock), flags); + } + } else { + /* Unknown address type. 
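For commands (as opposed to responses), the code above allocates a fresh sequence number and packs both ipmb_seq and seqid into msgid with STORE_SEQ_IN_MSGID(), so the pair can be recovered when the Send Message response arrives. The field widths below are hypothetical, chosen only to illustrate the pack/unpack round trip, not the actual layout used by that macro:

#include <assert.h>

/* Hypothetical 8/24-bit packing; the real widths come from the macro. */
#define PACK_SEQ(seq, seqid) ((((long)(seq) & 0xff) << 24) | ((seqid) & 0xffffff))
#define GET_SEQ(msgid)       (((msgid) >> 24) & 0xff)
#define GET_SEQID(msgid)     ((msgid) & 0xffffff)

int main(void)
{
	long msgid = PACK_SEQ(17, 0x00abcd);

	assert(GET_SEQ(msgid) == 17);		/* sequence recovered */
	assert(GET_SEQID(msgid) == 0x00abcd);	/* seqid recovered */
	return 0;
}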
*/ + ipmi_inc_stat(intf, sent_invalid_commands); + rv = -EINVAL; + goto out_err; + } + +#ifdef DEBUG_MSGING + { + int m; + for (m = 0; m < smi_msg->data_size; m++) + printk(" %2.2x", smi_msg->data[m]); + printk("\n"); + } +#endif + + smi_send(intf, intf->handlers, smi_msg, priority); + rcu_read_unlock(); + + return 0; + + out_err: + rcu_read_unlock(); + ipmi_free_smi_msg(smi_msg); + ipmi_free_recv_msg(recv_msg); + return rv; +} + +static int check_addr(ipmi_smi_t intf, + struct ipmi_addr *addr, + unsigned char *saddr, + unsigned char *lun) +{ + if (addr->channel >= IPMI_MAX_CHANNELS) + return -EINVAL; + *lun = intf->channels[addr->channel].lun; + *saddr = intf->channels[addr->channel].address; + return 0; +} + +int ipmi_request_settime(ipmi_user_t user, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + int priority, + int retries, + unsigned int retry_time_ms) +{ + unsigned char saddr = 0, lun = 0; + int rv; + + if (!user) + return -EINVAL; + rv = check_addr(user->intf, addr, &saddr, &lun); + if (rv) + return rv; + return i_ipmi_request(user, + user->intf, + addr, + msgid, + msg, + user_msg_data, + NULL, NULL, + priority, + saddr, + lun, + retries, + retry_time_ms); +} +EXPORT_SYMBOL(ipmi_request_settime); + +int ipmi_request_supply_msgs(ipmi_user_t user, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + void *supplied_smi, + struct ipmi_recv_msg *supplied_recv, + int priority) +{ + unsigned char saddr = 0, lun = 0; + int rv; + + if (!user) + return -EINVAL; + rv = check_addr(user->intf, addr, &saddr, &lun); + if (rv) + return rv; + return i_ipmi_request(user, + user->intf, + addr, + msgid, + msg, + user_msg_data, + supplied_smi, + supplied_recv, + priority, + saddr, + lun, + -1, 0); +} +EXPORT_SYMBOL(ipmi_request_supply_msgs); + +#ifdef CONFIG_PROC_FS +static int smi_ipmb_proc_show(struct seq_file *m, void *v) +{ + ipmi_smi_t intf = m->private; + int i; + + seq_printf(m, "%x", intf->channels[0].address); + for (i = 1; i < IPMI_MAX_CHANNELS; i++) + seq_printf(m, " %x", intf->channels[i].address); + seq_putc(m, '\n'); + + return 0; +} + +static int smi_ipmb_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_ipmb_proc_ops = { + .open = smi_ipmb_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smi_version_proc_show(struct seq_file *m, void *v) +{ + ipmi_smi_t intf = m->private; + + seq_printf(m, "%u.%u\n", + ipmi_version_major(&intf->bmc->id), + ipmi_version_minor(&intf->bmc->id)); + + return 0; +} + +static int smi_version_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_version_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_version_proc_ops = { + .open = smi_version_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smi_stats_proc_show(struct seq_file *m, void *v) +{ + ipmi_smi_t intf = m->private; + + seq_printf(m, "sent_invalid_commands: %u\n", + ipmi_get_stat(intf, sent_invalid_commands)); + seq_printf(m, "sent_local_commands: %u\n", + ipmi_get_stat(intf, sent_local_commands)); + seq_printf(m, "handled_local_responses: %u\n", + ipmi_get_stat(intf, handled_local_responses)); + seq_printf(m, "unhandled_local_responses: %u\n", + ipmi_get_stat(intf, unhandled_local_responses)); + seq_printf(m, "sent_ipmb_commands: %u\n", + 
ipmi_get_stat(intf, sent_ipmb_commands)); + seq_printf(m, "sent_ipmb_command_errs: %u\n", + ipmi_get_stat(intf, sent_ipmb_command_errs)); + seq_printf(m, "retransmitted_ipmb_commands: %u\n", + ipmi_get_stat(intf, retransmitted_ipmb_commands)); + seq_printf(m, "timed_out_ipmb_commands: %u\n", + ipmi_get_stat(intf, timed_out_ipmb_commands)); + seq_printf(m, "timed_out_ipmb_broadcasts: %u\n", + ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); + seq_printf(m, "sent_ipmb_responses: %u\n", + ipmi_get_stat(intf, sent_ipmb_responses)); + seq_printf(m, "handled_ipmb_responses: %u\n", + ipmi_get_stat(intf, handled_ipmb_responses)); + seq_printf(m, "invalid_ipmb_responses: %u\n", + ipmi_get_stat(intf, invalid_ipmb_responses)); + seq_printf(m, "unhandled_ipmb_responses: %u\n", + ipmi_get_stat(intf, unhandled_ipmb_responses)); + seq_printf(m, "sent_lan_commands: %u\n", + ipmi_get_stat(intf, sent_lan_commands)); + seq_printf(m, "sent_lan_command_errs: %u\n", + ipmi_get_stat(intf, sent_lan_command_errs)); + seq_printf(m, "retransmitted_lan_commands: %u\n", + ipmi_get_stat(intf, retransmitted_lan_commands)); + seq_printf(m, "timed_out_lan_commands: %u\n", + ipmi_get_stat(intf, timed_out_lan_commands)); + seq_printf(m, "sent_lan_responses: %u\n", + ipmi_get_stat(intf, sent_lan_responses)); + seq_printf(m, "handled_lan_responses: %u\n", + ipmi_get_stat(intf, handled_lan_responses)); + seq_printf(m, "invalid_lan_responses: %u\n", + ipmi_get_stat(intf, invalid_lan_responses)); + seq_printf(m, "unhandled_lan_responses: %u\n", + ipmi_get_stat(intf, unhandled_lan_responses)); + seq_printf(m, "handled_commands: %u\n", + ipmi_get_stat(intf, handled_commands)); + seq_printf(m, "invalid_commands: %u\n", + ipmi_get_stat(intf, invalid_commands)); + seq_printf(m, "unhandled_commands: %u\n", + ipmi_get_stat(intf, unhandled_commands)); + seq_printf(m, "invalid_events: %u\n", + ipmi_get_stat(intf, invalid_events)); + seq_printf(m, "events: %u\n", + ipmi_get_stat(intf, events)); + seq_printf(m, "failed rexmit LAN msgs: %u\n", + ipmi_get_stat(intf, dropped_rexmit_lan_commands)); + seq_printf(m, "failed rexmit IPMB msgs: %u\n", + ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); + return 0; +} + +static int smi_stats_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_stats_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_stats_proc_ops = { + .open = smi_stats_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif /* CONFIG_PROC_FS */ + +int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, + const struct file_operations *proc_ops, + void *data) +{ + int rv = 0; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *file; + struct ipmi_proc_entry *entry; + + /* Create a list element. */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + entry->name = kstrdup(name, GFP_KERNEL); + if (!entry->name) { + kfree(entry); + return -ENOMEM; + } + + file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); + if (!file) { + kfree(entry->name); + kfree(entry); + rv = -ENOMEM; + } else { + mutex_lock(&smi->proc_entry_lock); + /* Stick it on the list. 
*/ + entry->next = smi->proc_entries; + smi->proc_entries = entry; + mutex_unlock(&smi->proc_entry_lock); + } +#endif /* CONFIG_PROC_FS */ + + return rv; +} +EXPORT_SYMBOL(ipmi_smi_add_proc_entry); + +static int add_proc_entries(ipmi_smi_t smi, int num) +{ + int rv = 0; + +#ifdef CONFIG_PROC_FS + sprintf(smi->proc_dir_name, "%d", num); + smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); + if (!smi->proc_dir) + rv = -ENOMEM; + + if (rv == 0) + rv = ipmi_smi_add_proc_entry(smi, "stats", + &smi_stats_proc_ops, + smi); + + if (rv == 0) + rv = ipmi_smi_add_proc_entry(smi, "ipmb", + &smi_ipmb_proc_ops, + smi); + + if (rv == 0) + rv = ipmi_smi_add_proc_entry(smi, "version", + &smi_version_proc_ops, + smi); +#endif /* CONFIG_PROC_FS */ + + return rv; +} + +static void remove_proc_entries(ipmi_smi_t smi) +{ +#ifdef CONFIG_PROC_FS + struct ipmi_proc_entry *entry; + + mutex_lock(&smi->proc_entry_lock); + while (smi->proc_entries) { + entry = smi->proc_entries; + smi->proc_entries = entry->next; + + remove_proc_entry(entry->name, smi->proc_dir); + kfree(entry->name); + kfree(entry); + } + mutex_unlock(&smi->proc_entry_lock); + remove_proc_entry(smi->proc_dir_name, proc_ipmi_root); +#endif /* CONFIG_PROC_FS */ +} + +static int __find_bmc_guid(struct device *dev, void *data) +{ + unsigned char *id = data; + struct bmc_device *bmc = to_bmc_device(dev); + return memcmp(bmc->guid, id, 16) == 0; +} + +static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, + unsigned char *guid) +{ + struct device *dev; + + dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); + if (dev) + return to_bmc_device(dev); + else + return NULL; +} + +struct prod_dev_id { + unsigned int product_id; + unsigned char device_id; +}; + +static int __find_bmc_prod_dev_id(struct device *dev, void *data) +{ + struct prod_dev_id *id = data; + struct bmc_device *bmc = to_bmc_device(dev); + + return (bmc->id.product_id == id->product_id + && bmc->id.device_id == id->device_id); +} + +static struct bmc_device *ipmi_find_bmc_prod_dev_id( + struct device_driver *drv, + unsigned int product_id, unsigned char device_id) +{ + struct prod_dev_id id = { + .product_id = product_id, + .device_id = device_id, + }; + struct device *dev; + + dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); + if (dev) + return to_bmc_device(dev); + else + return NULL; +} + +static ssize_t device_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 10, "%u\n", bmc->id.device_id); +} +static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL); + +static ssize_t provides_device_sdrs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 10, "%u\n", + (bmc->id.device_revision & 0x80) >> 7); +} +static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, + NULL); + +static ssize_t revision_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 20, "%u\n", + bmc->id.device_revision & 0x0F); +} +static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL); + +static ssize_t firmware_revision_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, + bmc->id.firmware_revision_2); +} +static 
DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL); + +static ssize_t ipmi_version_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 20, "%u.%u\n", + ipmi_version_major(&bmc->id), + ipmi_version_minor(&bmc->id)); +} +static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL); + +static ssize_t add_dev_support_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 10, "0x%02x\n", + bmc->id.additional_device_support); +} +static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, + NULL); + +static ssize_t manufacturer_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); +} +static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL); + +static ssize_t product_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); +} +static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL); + +static ssize_t aux_firmware_rev_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", + bmc->id.aux_firmware_revision[3], + bmc->id.aux_firmware_revision[2], + bmc->id.aux_firmware_revision[1], + bmc->id.aux_firmware_revision[0]); +} +static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); + +static ssize_t guid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = to_bmc_device(dev); + + return snprintf(buf, 100, "%Lx%Lx\n", + (long long) bmc->guid[0], + (long long) bmc->guid[8]); +} +static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL); + +static struct attribute *bmc_dev_attrs[] = { + &dev_attr_device_id.attr, + &dev_attr_provides_device_sdrs.attr, + &dev_attr_revision.attr, + &dev_attr_firmware_revision.attr, + &dev_attr_ipmi_version.attr, + &dev_attr_additional_device_support.attr, + &dev_attr_manufacturer_id.attr, + &dev_attr_product_id.attr, + &dev_attr_aux_firmware_revision.attr, + &dev_attr_guid.attr, + NULL +}; + +static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct bmc_device *bmc = to_bmc_device(dev); + umode_t mode = attr->mode; + + if (attr == &dev_attr_aux_firmware_revision.attr) + return bmc->id.aux_firmware_revision_set ? mode : 0; + if (attr == &dev_attr_guid.attr) + return bmc->guid_set ? 
mode : 0;
+	return mode;
+}
+
+static struct attribute_group bmc_dev_attr_group = {
+	.attrs		= bmc_dev_attrs,
+	.is_visible	= bmc_dev_attr_is_visible,
+};
+
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+	&bmc_dev_attr_group,
+	NULL
+};
+
+static struct device_type bmc_device_type = {
+	.groups = bmc_dev_attr_groups,
+};
+
+static void
+release_bmc_device(struct device *dev)
+{
+	kfree(to_bmc_device(dev));
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
+
+	platform_device_unregister(&bmc->pdev);
+}
+
+static void ipmi_bmc_unregister(ipmi_smi_t intf)
+{
+	struct bmc_device *bmc = intf->bmc;
+
+	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+	if (intf->my_dev_name) {
+		sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+		kfree(intf->my_dev_name);
+		intf->my_dev_name = NULL;
+	}
+
+	mutex_lock(&ipmidriver_mutex);
+	kref_put(&bmc->usecount, cleanup_bmc_device);
+	intf->bmc = NULL;
+	mutex_unlock(&ipmidriver_mutex);
+}
+
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
+{
+	int rv;
+	struct bmc_device *bmc = intf->bmc;
+	struct bmc_device *old_bmc;
+
+	mutex_lock(&ipmidriver_mutex);
+
+	/*
+	 * Try to find if there is already a bmc_device struct
+	 * representing the interfaced BMC
+	 */
+	if (bmc->guid_set)
+		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
+	else
+		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+						    bmc->id.product_id,
+						    bmc->id.device_id);
+
+	/*
+	 * If there is already a bmc_device, free the new one,
+	 * otherwise register the new BMC device
+	 */
+	if (old_bmc) {
+		kfree(bmc);
+		intf->bmc = old_bmc;
+		bmc = old_bmc;
+
+		kref_get(&bmc->usecount);
+		mutex_unlock(&ipmidriver_mutex);
+
+		printk(KERN_INFO
+		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
+		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+		       bmc->id.manufacturer_id,
+		       bmc->id.product_id,
+		       bmc->id.device_id);
+	} else {
+		unsigned char orig_dev_id = bmc->id.device_id;
+		int warn_printed = 0;
+
+		snprintf(bmc->name, sizeof(bmc->name),
+			 "ipmi_bmc.%4.4x", bmc->id.product_id);
+		bmc->pdev.name = bmc->name;
+
+		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+						 bmc->id.product_id,
+						 bmc->id.device_id)) {
+			if (!warn_printed) {
+				printk(KERN_WARNING PFX
+				       "This machine has two different BMCs"
+				       " with the same product id and device"
+				       " id. This is an error in the"
+				       " firmware, but we are incrementing"
+				       " the device id to work around the"
+				       " problem. Prod ID = 0x%x, Dev ID = 0x%x\n",
+				       bmc->id.product_id, bmc->id.device_id);
+				warn_printed = 1;
+			}
+			bmc->id.device_id++; /* Wraps at 255 */
+			if (bmc->id.device_id == orig_dev_id) {
+				printk(KERN_ERR PFX
+				       "Out of device ids!\n");
+				break;
+			}
+		}
+
+		bmc->pdev.dev.driver = &ipmidriver.driver;
+		bmc->pdev.id = bmc->id.device_id;
+		bmc->pdev.dev.release = release_bmc_device;
+		bmc->pdev.dev.type = &bmc_device_type;
+		kref_init(&bmc->usecount);
+
+		rv = platform_device_register(&bmc->pdev);
+		mutex_unlock(&ipmidriver_mutex);
+		if (rv) {
+			put_device(&bmc->pdev.dev);
+			printk(KERN_ERR
+			       "ipmi_msghandler:"
+			       " Unable to register bmc device: %d\n",
+			       rv);
+			/*
+			 * Don't go to out_err, you can only do that if
+			 * the device is registered already.
+			 */
+			return rv;
+		}
+
+		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+			 bmc->id.manufacturer_id,
+			 bmc->id.product_id,
+			 bmc->id.device_id);
+	}
+
+	/*
+	 * Create symlinks from the system interface device to the
+	 * bmc device and back.
+ */ + rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); + if (rv) { + printk(KERN_ERR + "ipmi_msghandler: Unable to create bmc symlink: %d\n", + rv); + goto out_err; + } + + intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum); + if (!intf->my_dev_name) { + rv = -ENOMEM; + printk(KERN_ERR + "ipmi_msghandler: allocate link from BMC: %d\n", + rv); + goto out_err; + } + + rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, + intf->my_dev_name); + if (rv) { + kfree(intf->my_dev_name); + intf->my_dev_name = NULL; + printk(KERN_ERR + "ipmi_msghandler:" + " Unable to create symlink to bmc: %d\n", + rv); + goto out_err; + } + + return 0; + +out_err: + ipmi_bmc_unregister(intf); + return rv; +} + +static int +send_guid_cmd(ipmi_smi_t intf, int chan) +{ + struct kernel_ipmi_msg msg; + struct ipmi_system_interface_addr si; + + si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si.channel = IPMI_BMC_CHANNEL; + si.lun = 0; + + msg.netfn = IPMI_NETFN_APP_REQUEST; + msg.cmd = IPMI_GET_DEVICE_GUID_CMD; + msg.data = NULL; + msg.data_len = 0; + return i_ipmi_request(NULL, + intf, + (struct ipmi_addr *) &si, + 0, + &msg, + intf, + NULL, + NULL, + 0, + intf->channels[0].address, + intf->channels[0].lun, + -1, 0); +} + +static void +guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) +{ + if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) + || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) + || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) + /* Not for me */ + return; + + if (msg->msg.data[0] != 0) { + /* Error from getting the GUID, the BMC doesn't have one. */ + intf->bmc->guid_set = 0; + goto out; + } + + if (msg->msg.data_len < 17) { + intf->bmc->guid_set = 0; + printk(KERN_WARNING PFX + "guid_handler: The GUID response from the BMC was too" + " short, it was %d but should have been 17. Assuming" + " GUID is not available.\n", + msg->msg.data_len); + goto out; + } + + memcpy(intf->bmc->guid, msg->msg.data, 16); + intf->bmc->guid_set = 1; + out: + wake_up(&intf->waitq); +} + +static void +get_guid(ipmi_smi_t intf) +{ + int rv; + + intf->bmc->guid_set = 0x2; + intf->null_user_handler = guid_handler; + rv = send_guid_cmd(intf, 0); + if (rv) + /* Send failed, no GUID available. */ + intf->bmc->guid_set = 0; + wait_event(intf->waitq, intf->bmc->guid_set != 2); + intf->null_user_handler = NULL; +} + +static int +send_channel_info_cmd(ipmi_smi_t intf, int chan) +{ + struct kernel_ipmi_msg msg; + unsigned char data[1]; + struct ipmi_system_interface_addr si; + + si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si.channel = IPMI_BMC_CHANNEL; + si.lun = 0; + + msg.netfn = IPMI_NETFN_APP_REQUEST; + msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; + msg.data = data; + msg.data_len = 1; + data[0] = chan; + return i_ipmi_request(NULL, + intf, + (struct ipmi_addr *) &si, + 0, + &msg, + intf, + NULL, + NULL, + 0, + intf->channels[0].address, + intf->channels[0].lun, + -1, 0); +} + +static void +channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) +{ + int rv = 0; + int chan; + + if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) + && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) + && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { + /* It's the one we want */ + if (msg->msg.data[0] != 0) { + /* Got an error from the channel, just go on. */ + + if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { + /* + * If the MC does not support this + * command, that is legal. We just + * assume it has one IPMB at channel + * zero. 
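get_guid() above turns the asynchronous message path into a synchronous call: it installs a temporary intf->null_user_handler, marks guid_set with the sentinel value 2, sends the request, and sleeps in wait_event() until the handler stores a final 0 or 1. A condensed sketch of the same recipe; fetch_sync() and fetch_done() are hypothetical names:

/* Sentinel protocol: 2 = request in flight, 1 = got it, 0 = unavailable. */
static void fetch_done(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	intf->bmc->guid_set = (msg->msg.data[0] == 0) ? 1 : 0;
	wake_up(&intf->waitq);
}

static void fetch_sync(ipmi_smi_t intf)
{
	intf->bmc->guid_set = 2;		/* mark "in flight" */
	intf->null_user_handler = fetch_done;
	if (send_guid_cmd(intf, 0))
		intf->bmc->guid_set = 0;	/* send failed, don't sleep */
	wait_event(intf->waitq, intf->bmc->guid_set != 2);
	intf->null_user_handler = NULL;
}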
+ */ + intf->channels[0].medium + = IPMI_CHANNEL_MEDIUM_IPMB; + intf->channels[0].protocol + = IPMI_CHANNEL_PROTOCOL_IPMB; + + intf->curr_channel = IPMI_MAX_CHANNELS; + wake_up(&intf->waitq); + goto out; + } + goto next_channel; + } + if (msg->msg.data_len < 4) { + /* Message not big enough, just go on. */ + goto next_channel; + } + chan = intf->curr_channel; + intf->channels[chan].medium = msg->msg.data[2] & 0x7f; + intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; + + next_channel: + intf->curr_channel++; + if (intf->curr_channel >= IPMI_MAX_CHANNELS) + wake_up(&intf->waitq); + else + rv = send_channel_info_cmd(intf, intf->curr_channel); + + if (rv) { + /* Got an error somehow, just give up. */ + printk(KERN_WARNING PFX + "Error sending channel information for channel" + " %d: %d\n", intf->curr_channel, rv); + + intf->curr_channel = IPMI_MAX_CHANNELS; + wake_up(&intf->waitq); + } + } + out: + return; +} + +static void ipmi_poll(ipmi_smi_t intf) +{ + if (intf->handlers->poll) + intf->handlers->poll(intf->send_info); + /* In case something came in */ + handle_new_recv_msgs(intf); +} + +void ipmi_poll_interface(ipmi_user_t user) +{ + ipmi_poll(user->intf); +} +EXPORT_SYMBOL(ipmi_poll_interface); + +int ipmi_register_smi(struct ipmi_smi_handlers *handlers, + void *send_info, + struct ipmi_device_id *device_id, + struct device *si_dev, + unsigned char slave_addr) +{ + int i, j; + int rv; + ipmi_smi_t intf; + ipmi_smi_t tintf; + struct list_head *link; + + /* + * Make sure the driver is actually initialized, this handles + * problems with initialization order. + */ + if (!initialized) { + rv = ipmi_init_msghandler(); + if (rv) + return rv; + /* + * The init code doesn't return an error if it was turned + * off, but it won't initialize. Check that. + */ + if (!initialized) + return -ENODEV; + } + + intf = kzalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) + return -ENOMEM; + + intf->ipmi_version_major = ipmi_version_major(device_id); + intf->ipmi_version_minor = ipmi_version_minor(device_id); + + intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); + if (!intf->bmc) { + kfree(intf); + return -ENOMEM; + } + intf->intf_num = -1; /* Mark it invalid for now. 
*/ + kref_init(&intf->refcount); + intf->bmc->id = *device_id; + intf->si_dev = si_dev; + for (j = 0; j < IPMI_MAX_CHANNELS; j++) { + intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; + intf->channels[j].lun = 2; + } + if (slave_addr != 0) + intf->channels[0].address = slave_addr; + INIT_LIST_HEAD(&intf->users); + intf->handlers = handlers; + intf->send_info = send_info; + spin_lock_init(&intf->seq_lock); + for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { + intf->seq_table[j].inuse = 0; + intf->seq_table[j].seqid = 0; + } + intf->curr_seq = 0; +#ifdef CONFIG_PROC_FS + mutex_init(&intf->proc_entry_lock); +#endif + spin_lock_init(&intf->waiting_rcv_msgs_lock); + INIT_LIST_HEAD(&intf->waiting_rcv_msgs); + tasklet_init(&intf->recv_tasklet, + smi_recv_tasklet, + (unsigned long) intf); + atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); + spin_lock_init(&intf->xmit_msgs_lock); + INIT_LIST_HEAD(&intf->xmit_msgs); + INIT_LIST_HEAD(&intf->hp_xmit_msgs); + spin_lock_init(&intf->events_lock); + atomic_set(&intf->event_waiters, 0); + intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; + INIT_LIST_HEAD(&intf->waiting_events); + intf->waiting_events_count = 0; + mutex_init(&intf->cmd_rcvrs_mutex); + spin_lock_init(&intf->maintenance_mode_lock); + INIT_LIST_HEAD(&intf->cmd_rcvrs); + init_waitqueue_head(&intf->waitq); + for (i = 0; i < IPMI_NUM_STATS; i++) + atomic_set(&intf->stats[i], 0); + + intf->proc_dir = NULL; + + mutex_lock(&smi_watchers_mutex); + mutex_lock(&ipmi_interfaces_mutex); + /* Look for a hole in the numbers. */ + i = 0; + link = &ipmi_interfaces; + list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { + if (tintf->intf_num != i) { + link = &tintf->link; + break; + } + i++; + } + /* Add the new interface in numeric order. */ + if (i == 0) + list_add_rcu(&intf->link, &ipmi_interfaces); + else + list_add_tail_rcu(&intf->link, link); + + rv = handlers->start_processing(send_info, intf); + if (rv) + goto out; + + get_guid(intf); + + if ((intf->ipmi_version_major > 1) + || ((intf->ipmi_version_major == 1) + && (intf->ipmi_version_minor >= 5))) { + /* + * Start scanning the channels to see what is + * available. + */ + intf->null_user_handler = channel_handler; + intf->curr_channel = 0; + rv = send_channel_info_cmd(intf, 0); + if (rv) { + printk(KERN_WARNING PFX + "Error sending channel information for channel" + " 0, %d\n", rv); + goto out; + } + + /* Wait for the channel info to be read. */ + wait_event(intf->waitq, + intf->curr_channel >= IPMI_MAX_CHANNELS); + intf->null_user_handler = NULL; + } else { + /* Assume a single IPMB channel at zero. */ + intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; + intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; + intf->curr_channel = IPMI_MAX_CHANNELS; + } + + if (rv == 0) + rv = add_proc_entries(intf, i); + + rv = ipmi_bmc_register(intf, i); + + out: + if (rv) { + if (intf->proc_dir) + remove_proc_entries(intf); + intf->handlers = NULL; + list_del_rcu(&intf->link); + mutex_unlock(&ipmi_interfaces_mutex); + mutex_unlock(&smi_watchers_mutex); + synchronize_rcu(); + kref_put(&intf->refcount, intf_free); + } else { + /* + * Keep memory order straight for RCU readers. Make + * sure everything else is committed to memory before + * setting intf_num to mark the interface valid. + */ + smp_wmb(); + intf->intf_num = i; + mutex_unlock(&ipmi_interfaces_mutex); + /* After this point the interface is legal to use. 
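The smp_wmb() above is the publish half of a publish/subscribe ordering: every store that initializes the interface must become visible before the store to intf_num that marks it valid, because lockless readers test intf_num without holding ipmi_interfaces_mutex. A minimal sketch of the general pattern, with a hypothetical struct widget:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct widget {
	int data;	/* initialized first */
	int valid;	/* 0 until fully initialized */
};

static void publish(struct widget *w, int data)
{
	w->data = data;		/* all initialization stores... */
	smp_wmb();		/* ...ordered before the validity store */
	w->valid = 1;
}

static int read_if_valid(struct widget *w)
{
	if (!READ_ONCE(w->valid))
		return -1;
	smp_rmb();		/* pairs with the smp_wmb() in publish() */
	return w->data;
}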
*/ + call_smi_watchers(i, intf->si_dev); + mutex_unlock(&smi_watchers_mutex); + } + + return rv; +} +EXPORT_SYMBOL(ipmi_register_smi); + +static void deliver_smi_err_response(ipmi_smi_t intf, + struct ipmi_smi_msg *msg, + unsigned char err) +{ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = err; + msg->rsp_size = 3; + /* It's an error, so it will never requeue, no need to check return. */ + handle_one_recv_msg(intf, msg); +} + +static void cleanup_smi_msgs(ipmi_smi_t intf) +{ + int i; + struct seq_table *ent; + struct ipmi_smi_msg *msg; + struct list_head *entry; + struct list_head tmplist; + + /* Clear out our transmit queues and hold the messages. */ + INIT_LIST_HEAD(&tmplist); + list_splice_tail(&intf->hp_xmit_msgs, &tmplist); + list_splice_tail(&intf->xmit_msgs, &tmplist); + + /* Current message first, to preserve order */ + while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { + /* Wait for the message to clear out. */ + schedule_timeout(1); + } + + /* No need for locks, the interface is down. */ + + /* + * Return errors for all pending messages in queue and in the + * tables waiting for remote responses. + */ + while (!list_empty(&tmplist)) { + entry = tmplist.next; + list_del(entry); + msg = list_entry(entry, struct ipmi_smi_msg, link); + deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); + } + + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { + ent = &(intf->seq_table[i]); + if (!ent->inuse) + continue; + deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED); + } +} + +int ipmi_unregister_smi(ipmi_smi_t intf) +{ + struct ipmi_smi_watcher *w; + int intf_num = intf->intf_num; + ipmi_user_t user; + + ipmi_bmc_unregister(intf); + + mutex_lock(&smi_watchers_mutex); + mutex_lock(&ipmi_interfaces_mutex); + intf->intf_num = -1; + intf->in_shutdown = true; + list_del_rcu(&intf->link); + mutex_unlock(&ipmi_interfaces_mutex); + synchronize_rcu(); + + cleanup_smi_msgs(intf); + + /* Clean up the effects of users on the lower-level software. */ + mutex_lock(&ipmi_interfaces_mutex); + rcu_read_lock(); + list_for_each_entry_rcu(user, &intf->users, link) { + module_put(intf->handlers->owner); + if (intf->handlers->dec_usecount) + intf->handlers->dec_usecount(intf->send_info); + } + rcu_read_unlock(); + intf->handlers = NULL; + mutex_unlock(&ipmi_interfaces_mutex); + + remove_proc_entries(intf); + + /* + * Call all the watcher interfaces to tell them that + * an interface is gone. + */ + list_for_each_entry(w, &smi_watchers, link) + w->smi_gone(intf_num); + mutex_unlock(&smi_watchers_mutex); + + kref_put(&intf->refcount, intf_free); + return 0; +} +EXPORT_SYMBOL(ipmi_unregister_smi); + +static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + struct ipmi_ipmb_addr ipmb_addr; + struct ipmi_recv_msg *recv_msg; + + /* + * This is 11, not 10, because the response must contain a + * completion code. + */ + if (msg->rsp_size < 11) { + /* Message not big enough, just ignore it. */ + ipmi_inc_stat(intf, invalid_ipmb_responses); + return 0; + } + + if (msg->rsp[2] != 0) { + /* An error getting the response, just ignore it. */ + return 0; + } + + ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr.slave_addr = msg->rsp[6]; + ipmb_addr.channel = msg->rsp[3] & 0x0f; + ipmb_addr.lun = msg->rsp[7] & 3; + + /* + * It's a response from a remote entity. Look up the sequence + * number and handle the response. 
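deliver_smi_err_response() above fabricates a local error reply in place. Since data[0] holds (netfn << 2) | lun, OR-ing in 4 sets bit 0 of the netfn field, which turns a request netfn into the corresponding response netfn. A two-assert check of that identity, with arbitrary example values:

#include <assert.h>

int main(void)
{
	unsigned char netfn = 0x06, lun = 0x02;	/* e.g. APP request, LUN 2 */
	unsigned char data0 = (netfn << 2) | lun;

	/* Setting bit 2 of data[0] == replacing netfn with netfn | 1. */
	assert((unsigned char)(data0 | 4) == (((netfn | 1) << 2) | lun));
	/* Idempotent when netfn is already a (odd) response netfn. */
	assert((unsigned char)((data0 | 4) | 4) == (data0 | 4));
	return 0;
}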
+ */ + if (intf_find_seq(intf, + msg->rsp[7] >> 2, + msg->rsp[3] & 0x0f, + msg->rsp[8], + (msg->rsp[4] >> 2) & (~1), + (struct ipmi_addr *) &(ipmb_addr), + &recv_msg)) { + /* + * We were unable to find the sequence number, + * so just nuke the message. + */ + ipmi_inc_stat(intf, unhandled_ipmb_responses); + return 0; + } + + memcpy(recv_msg->msg_data, + &(msg->rsp[9]), + msg->rsp_size - 9); + /* + * The other fields matched, so no need to set them, except + * for netfn, which needs to be the response that was + * returned, not the request value. + */ + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = msg->rsp_size - 10; + recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + ipmi_inc_stat(intf, handled_ipmb_responses); + deliver_response(recv_msg); + + return 0; +} + +static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + struct cmd_rcvr *rcvr; + int rv = 0; + unsigned char netfn; + unsigned char cmd; + unsigned char chan; + ipmi_user_t user = NULL; + struct ipmi_ipmb_addr *ipmb_addr; + struct ipmi_recv_msg *recv_msg; + + if (msg->rsp_size < 10) { + /* Message not big enough, just ignore it. */ + ipmi_inc_stat(intf, invalid_commands); + return 0; + } + + if (msg->rsp[2] != 0) { + /* An error getting the response, just ignore it. */ + return 0; + } + + netfn = msg->rsp[4] >> 2; + cmd = msg->rsp[8]; + chan = msg->rsp[3] & 0xf; + + rcu_read_lock(); + rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); + if (rcvr) { + user = rcvr->user; + kref_get(&user->refcount); + } else + user = NULL; + rcu_read_unlock(); + + if (user == NULL) { + /* We didn't find a user, deliver an error response. */ + ipmi_inc_stat(intf, unhandled_commands); + + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_SEND_MSG_CMD; + msg->data[2] = msg->rsp[3]; + msg->data[3] = msg->rsp[6]; + msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); + msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); + msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; + /* rqseq/lun */ + msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); + msg->data[8] = msg->rsp[8]; /* cmd */ + msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; + msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); + msg->data_size = 11; + +#ifdef DEBUG_MSGING + { + int m; + printk("Invalid command:"); + for (m = 0; m < msg->data_size; m++) + printk(" %2.2x", msg->data[m]); + printk("\n"); + } +#endif + rcu_read_lock(); + if (!intf->in_shutdown) { + smi_send(intf, intf->handlers, msg, 0); + /* + * We used the message, so return the value + * that causes it to not be freed or + * queued. + */ + rv = -1; + } + rcu_read_unlock(); + } else { + /* Deliver the message to the user. */ + ipmi_inc_stat(intf, handled_commands); + + recv_msg = ipmi_alloc_recv_msg(); + if (!recv_msg) { + /* + * We couldn't allocate memory for the + * message, so requeue it for handling + * later. + */ + rv = 1; + kref_put(&user->refcount, free_user); + } else { + /* Extract the source address from the data. */ + ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; + ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr->slave_addr = msg->rsp[6]; + ipmb_addr->lun = msg->rsp[7] & 3; + ipmb_addr->channel = msg->rsp[3] & 0xf; + + /* + * Extract the rest of the message information + * from the IPMB header. 
+ */ + recv_msg->user = user; + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[7] >> 2; + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.cmd = msg->rsp[8]; + recv_msg->msg.data = recv_msg->msg_data; + + /* + * We chop off 10, not 9 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 10; + memcpy(recv_msg->msg_data, + &(msg->rsp[9]), + msg->rsp_size - 10); + deliver_response(recv_msg); + } + } + + return rv; +} + +static int handle_lan_get_msg_rsp(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + struct ipmi_lan_addr lan_addr; + struct ipmi_recv_msg *recv_msg; + + + /* + * This is 13, not 12, because the response must contain a + * completion code. + */ + if (msg->rsp_size < 13) { + /* Message not big enough, just ignore it. */ + ipmi_inc_stat(intf, invalid_lan_responses); + return 0; + } + + if (msg->rsp[2] != 0) { + /* An error getting the response, just ignore it. */ + return 0; + } + + lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; + lan_addr.session_handle = msg->rsp[4]; + lan_addr.remote_SWID = msg->rsp[8]; + lan_addr.local_SWID = msg->rsp[5]; + lan_addr.channel = msg->rsp[3] & 0x0f; + lan_addr.privilege = msg->rsp[3] >> 4; + lan_addr.lun = msg->rsp[9] & 3; + + /* + * It's a response from a remote entity. Look up the sequence + * number and handle the response. + */ + if (intf_find_seq(intf, + msg->rsp[9] >> 2, + msg->rsp[3] & 0x0f, + msg->rsp[10], + (msg->rsp[6] >> 2) & (~1), + (struct ipmi_addr *) &(lan_addr), + &recv_msg)) { + /* + * We were unable to find the sequence number, + * so just nuke the message. + */ + ipmi_inc_stat(intf, unhandled_lan_responses); + return 0; + } + + memcpy(recv_msg->msg_data, + &(msg->rsp[11]), + msg->rsp_size - 11); + /* + * The other fields matched, so no need to set them, except + * for netfn, which needs to be the response that was + * returned, not the request value. + */ + recv_msg->msg.netfn = msg->rsp[6] >> 2; + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = msg->rsp_size - 12; + recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + ipmi_inc_stat(intf, handled_lan_responses); + deliver_response(recv_msg); + + return 0; +} + +static int handle_lan_get_msg_cmd(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + struct cmd_rcvr *rcvr; + int rv = 0; + unsigned char netfn; + unsigned char cmd; + unsigned char chan; + ipmi_user_t user = NULL; + struct ipmi_lan_addr *lan_addr; + struct ipmi_recv_msg *recv_msg; + + if (msg->rsp_size < 12) { + /* Message not big enough, just ignore it. */ + ipmi_inc_stat(intf, invalid_commands); + return 0; + } + + if (msg->rsp[2] != 0) { + /* An error getting the response, just ignore it. */ + return 0; + } + + netfn = msg->rsp[6] >> 2; + cmd = msg->rsp[10]; + chan = msg->rsp[3] & 0xf; + + rcu_read_lock(); + rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); + if (rcvr) { + user = rcvr->user; + kref_get(&user->refcount); + } else + user = NULL; + rcu_read_unlock(); + + if (user == NULL) { + /* We didn't find a user, just give up. */ + ipmi_inc_stat(intf, unhandled_commands); + + /* + * Don't do anything with these messages, just allow + * them to be freed. + */ + rv = 0; + } else { + /* Deliver the message to the user. */ + ipmi_inc_stat(intf, handled_commands); + + recv_msg = ipmi_alloc_recv_msg(); + if (!recv_msg) { + /* + * We couldn't allocate memory for the + * message, so requeue it for handling later. + */ + rv = 1; + kref_put(&user->refcount, free_user); + } else { + /* Extract the source address from the data. 
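The length arithmetic above follows one rule: the user payload excludes both the leading framing bytes and the single trailing IPMB checksum, hence rsp_size - 10 for IPMB commands here and rsp_size - 12 for the LAN variant that follows. A tiny standalone check with a hypothetical 13-byte response:

#include <assert.h>

int main(void)
{
	/* Hypothetical Get Message response: 9 framing bytes,
	 * 3 payload bytes, 1 trailing IPMB checksum = 13 total. */
	unsigned char rsp[13] = { 0 };
	int rsp_size = sizeof(rsp);

	int payload_off = 9;			/* first byte after the header */
	int payload_len = rsp_size - 10;	/* drop header and trailing csum */

	assert(payload_len == 3);
	assert(payload_off + payload_len == rsp_size - 1); /* csum left out */
	return 0;
}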
*/
+			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+			lan_addr->session_handle = msg->rsp[4];
+			lan_addr->remote_SWID = msg->rsp[8];
+			lan_addr->local_SWID = msg->rsp[5];
+			lan_addr->lun = msg->rsp[9] & 3;
+			lan_addr->channel = msg->rsp[3] & 0xf;
+			lan_addr->privilege = msg->rsp[3] >> 4;
+
+			/*
+			 * Extract the rest of the message information
+			 * from the IPMB header.
+			 */
+			recv_msg->user = user;
+			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+			recv_msg->msgid = msg->rsp[9] >> 2;
+			recv_msg->msg.netfn = msg->rsp[6] >> 2;
+			recv_msg->msg.cmd = msg->rsp[10];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * We chop off 12, not 11 bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 12;
+			memcpy(recv_msg->msg_data,
+			       &(msg->rsp[11]),
+			       msg->rsp_size - 12);
+			deliver_response(recv_msg);
+		}
+	}
+
+	return rv;
+}
+
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM.  See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr *rcvr;
+	int rv = 0;
+	unsigned char netfn;
+	unsigned char cmd;
+	unsigned char chan;
+	ipmi_user_t user = NULL;
+	struct ipmi_system_interface_addr *smi_addr;
+	struct ipmi_recv_msg *recv_msg;
+
+	/*
+	 * We expect the OEM SW to perform error checking,
+	 * so we just do some basic sanity checks.
+	 */
+	if (msg->rsp_size < 4) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	/*
+	 * This is an OEM message, so the OEM needs to know how
+	 * to handle the message. We do no interpretation.
+	 */
+	netfn = msg->rsp[0] >> 2;
+	cmd = msg->rsp[1];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, just give up. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		/*
+		 * Don't do anything with these messages, just allow
+		 * them to be freed.
+		 */
+
+		rv = 0;
+	} else {
+		/* Deliver the message to the user. */
+		ipmi_inc_stat(intf, handled_commands);
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/*
+			 * OEM messages are expected to be delivered via
+			 * the system interface to SMS software.  We might
+			 * need to visit this again, depending on OEM
+			 * requirements.
+			 */
+			smi_addr = ((struct ipmi_system_interface_addr *)
+				    &(recv_msg->addr));
+			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+			smi_addr->channel = IPMI_BMC_CHANNEL;
+			smi_addr->lun = msg->rsp[0] & 3;
+
+			recv_msg->user = user;
+			recv_msg->user_msg_data = NULL;
+			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+			recv_msg->msg.cmd = msg->rsp[1];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * The message starts at byte 4, which follows the
+			 * Channel Byte in the "Get Message" command.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 4;
+			memcpy(recv_msg->msg_data,
+			       &(msg->rsp[4]),
+			       msg->rsp_size - 4);
+			deliver_response(recv_msg);
+		}
+	}
+
+	return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+				     struct ipmi_smi_msg *msg)
+{
+	struct ipmi_system_interface_addr *smi_addr;
+
+	recv_msg->msgid = 0;
+	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
+	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr->channel = IPMI_BMC_CHANNEL;
+	smi_addr->lun = msg->rsp[0] & 3;
+	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+	recv_msg->msg.netfn = msg->rsp[0] >> 2;
+	recv_msg->msg.cmd = msg->rsp[1];
+	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
+static int handle_read_event_rsp(ipmi_smi_t intf,
+				 struct ipmi_smi_msg *msg)
+{
+	struct ipmi_recv_msg *recv_msg, *recv_msg2;
+	struct list_head msgs;
+	ipmi_user_t user;
+	int rv = 0;
+	int deliver_count = 0;
+	unsigned long flags;
+
+	if (msg->rsp_size < 19) {
+		/* Message is too small to be an IPMB event. */
+		ipmi_inc_stat(intf, invalid_events);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the event, just ignore it. */
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&msgs);
+
+	spin_lock_irqsave(&intf->events_lock, flags);
+
+	ipmi_inc_stat(intf, events);
+
+	/*
+	 * Allocate and fill in one message for every user that is
+	 * getting events.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(user, &intf->users, link) {
+		if (!user->gets_events)
+			continue;
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			rcu_read_unlock();
+			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+						 link) {
+				list_del(&recv_msg->link);
+				ipmi_free_recv_msg(recv_msg);
+			}
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			goto out;
+		}
+
+		deliver_count++;
+
+		copy_event_into_recv_msg(recv_msg, msg);
+		recv_msg->user = user;
+		kref_get(&user->refcount);
+		list_add_tail(&(recv_msg->link), &msgs);
+	}
+	rcu_read_unlock();
+
+	if (deliver_count) {
+		/* Now deliver all the messages. */
+		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+			list_del(&recv_msg->link);
+			deliver_response(recv_msg);
+		}
+	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+		/*
+		 * No one to receive the message, put it in the queue if
+		 * there are not already too many things in the queue.
+		 */
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+ */ + rv = 1; + goto out; + } + + copy_event_into_recv_msg(recv_msg, msg); + list_add_tail(&(recv_msg->link), &(intf->waiting_events)); + intf->waiting_events_count++; + } else if (!intf->event_msg_printed) { + /* + * There are too many things in the queue, discard this + * message. + */ + printk(KERN_WARNING PFX "Event queue full, discarding" + " incoming events\n"); + intf->event_msg_printed = 1; + } + + out: + spin_unlock_irqrestore(&(intf->events_lock), flags); + + return rv; +} + +static int handle_bmc_rsp(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + struct ipmi_recv_msg *recv_msg; + struct ipmi_user *user; + + recv_msg = (struct ipmi_recv_msg *) msg->user_data; + if (recv_msg == NULL) { + printk(KERN_WARNING + "IPMI message received with no owner. This\n" + "could be because of a malformed message, or\n" + "because of a hardware error. Contact your\n" + "hardware vendor for assistance\n"); + return 0; + } + + user = recv_msg->user; + /* Make sure the user still exists. */ + if (user && !user->valid) { + /* The user for the message went away, so give up. */ + ipmi_inc_stat(intf, unhandled_local_responses); + ipmi_free_recv_msg(recv_msg); + } else { + struct ipmi_system_interface_addr *smi_addr; + + ipmi_inc_stat(intf, handled_local_responses); + recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + recv_msg->msgid = msg->msgid; + smi_addr = ((struct ipmi_system_interface_addr *) + &(recv_msg->addr)); + smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr->channel = IPMI_BMC_CHANNEL; + smi_addr->lun = msg->rsp[0] & 3; + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[1]; + memcpy(recv_msg->msg_data, + &(msg->rsp[2]), + msg->rsp_size - 2); + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = msg->rsp_size - 2; + deliver_response(recv_msg); + } + + return 0; +} + +/* + * Handle a received message. Return 1 if the message should be requeued, + * 0 if the message should be freed, or -1 if the message should not + * be freed or requeued. + */ +static int handle_one_recv_msg(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + int requeue; + int chan; + +#ifdef DEBUG_MSGING + int m; + printk("Recv:"); + for (m = 0; m < msg->rsp_size; m++) + printk(" %2.2x", msg->rsp[m]); + printk("\n"); +#endif + if (msg->rsp_size < 2) { + /* Message is too small to be correct. */ + printk(KERN_WARNING PFX "BMC returned too small a message" + " for netfn %x cmd %x, got %d bytes\n", + (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); + + /* Generate an error response for the message. */ + msg->rsp[0] = msg->data[0] | (1 << 2); + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = IPMI_ERR_UNSPECIFIED; + msg->rsp_size = 3; + } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) + || (msg->rsp[1] != msg->data[1])) { + /* + * The NetFN and Command in the response are not even + * marginally correct. + */ + printk(KERN_WARNING PFX "BMC returned incorrect response," + " expected netfn %x cmd %x, got netfn %x cmd %x\n", + (msg->data[0] >> 2) | 1, msg->data[1], + msg->rsp[0] >> 2, msg->rsp[1]); + + /* Generate an error response for the message. */ + msg->rsp[0] = msg->data[0] | (1 << 2); + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = IPMI_ERR_UNSPECIFIED; + msg->rsp_size = 3; + } + + if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) + && (msg->rsp[1] == IPMI_SEND_MSG_CMD) + && (msg->user_data != NULL)) { + /* + * It's a response to a response we sent. For this we + * deliver a send message response to the user.
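+ * Concretely, per the assignments below: the user gets a recv_msg + * with recv_type IPMI_RESPONSE_RESPONSE_TYPE whose single data + * byte is the Send Message completion code taken from rsp[2].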
+ */ + struct ipmi_recv_msg *recv_msg = msg->user_data; + + requeue = 0; + if (msg->rsp_size < 2) + /* Message is too small to be correct. */ + goto out; + + chan = msg->data[2] & 0x0f; + if (chan >= IPMI_MAX_CHANNELS) + /* Invalid channel number */ + goto out; + + if (!recv_msg) + goto out; + + /* Make sure the user still exists. */ + if (!recv_msg->user || !recv_msg->user->valid) + goto out; + + recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = 1; + recv_msg->msg_data[0] = msg->rsp[2]; + deliver_response(recv_msg); + } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) + && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { + /* It's from the receive queue. */ + chan = msg->rsp[3] & 0xf; + if (chan >= IPMI_MAX_CHANNELS) { + /* Invalid channel number */ + requeue = 0; + goto out; + } + + /* + * We need to make sure the channels have been initialized. + * The channel_handler routine will set the "curr_channel" + * equal to or greater than IPMI_MAX_CHANNELS when all the + * channels for this interface have been initialized. + */ + if (intf->curr_channel < IPMI_MAX_CHANNELS) { + requeue = 0; /* Throw the message away */ + goto out; + } + + switch (intf->channels[chan].medium) { + case IPMI_CHANNEL_MEDIUM_IPMB: + if (msg->rsp[4] & 0x04) { + /* + * It's a response, so find the + * requesting message and send it up. + */ + requeue = handle_ipmb_get_msg_rsp(intf, msg); + } else { + /* + * It's a command to the SMS from some other + * entity. Handle that. + */ + requeue = handle_ipmb_get_msg_cmd(intf, msg); + } + break; + + case IPMI_CHANNEL_MEDIUM_8023LAN: + case IPMI_CHANNEL_MEDIUM_ASYNC: + if (msg->rsp[6] & 0x04) { + /* + * It's a response, so find the + * requesting message and send it up. + */ + requeue = handle_lan_get_msg_rsp(intf, msg); + } else { + /* + * It's a command to the SMS from some other + * entity. Handle that. + */ + requeue = handle_lan_get_msg_cmd(intf, msg); + } + break; + + default: + /* Check for OEM Channels. Clients had better + register for these commands. */ + if ((intf->channels[chan].medium + >= IPMI_CHANNEL_MEDIUM_OEM_MIN) + && (intf->channels[chan].medium + <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { + requeue = handle_oem_get_msg_cmd(intf, msg); + } else { + /* + * We don't handle the channel type, so just + * free the message. + */ + requeue = 0; + } + } + + } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) + && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { + /* It's an asynchronous event. */ + requeue = handle_read_event_rsp(intf, msg); + } else { + /* It's a response from the local BMC. */ + requeue = handle_bmc_rsp(intf, msg); + } + + out: + return requeue; +} + +/* + * If there are messages in the queue or pretimeouts, handle them. + */ +static void handle_new_recv_msgs(ipmi_smi_t intf) +{ + struct ipmi_smi_msg *smi_msg; + unsigned long flags = 0; + int rv; + int run_to_completion = intf->run_to_completion; + + /* See if any waiting messages need to be processed. */ + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + while (!list_empty(&intf->waiting_rcv_msgs)) { + smi_msg = list_entry(intf->waiting_rcv_msgs.next, + struct ipmi_smi_msg, link); + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); + rv = handle_one_recv_msg(intf, smi_msg); + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + if (rv > 0) { + /* + * To preserve message order, quit if we + * can't handle a message. 
+ */ + break; + } else { + list_del(&smi_msg->link); + if (rv == 0) + /* Message handled */ + ipmi_free_smi_msg(smi_msg); + /* If rv < 0, fatal error, del but don't free. */ + } + } + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); + + /* + * If the pretimeout count is non-zero, decrement one from it and + * deliver pretimeouts to all the users. + */ + if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { + ipmi_user_t user; + + rcu_read_lock(); + list_for_each_entry_rcu(user, &intf->users, link) { + if (user->handler->ipmi_watchdog_pretimeout) + user->handler->ipmi_watchdog_pretimeout( + user->handler_data); + } + rcu_read_unlock(); + } +} + +static void smi_recv_tasklet(unsigned long val) +{ + unsigned long flags = 0; /* keep us warning-free. */ + ipmi_smi_t intf = (ipmi_smi_t) val; + int run_to_completion = intf->run_to_completion; + struct ipmi_smi_msg *newmsg = NULL; + + /* + * Start the next message if available. + * + * Do this here, not in the actual receiver, because we may deadlock + * because the lower layer is allowed to hold locks while calling + * message delivery. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (intf->curr_msg == NULL && !intf->in_shutdown) { + struct list_head *entry = NULL; + + /* Pick the high priority queue first. */ + if (!list_empty(&intf->hp_xmit_msgs)) + entry = intf->hp_xmit_msgs.next; + else if (!list_empty(&intf->xmit_msgs)) + entry = intf->xmit_msgs.next; + + if (entry) { + list_del(entry); + newmsg = list_entry(entry, struct ipmi_smi_msg, link); + intf->curr_msg = newmsg; + } + } + if (!run_to_completion) + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + if (newmsg) + intf->handlers->sender(intf->send_info, newmsg); + + handle_new_recv_msgs(intf); +} + +/* Handle a new message from the lower layer. */ +void ipmi_smi_msg_received(ipmi_smi_t intf, + struct ipmi_smi_msg *msg) +{ + unsigned long flags = 0; /* keep us warning-free. */ + int run_to_completion = intf->run_to_completion; + + if ((msg->data_size >= 2) + && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) + && (msg->data[1] == IPMI_SEND_MSG_CMD) + && (msg->user_data == NULL)) { + + if (intf->in_shutdown) + goto free_msg; + + /* + * This is the local response to a command send, start + * the timer for these. The user_data will not be + * NULL if this is a response send, and we will let + * response sends just go through. + */ + + /* + * Check for errors; if we get certain errors (ones + * that mean basically we can try again later), we + * ignore them and start the timer. Otherwise we + * report the error immediately. + */ + if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) + && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) + && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) + && (msg->rsp[2] != IPMI_BUS_ERR) + && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { + int chan = msg->rsp[3] & 0xf; + + /* Got an error sending the message, handle it. */ + if (chan >= IPMI_MAX_CHANNELS) + ; /* This shouldn't happen */ + else if ((intf->channels[chan].medium + == IPMI_CHANNEL_MEDIUM_8023LAN) + || (intf->channels[chan].medium + == IPMI_CHANNEL_MEDIUM_ASYNC)) + ipmi_inc_stat(intf, sent_lan_command_errs); + else + ipmi_inc_stat(intf, sent_ipmb_command_errs); + intf_err_seq(intf, msg->msgid, msg->rsp[2]); + } else + /* The message was sent, start the timer.
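+ * (The sequence number packed into msgid, see STORE_SEQ_IN_MSGID() + * further down, is what lets the timer code locate the matching + * seq_table entry for this request.)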
*/ + intf_start_seq_timer(intf, msg->msgid); + +free_msg: + ipmi_free_smi_msg(msg); + } else { + /* + * To preserve message order, we keep a queue and deliver from + * a tasklet. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + list_add_tail(&msg->link, &intf->waiting_rcv_msgs); + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); + } + + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (msg == intf->curr_msg) + intf->curr_msg = NULL; + if (!run_to_completion) + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + + if (run_to_completion) + smi_recv_tasklet((unsigned long) intf); + else + tasklet_schedule(&intf->recv_tasklet); +} +EXPORT_SYMBOL(ipmi_smi_msg_received); + +void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) +{ + if (intf->in_shutdown) + return; + + atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); + tasklet_schedule(&intf->recv_tasklet); +} +EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); + +static struct ipmi_smi_msg * +smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, + unsigned char seq, long seqid) +{ + struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); + if (!smi_msg) + /* + * If we can't allocate the message, then just return, we + * get 4 retries, so this should be ok. + */ + return NULL; + + memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); + smi_msg->data_size = recv_msg->msg.data_len; + smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); + +#ifdef DEBUG_MSGING + { + int m; + printk("Resend: "); + for (m = 0; m < smi_msg->data_size; m++) + printk(" %2.2x", smi_msg->data[m]); + printk("\n"); + } +#endif + return smi_msg; +} + +static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, + struct list_head *timeouts, long timeout_period, + int slot, unsigned long *flags, + unsigned int *waiting_msgs) +{ + struct ipmi_recv_msg *msg; + struct ipmi_smi_handlers *handlers; + + if (intf->in_shutdown) + return; + + if (!ent->inuse) + return; + + ent->timeout -= timeout_period; + if (ent->timeout > 0) { + (*waiting_msgs)++; + return; + } + + if (ent->retries_left == 0) { + /* The message has used all its retries. */ + ent->inuse = 0; + msg = ent->recv_msg; + list_add_tail(&msg->link, timeouts); + if (ent->broadcast) + ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); + else if (is_lan_addr(&ent->recv_msg->addr)) + ipmi_inc_stat(intf, timed_out_lan_commands); + else + ipmi_inc_stat(intf, timed_out_ipmb_commands); + } else { + struct ipmi_smi_msg *smi_msg; + /* More retries, send again. */ + + (*waiting_msgs)++; + + /* + * Start with the max timer, set to normal timer after + * the message is sent. + */ + ent->timeout = MAX_MSG_TIMEOUT; + ent->retries_left--; + smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, + ent->seqid); + if (!smi_msg) { + if (is_lan_addr(&ent->recv_msg->addr)) + ipmi_inc_stat(intf, + dropped_rexmit_lan_commands); + else + ipmi_inc_stat(intf, + dropped_rexmit_ipmb_commands); + return; + } + + spin_unlock_irqrestore(&intf->seq_lock, *flags); + + /* + * Send the new message. We send with a zero + * priority. It timed out, I doubt time is that + * critical now, and high priority messages are really + * only for messages to the local MC, which don't get + * resent. 
+ */ + handlers = intf->handlers; + if (handlers) { + if (is_lan_addr(&ent->recv_msg->addr)) + ipmi_inc_stat(intf, + retransmitted_lan_commands); + else + ipmi_inc_stat(intf, + retransmitted_ipmb_commands); + + smi_send(intf, intf->handlers, smi_msg, 0); + } else + ipmi_free_smi_msg(smi_msg); + + spin_lock_irqsave(&intf->seq_lock, *flags); + } +} + +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) +{ + struct list_head timeouts; + struct ipmi_recv_msg *msg, *msg2; + unsigned long flags; + int i; + unsigned int waiting_msgs = 0; + + /* + * Go through the seq table and find any messages that + * have timed out, putting them in the timeouts + * list. + */ + INIT_LIST_HEAD(&timeouts); + spin_lock_irqsave(&intf->seq_lock, flags); + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) + check_msg_timeout(intf, &(intf->seq_table[i]), + &timeouts, timeout_period, i, + &flags, &waiting_msgs); + spin_unlock_irqrestore(&intf->seq_lock, flags); + + list_for_each_entry_safe(msg, msg2, &timeouts, link) + deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); + + /* + * Maintenance mode handling. Check the timeout + * optimistically before we claim the lock. It may + * mean a timeout gets missed occasionally, but that + * only means the timeout gets extended by one period + * in that case. No big deal, and it avoids the lock + * most of the time. + */ + if (intf->auto_maintenance_timeout > 0) { + spin_lock_irqsave(&intf->maintenance_mode_lock, flags); + if (intf->auto_maintenance_timeout > 0) { + intf->auto_maintenance_timeout + -= timeout_period; + if (!intf->maintenance_mode + && (intf->auto_maintenance_timeout <= 0)) { + intf->maintenance_mode_enable = false; + maintenance_mode_update(intf); + } + } + spin_unlock_irqrestore(&intf->maintenance_mode_lock, + flags); + } + + tasklet_schedule(&intf->recv_tasklet); + + return waiting_msgs; +} + +static void ipmi_request_event(ipmi_smi_t intf) +{ + /* No event requests when in maintenance mode. */ + if (intf->maintenance_mode_enable) + return; + + if (!intf->in_shutdown) + intf->handlers->request_events(intf->send_info); +} + +static struct timer_list ipmi_timer; + +static atomic_t stop_operation; + +static void ipmi_timeout(unsigned long data) +{ + ipmi_smi_t intf; + int nt = 0; + + if (atomic_read(&stop_operation)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + int lnt = 0; + + if (atomic_read(&intf->event_waiters)) { + intf->ticks_to_req_ev--; + if (intf->ticks_to_req_ev == 0) { + ipmi_request_event(intf); + intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; + } + lnt++; + } + + lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); + + lnt = !!lnt; + if (lnt != intf->last_needs_timer && + intf->handlers->set_need_watch) + intf->handlers->set_need_watch(intf->send_info, lnt); + intf->last_needs_timer = lnt; + + nt += lnt; + } + rcu_read_unlock(); + + if (nt) + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); +} + +static void need_waiter(ipmi_smi_t intf) +{ + /* Racy, but worst case we start the timer twice. 
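+ * (mod_timer() on an already-pending timer simply re-arms it, so + * two racing callers at worst both arm the same one-shot timer; + * nothing is corrupted and no extra locking is needed here.)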
*/ + if (!timer_pending(&ipmi_timer)) + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); +} + +static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); +static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); + +static void free_smi_msg(struct ipmi_smi_msg *msg) +{ + atomic_dec(&smi_msg_inuse_count); + kfree(msg); +} + +struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) +{ + struct ipmi_smi_msg *rv; + rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); + if (rv) { + rv->done = free_smi_msg; + rv->user_data = NULL; + atomic_inc(&smi_msg_inuse_count); + } + return rv; +} +EXPORT_SYMBOL(ipmi_alloc_smi_msg); + +static void free_recv_msg(struct ipmi_recv_msg *msg) +{ + atomic_dec(&recv_msg_inuse_count); + kfree(msg); +} + +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) +{ + struct ipmi_recv_msg *rv; + + rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); + if (rv) { + rv->user = NULL; + rv->done = free_recv_msg; + atomic_inc(&recv_msg_inuse_count); + } + return rv; +} + +void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) +{ + if (msg->user) + kref_put(&msg->user->refcount, free_user); + msg->done(msg); +} +EXPORT_SYMBOL(ipmi_free_recv_msg); + +#ifdef CONFIG_IPMI_PANIC_EVENT + +static atomic_t panic_done_count = ATOMIC_INIT(0); + +static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) +{ + atomic_dec(&panic_done_count); +} + +static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) +{ + atomic_dec(&panic_done_count); +} + +/* + * Inside a panic, send a message and wait for a response. + */ +static void ipmi_panic_request_and_wait(ipmi_smi_t intf, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *msg) +{ + struct ipmi_smi_msg smi_msg; + struct ipmi_recv_msg recv_msg; + int rv; + + smi_msg.done = dummy_smi_done_handler; + recv_msg.done = dummy_recv_done_handler; + atomic_add(2, &panic_done_count); + rv = i_ipmi_request(NULL, + intf, + addr, + 0, + msg, + intf, + &smi_msg, + &recv_msg, + 0, + intf->channels[0].address, + intf->channels[0].lun, + 0, 1); /* Don't retry, and don't wait. */ + if (rv) + atomic_sub(2, &panic_done_count); + while (atomic_read(&panic_done_count) != 0) + ipmi_poll(intf); +} + +#ifdef CONFIG_IPMI_PANIC_STRING +static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) +{ + if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) + && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) + && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) + && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { + /* A get event receiver command, save it. */ + intf->event_receiver = msg->msg.data[1]; + intf->event_receiver_lun = msg->msg.data[2] & 0x3; + } +} + +static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) +{ + if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) + && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) + && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) + && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { + /* + * A get device id command, save if we are an event + * receiver or generator. + */ + intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; + intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; + } +} +#endif + +static void send_panic_events(char *str) +{ + struct kernel_ipmi_msg msg; + ipmi_smi_t intf; + unsigned char data[16]; + struct ipmi_system_interface_addr *si; + struct ipmi_addr addr; + + si = (struct ipmi_system_interface_addr *) &addr; + si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si->channel = IPMI_BMC_CHANNEL; + si->lun = 0; + + /* Fill in an event telling that we have failed. 
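+ * As laid out just below (offsets per the IPMI Platform Event + * request, a sketch rather than a spec quote): generator ID, event + * message revision, sensor type (0x20, OS Critical Stop), sensor + * number, event dir/type (0x6f, sensor-specific), then three event + * data bytes; the sensor number and event data bytes 2 and 3 double + * as panic-string breadcrumbs.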
*/ + msg.netfn = 0x04; /* Sensor or Event. */ + msg.cmd = 2; /* Platform event command. */ + msg.data = data; + msg.data_len = 8; + data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ + data[1] = 0x03; /* This is for IPMI 1.0. */ + data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ + data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ + data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ + + /* + * Put a few breadcrumbs in. Hopefully later we can add more things + * to make the panic events more useful. + */ + if (str) { + data[3] = str[0]; + data[6] = str[1]; + data[7] = str[2]; + } + + /* For every registered interface, send the event. */ + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + if (!intf->handlers) + /* Interface is not ready. */ + continue; + + intf->run_to_completion = 1; + /* Send the event announcing the panic. */ + intf->handlers->set_run_to_completion(intf->send_info, 1); + ipmi_panic_request_and_wait(intf, &addr, &msg); + } + +#ifdef CONFIG_IPMI_PANIC_STRING + /* + * On every interface, dump a bunch of OEM events holding the + * string. + */ + if (!str) + return; + + /* For every registered interface, send the event. */ + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + char *p = str; + struct ipmi_ipmb_addr *ipmb; + int j; + + if (intf->intf_num == -1) + /* Interface was not ready yet. */ + continue; + + /* + * intf_num is used as a marker to tell if the + * interface is valid. Thus we need a read barrier to + * make sure data fetched before checking intf_num + * won't be used. + */ + smp_rmb(); + + /* + * First job here is to figure out where to send the + * OEM events. There's no way in IPMI to send OEM + * events using an event send command, so we have to + * find the SEL to put them in and stick them in + * there. + */ + + /* Get capabilities from the get device id. */ + intf->local_sel_device = 0; + intf->local_event_generator = 0; + intf->event_receiver = 0; + + /* Request the device info from the local MC. */ + msg.netfn = IPMI_NETFN_APP_REQUEST; + msg.cmd = IPMI_GET_DEVICE_ID_CMD; + msg.data = NULL; + msg.data_len = 0; + intf->null_user_handler = device_id_fetcher; + ipmi_panic_request_and_wait(intf, &addr, &msg); + + if (intf->local_event_generator) { + /* Request the event receiver from the local MC. */ + msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; + msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; + msg.data = NULL; + msg.data_len = 0; + intf->null_user_handler = event_receiver_fetcher; + ipmi_panic_request_and_wait(intf, &addr, &msg); + } + intf->null_user_handler = NULL; + + /* + * Validate the event receiver. The low bit must not + * be 1 (it must be a valid IPMB address), it cannot + * be zero, and it must not be my address. + */ + if (((intf->event_receiver & 1) == 0) + && (intf->event_receiver != 0) + && (intf->event_receiver != intf->channels[0].address)) { + /* + * The event receiver is valid, send an IPMB + * message. + */ + ipmb = (struct ipmi_ipmb_addr *) &addr; + ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb->channel = 0; /* FIXME - is this right? */ + ipmb->lun = intf->event_receiver_lun; + ipmb->slave_addr = intf->event_receiver; + } else if (intf->local_sel_device) { + /* + * The event receiver was not valid (or was + * me), but I am an SEL device, just dump it + * in my SEL. + */ + si = (struct ipmi_system_interface_addr *) &addr; + si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si->channel = IPMI_BMC_CHANNEL; + si->lun = 0; + } else + continue; /* Nowhere to send the event.
*/ + + msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ + msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; + msg.data = data; + msg.data_len = 16; + + j = 0; + while (*p) { + int size = strlen(p); + + if (size > 11) + size = 11; + data[0] = 0; + data[1] = 0; + data[2] = 0xf0; /* OEM event without timestamp. */ + data[3] = intf->channels[0].address; + data[4] = j++; /* sequence # */ + /* + * Always give 11 bytes, so strncpy will fill + * it with zeroes for me. + */ + strncpy(data+5, p, 11); + p += size; + + ipmi_panic_request_and_wait(intf, &addr, &msg); + } + } +#endif /* CONFIG_IPMI_PANIC_STRING */ +} +#endif /* CONFIG_IPMI_PANIC_EVENT */ + +static int has_panicked; + +static int panic_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + ipmi_smi_t intf; + + if (has_panicked) + return NOTIFY_DONE; + has_panicked = 1; + + /* For every registered interface, set it to run to completion. */ + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + if (!intf->handlers) + /* Interface is not ready. */ + continue; + + intf->run_to_completion = 1; + intf->handlers->set_run_to_completion(intf->send_info, 1); + } + +#ifdef CONFIG_IPMI_PANIC_EVENT + send_panic_events(ptr); +#endif + + return NOTIFY_DONE; +} + +static struct notifier_block panic_block = { + .notifier_call = panic_event, + .next = NULL, + .priority = 200 /* priority: INT_MAX >= x >= 0 */ +}; + +static int ipmi_init_msghandler(void) +{ + int rv; + + if (initialized) + return 0; + + rv = driver_register(&ipmidriver.driver); + if (rv) { + printk(KERN_ERR PFX "Could not register IPMI driver\n"); + return rv; + } + + printk(KERN_INFO "ipmi message handler version " + IPMI_DRIVER_VERSION "\n"); + +#ifdef CONFIG_PROC_FS + proc_ipmi_root = proc_mkdir("ipmi", NULL); + if (!proc_ipmi_root) { + printk(KERN_ERR PFX "Unable to create IPMI proc dir"); + driver_unregister(&ipmidriver.driver); + return -ENOMEM; + } + +#endif /* CONFIG_PROC_FS */ + + setup_timer(&ipmi_timer, ipmi_timeout, 0); + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); + + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + + initialized = 1; + + return 0; +} + +static int __init ipmi_init_msghandler_mod(void) +{ + ipmi_init_msghandler(); + return 0; +} + +static void __exit cleanup_ipmi(void) +{ + int count; + + if (!initialized) + return; + + atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); + + /* + * This can't be called if any interfaces exist, so no worry + * about shutting down the interfaces. + */ + + /* + * Tell the timer to stop, then wait for it to stop. This + * avoids problems with race conditions removing the timer + * here. + */ + atomic_inc(&stop_operation); + del_timer_sync(&ipmi_timer); + +#ifdef CONFIG_PROC_FS + proc_remove(proc_ipmi_root); +#endif /* CONFIG_PROC_FS */ + + driver_unregister(&ipmidriver.driver); + + initialized = 0; + + /* Check for buffer leaks. 
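+ * (smi_msg_inuse_count and recv_msg_inuse_count are incremented in + * ipmi_alloc_smi_msg()/ipmi_alloc_recv_msg() above and decremented + * in the matching free routines, so a nonzero value here means some + * message was allocated but never released.)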
*/ + count = atomic_read(&smi_msg_inuse_count); + if (count != 0) + printk(KERN_WARNING PFX "SMI message count %d at exit\n", + count); + count = atomic_read(&recv_msg_inuse_count); + if (count != 0) + printk(KERN_WARNING PFX "recv message count %d at exit\n", + count); +} +module_exit(cleanup_ipmi); + +module_init(ipmi_init_msghandler_mod); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Corey Minyard "); +MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI" + " interface."); +MODULE_VERSION(IPMI_DRIVER_VERSION); diff --git a/kernel/drivers/char/ipmi/ipmi_powernv.c b/kernel/drivers/char/ipmi/ipmi_powernv.c new file mode 100644 index 000000000..8753b0f6a --- /dev/null +++ b/kernel/drivers/char/ipmi/ipmi_powernv.c @@ -0,0 +1,311 @@ +/* + * PowerNV OPAL IPMI driver + * + * Copyright 2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#define pr_fmt(fmt) "ipmi-powernv: " fmt + +#include +#include +#include +#include + +#include + + +struct ipmi_smi_powernv { + u64 interface_id; + struct ipmi_device_id ipmi_id; + ipmi_smi_t intf; + u64 event; + struct notifier_block event_nb; + + /** + * We assume that there can only be one outstanding request, so + * keep the pending message in cur_msg. We protect this from concurrent + * updates through send & recv calls (and consequently opal_msg, which + * is in use when cur_msg is set) with msg_lock. + */ + spinlock_t msg_lock; + struct ipmi_smi_msg *cur_msg; + struct opal_ipmi_msg *opal_msg; +}; + +static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf) +{ + struct ipmi_smi_powernv *smi = send_info; + + smi->intf = intf; + return 0; +} + +static void send_error_reply(struct ipmi_smi_powernv *smi, + struct ipmi_smi_msg *msg, u8 completion_code) +{ + msg->rsp[0] = msg->data[0] | 0x4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = completion_code; + msg->rsp_size = 3; + ipmi_smi_msg_received(smi->intf, msg); +} + +static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) +{ + struct ipmi_smi_powernv *smi = send_info; + struct opal_ipmi_msg *opal_msg; + unsigned long flags; + int comp, rc; + size_t size; + + /* ensure data_len will fit in the opal_ipmi_msg buffer... */ + if (msg->data_size > IPMI_MAX_MSG_LENGTH) { + comp = IPMI_REQ_LEN_EXCEEDED_ERR; + goto err; + } + + /* ... 
and that we at least have netfn and cmd bytes */ + if (msg->data_size < 2) { + comp = IPMI_REQ_LEN_INVALID_ERR; + goto err; + } + + spin_lock_irqsave(&smi->msg_lock, flags); + + if (smi->cur_msg) { + comp = IPMI_NODE_BUSY_ERR; + goto err_unlock; + } + + /* format our data for the OPAL API */ + opal_msg = smi->opal_msg; + opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1; + opal_msg->netfn = msg->data[0]; + opal_msg->cmd = msg->data[1]; + if (msg->data_size > 2) + memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2); + + /* data_size already includes the netfn and cmd bytes */ + size = sizeof(*opal_msg) + msg->data_size - 2; + + pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__, + smi->interface_id, opal_msg, size); + rc = opal_ipmi_send(smi->interface_id, opal_msg, size); + pr_devel("%s: -> %d\n", __func__, rc); + + if (!rc) { + smi->cur_msg = msg; + spin_unlock_irqrestore(&smi->msg_lock, flags); + return; + } + + comp = IPMI_ERR_UNSPECIFIED; +err_unlock: + spin_unlock_irqrestore(&smi->msg_lock, flags); +err: + send_error_reply(smi, msg, comp); +} + +static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) +{ + struct opal_ipmi_msg *opal_msg; + struct ipmi_smi_msg *msg; + unsigned long flags; + uint64_t size; + int rc; + + pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__, + smi->interface_id); + + spin_lock_irqsave(&smi->msg_lock, flags); + + if (!smi->cur_msg) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + pr_warn("no current message?\n"); + return 0; + } + + msg = smi->cur_msg; + opal_msg = smi->opal_msg; + + size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH); + + rc = opal_ipmi_recv(smi->interface_id, + opal_msg, + &size); + size = be64_to_cpu(size); + pr_devel("%s: -> %d (size %lld)\n", __func__, + rc, rc == 0 ? 
size : 0); + if (rc) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + ipmi_free_smi_msg(msg); + return 0; + } + + if (size < sizeof(*opal_msg)) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + pr_warn("unexpected IPMI message size %lld\n", size); + return 0; + } + + if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + pr_warn("unexpected IPMI message format (version %d)\n", + opal_msg->version); + return 0; + } + + msg->rsp[0] = opal_msg->netfn; + msg->rsp[1] = opal_msg->cmd; + if (size > sizeof(*opal_msg)) + memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg)); + msg->rsp_size = 2 + size - sizeof(*opal_msg); + + smi->cur_msg = NULL; + spin_unlock_irqrestore(&smi->msg_lock, flags); + ipmi_smi_msg_received(smi->intf, msg); + return 0; +} + +static void ipmi_powernv_request_events(void *send_info) +{ +} + +static void ipmi_powernv_set_run_to_completion(void *send_info, + bool run_to_completion) +{ +} + +static void ipmi_powernv_poll(void *send_info) +{ + struct ipmi_smi_powernv *smi = send_info; + + ipmi_powernv_recv(smi); +} + +static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { + .owner = THIS_MODULE, + .start_processing = ipmi_powernv_start_processing, + .sender = ipmi_powernv_send, + .request_events = ipmi_powernv_request_events, + .set_run_to_completion = ipmi_powernv_set_run_to_completion, + .poll = ipmi_powernv_poll, +}; + +static int ipmi_opal_event(struct notifier_block *nb, + unsigned long events, void *change) +{ + struct ipmi_smi_powernv *smi = container_of(nb, + struct ipmi_smi_powernv, event_nb); + + if (events & smi->event) + ipmi_powernv_recv(smi); + return 0; +} + +static int ipmi_powernv_probe(struct platform_device *pdev) +{ + struct ipmi_smi_powernv *ipmi; + struct device *dev; + u32 prop; + int rc; + + if (!pdev || !pdev->dev.of_node) + return -ENODEV; + + dev = &pdev->dev; + + ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL); + if (!ipmi) + return -ENOMEM; + + spin_lock_init(&ipmi->msg_lock); + + rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id", + &prop); + if (rc) { + dev_warn(dev, "No interface ID property\n"); + goto err_free; + } + ipmi->interface_id = prop; + + rc = of_property_read_u32(dev->of_node, "interrupts", &prop); + if (rc) { + dev_warn(dev, "No interrupts property\n"); + goto err_free; + } + + ipmi->event = 1ull << prop; + ipmi->event_nb.notifier_call = ipmi_opal_event; + + rc = opal_notifier_register(&ipmi->event_nb); + if (rc) { + dev_warn(dev, "OPAL notifier registration failed (%d)\n", rc); + goto err_free; + } + + ipmi->opal_msg = devm_kmalloc(dev, + sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH, + GFP_KERNEL); + if (!ipmi->opal_msg) { + rc = -ENOMEM; + goto err_unregister; + } + + /* todo: query actual ipmi_device_id */ + rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, + &ipmi->ipmi_id, dev, 0); + if (rc) { + dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc); + goto err_free_msg; + } + + dev_set_drvdata(dev, ipmi); + return 0; + +err_free_msg: + devm_kfree(dev, ipmi->opal_msg); +err_unregister: + opal_notifier_unregister(&ipmi->event_nb); +err_free: + devm_kfree(dev, ipmi); + return rc; +} + +static int ipmi_powernv_remove(struct platform_device *pdev) +{ + struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev); + + ipmi_unregister_smi(smi->intf); + opal_notifier_unregister(&smi->event_nb); + return 0; +} + +static const struct of_device_id ipmi_powernv_match[] = { + { .compatible = "ibm,opal-ipmi" }, + { }, +}; + + +static 
struct platform_driver powernv_ipmi_driver = { + .driver = { + .name = "ipmi-powernv", + .owner = THIS_MODULE, + .of_match_table = ipmi_powernv_match, + }, + .probe = ipmi_powernv_probe, + .remove = ipmi_powernv_remove, +}; + + +module_platform_driver(powernv_ipmi_driver); + +MODULE_DEVICE_TABLE(of, ipmi_powernv_match); +MODULE_DESCRIPTION("powernv IPMI driver"); +MODULE_AUTHOR("Jeremy Kerr "); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/ipmi/ipmi_poweroff.c b/kernel/drivers/char/ipmi/ipmi_poweroff.c new file mode 100644 index 000000000..9f2e3be2c --- /dev/null +++ b/kernel/drivers/char/ipmi/ipmi_poweroff.c @@ -0,0 +1,749 @@ +/* + * ipmi_poweroff.c + * + * MontaVista IPMI Poweroff extension to sys_reboot + * + * Author: MontaVista Software, Inc. + * Steven Dake + * Corey Minyard + * source@mvista.com + * + * Copyright 2002,2004 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PFX "IPMI poweroff: " + +static void ipmi_po_smi_gone(int if_num); +static void ipmi_po_new_smi(int if_num, struct device *device); + +/* Definitions for controlling power off (if the system supports it). It + * conveniently matches the IPMI chassis control values. */ +#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */ +#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */ + +/* the IPMI data command */ +static int poweroff_powercycle; + +/* Which interface to use, -1 means the first we see. */ +static int ifnum_to_use = -1; + +/* Our local state. */ +static int ready; +static ipmi_user_t ipmi_user; +static int ipmi_ifnum; +static void (*specific_poweroff_func)(ipmi_user_t user); + +/* Holds the old poweroff function so we can restore it on removal. */ +static void (*old_poweroff_func)(void); + +static int set_param_ifnum(const char *val, struct kernel_param *kp) +{ + int rv = param_set_int(val, kp); + if (rv) + return rv; + if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum)) + return 0; + + ipmi_po_smi_gone(ipmi_ifnum); + ipmi_po_new_smi(ifnum_to_use, NULL); + return 0; +} + +module_param_call(ifnum_to_use, set_param_ifnum, param_get_int, + &ifnum_to_use, 0644); +MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " + "timer. 
Setting to -1 defaults to the first registered " + "interface"); + +/* parameter definition to allow user to flag power cycle */ +module_param(poweroff_powercycle, int, 0644); +MODULE_PARM_DESC(poweroff_powercycle, + " Set to non-zero to enable power cycle instead of power" + " down. Power cycle is contingent on hardware support," + " otherwise it defaults back to power down."); + +/* Stuff from the get device id command. */ +static unsigned int mfg_id; +static unsigned int prod_id; +static unsigned char capabilities; +static unsigned char ipmi_version; + +/* + * We use our own messages for this operation; we don't let the system + * allocate them, since we may be in a panic situation. The whole + * thing is single-threaded anyway, so multiple messages are not + * required. + */ +static atomic_t dummy_count = ATOMIC_INIT(0); +static void dummy_smi_free(struct ipmi_smi_msg *msg) +{ + atomic_dec(&dummy_count); +} +static void dummy_recv_free(struct ipmi_recv_msg *msg) +{ + atomic_dec(&dummy_count); +} +static struct ipmi_smi_msg halt_smi_msg = { + .done = dummy_smi_free +}; +static struct ipmi_recv_msg halt_recv_msg = { + .done = dummy_recv_free +}; + + +/* + * Code to send a message and wait for the response. + */ + +static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data) +{ + struct completion *comp = recv_msg->user_msg_data; + + if (comp) + complete(comp); +} + +static struct ipmi_user_hndl ipmi_poweroff_handler = { + .ipmi_recv_hndl = receive_handler +}; + + +static int ipmi_request_wait_for_response(ipmi_user_t user, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *send_msg) +{ + int rv; + struct completion comp; + + init_completion(&comp); + + rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp, + &halt_smi_msg, &halt_recv_msg, 0); + if (rv) + return rv; + + wait_for_completion(&comp); + + return halt_recv_msg.msg.data[0]; +} + +/* Wait for message to complete, spinning. */ +static int ipmi_request_in_rc_mode(ipmi_user_t user, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *send_msg) +{ + int rv; + + atomic_set(&dummy_count, 2); + rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, + &halt_smi_msg, &halt_recv_msg, 0); + if (rv) { + atomic_set(&dummy_count, 0); + return rv; + } + + /* + * Spin until our message is done.
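+ * (dummy_count was set to 2 above because both the SMI-message and + * recv-message "done" handlers must fire before the reply in + * halt_recv_msg is complete; ipmi_poll_interface() drives the + * interface by hand since interrupts may already be off here.)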
+ */ + while (atomic_read(&dummy_count) > 0) { + ipmi_poll_interface(user); + cpu_relax(); + } + + return halt_recv_msg.msg.data[0]; +} + +/* + * ATCA Support + */ + +#define IPMI_NETFN_ATCA 0x2c +#define IPMI_ATCA_SET_POWER_CMD 0x11 +#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01 +#define IPMI_PICMG_ID 0 + +#define IPMI_NETFN_OEM 0x2e +#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11 +#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A" +#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 +#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 + +static void (*atca_oem_poweroff_hook)(ipmi_user_t user); + +static void pps_poweroff_atca(ipmi_user_t user) +{ + struct ipmi_system_interface_addr smi_addr; + struct kernel_ipmi_msg send_msg; + int rv; + /* + * Configure IPMI address for local access + */ + smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr.channel = IPMI_BMC_CHANNEL; + smi_addr.lun = 0; + + printk(KERN_INFO PFX "PPS powerdown hook used\n"); + + send_msg.netfn = IPMI_NETFN_OEM; + send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; + send_msg.data = IPMI_ATCA_PPS_IANA; + send_msg.data_len = 3; + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { + printk(KERN_ERR PFX "Unable to send ATCA PPS graceful restart," + " IPMI error 0x%x\n", rv); + } + return; +} + +static int ipmi_atca_detect(ipmi_user_t user) +{ + struct ipmi_system_interface_addr smi_addr; + struct kernel_ipmi_msg send_msg; + int rv; + unsigned char data[1]; + + /* + * Configure IPMI address for local access + */ + smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr.channel = IPMI_BMC_CHANNEL; + smi_addr.lun = 0; + + /* + * Use get address info to check and see if we are ATCA + */ + send_msg.netfn = IPMI_NETFN_ATCA; + send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD; + data[0] = IPMI_PICMG_ID; + send_msg.data = data; + send_msg.data_len = sizeof(data); + rv = ipmi_request_wait_for_response(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + + printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", + mfg_id, prod_id); + if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) + && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { + printk(KERN_INFO PFX + "Installing Pigeon Point Systems Poweroff Hook\n"); + atca_oem_poweroff_hook = pps_poweroff_atca; + } + return !rv; +} + +static void ipmi_poweroff_atca(ipmi_user_t user) +{ + struct ipmi_system_interface_addr smi_addr; + struct kernel_ipmi_msg send_msg; + int rv; + unsigned char data[4]; + + /* + * Configure IPMI address for local access + */ + smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr.channel = IPMI_BMC_CHANNEL; + smi_addr.lun = 0; + + printk(KERN_INFO PFX "Powering down via ATCA power command\n"); + + /* + * Power down + */ + send_msg.netfn = IPMI_NETFN_ATCA; + send_msg.cmd = IPMI_ATCA_SET_POWER_CMD; + data[0] = IPMI_PICMG_ID; + data[1] = 0; /* FRU id */ + data[2] = 0; /* Power Level */ + data[3] = 0; /* Don't change saved presets */ + send_msg.data = data; + send_msg.data_len = sizeof(data); + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + /* + * At this point, the system may be shutting down, and most + * serial drivers (if used) will have interrupts turned off; + * it may be better to ignore the + * IPMI_UNKNOWN_ERR_COMPLETION_CODE return code. + */ + if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { + printk(KERN_ERR PFX "Unable to send ATCA powerdown message," + " IPMI error 0x%x\n", rv); + goto out; + } + + if (atca_oem_poweroff_hook) + 
atca_oem_poweroff_hook(user); + out: + return; +} + +/* + * CPI1 Support + */ + +#define IPMI_NETFN_OEM_1 0xf8 +#define OEM_GRP_CMD_SET_RESET_STATE 0x84 +#define OEM_GRP_CMD_SET_POWER_STATE 0x82 +#define IPMI_NETFN_OEM_8 0xf8 +#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80 +#define OEM_GRP_CMD_GET_SLOT_GA 0xa3 +#define IPMI_NETFN_SENSOR_EVT 0x10 +#define IPMI_CMD_GET_EVENT_RECEIVER 0x01 + +#define IPMI_CPI1_PRODUCT_ID 0x000157 +#define IPMI_CPI1_MANUFACTURER_ID 0x0108 + +static int ipmi_cpi1_detect(ipmi_user_t user) +{ + return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) + && (prod_id == IPMI_CPI1_PRODUCT_ID)); +} + +static void ipmi_poweroff_cpi1(ipmi_user_t user) +{ + struct ipmi_system_interface_addr smi_addr; + struct ipmi_ipmb_addr ipmb_addr; + struct kernel_ipmi_msg send_msg; + int rv; + unsigned char data[1]; + int slot; + unsigned char hotswap_ipmb; + unsigned char aer_addr; + unsigned char aer_lun; + + /* + * Configure IPMI address for local access + */ + smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr.channel = IPMI_BMC_CHANNEL; + smi_addr.lun = 0; + + printk(KERN_INFO PFX "Powering down via CPI1 power command\n"); + + /* + * Get IPMI ipmb address + */ + send_msg.netfn = IPMI_NETFN_OEM_8 >> 2; + send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA; + send_msg.data = NULL; + send_msg.data_len = 0; + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv) + goto out; + slot = halt_recv_msg.msg.data[1]; + hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot); + + /* + * Get active event receiver + */ + send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2; + send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER; + send_msg.data = NULL; + send_msg.data_len = 0; + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv) + goto out; + aer_addr = halt_recv_msg.msg.data[1]; + aer_lun = halt_recv_msg.msg.data[2]; + + /* + * Setup IPMB address target instead of local target + */ + ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr.channel = 0; + ipmb_addr.slave_addr = aer_addr; + ipmb_addr.lun = aer_lun; + + /* + * Send request hotswap control to remove blade from dpv + */ + send_msg.netfn = IPMI_NETFN_OEM_8 >> 2; + send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL; + send_msg.data = &hotswap_ipmb; + send_msg.data_len = 1; + ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &ipmb_addr, + &send_msg); + + /* + * Set reset asserted + */ + send_msg.netfn = IPMI_NETFN_OEM_1 >> 2; + send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE; + send_msg.data = data; + data[0] = 1; /* Reset asserted state */ + send_msg.data_len = 1; + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv) + goto out; + + /* + * Power down + */ + send_msg.netfn = IPMI_NETFN_OEM_1 >> 2; + send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE; + send_msg.data = data; + data[0] = 1; /* Power down state */ + send_msg.data_len = 1; + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv) + goto out; + + out: + return; +} + +/* + * ipmi_dell_chassis_detect() + * Dell systems with IPMI < 1.5 don't set the chassis capability bit + * but they can handle a chassis poweroff or powercycle command. 
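+ * + * Worked example of the decode below: Get Device ID reports the + * IPMI version BCD-encoded with the major revision in the low + * nibble, so 0x51 decodes as 1.5 and fails the check, while 0x01 + * decodes as 1.0 and passes it (assuming the manufacturer ID also + * matches the Dell IANA ID).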
+ */ + +#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} +static int ipmi_dell_chassis_detect(ipmi_user_t user) +{ + const char ipmi_version_major = ipmi_version & 0xF; + const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; + const char mfr[3] = DELL_IANA_MFR_ID; + if (!memcmp(mfr, &mfg_id, sizeof(mfr)) && + ipmi_version_major <= 1 && + ipmi_version_minor < 5) + return 1; + return 0; +} + +/* + * Standard chassis support + */ + +#define IPMI_NETFN_CHASSIS_REQUEST 0 +#define IPMI_CHASSIS_CONTROL_CMD 0x02 + +static int ipmi_chassis_detect(ipmi_user_t user) +{ + /* Chassis support, use it. */ + return (capabilities & 0x80); +} + +static void ipmi_poweroff_chassis(ipmi_user_t user) +{ + struct ipmi_system_interface_addr smi_addr; + struct kernel_ipmi_msg send_msg; + int rv; + unsigned char data[1]; + + /* + * Configure IPMI address for local access + */ + smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr.channel = IPMI_BMC_CHANNEL; + smi_addr.lun = 0; + + powercyclefailed: + printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", + (poweroff_powercycle ? "cycle" : "down")); + + /* + * Power down + */ + send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST; + send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD; + if (poweroff_powercycle) + data[0] = IPMI_CHASSIS_POWER_CYCLE; + else + data[0] = IPMI_CHASSIS_POWER_DOWN; + send_msg.data = data; + send_msg.data_len = sizeof(data); + rv = ipmi_request_in_rc_mode(user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv) { + if (poweroff_powercycle) { + /* power cycle failed, default to power down */ + printk(KERN_ERR PFX "Unable to send chassis power " \ + "cycle message, IPMI error 0x%x\n", rv); + poweroff_powercycle = 0; + goto powercyclefailed; + } + + printk(KERN_ERR PFX "Unable to send chassis power " \ + "down message, IPMI error 0x%x\n", rv); + } +} + + +/* Table of possible power off functions. */ +struct poweroff_function { + char *platform_type; + int (*detect)(ipmi_user_t user); + void (*poweroff_func)(ipmi_user_t user); +}; + +static struct poweroff_function poweroff_functions[] = { + { .platform_type = "ATCA", + .detect = ipmi_atca_detect, + .poweroff_func = ipmi_poweroff_atca }, + { .platform_type = "CPI1", + .detect = ipmi_cpi1_detect, + .poweroff_func = ipmi_poweroff_cpi1 }, + { .platform_type = "chassis", + .detect = ipmi_dell_chassis_detect, + .poweroff_func = ipmi_poweroff_chassis }, + /* Chassis should generally be last, other things should override + it. */ + { .platform_type = "chassis", + .detect = ipmi_chassis_detect, + .poweroff_func = ipmi_poweroff_chassis }, +}; +#define NUM_PO_FUNCS (sizeof(poweroff_functions) \ + / sizeof(struct poweroff_function)) + + +/* Called on a powerdown request. */ +static void ipmi_poweroff_function(void) +{ + if (!ready) + return; + + /* Use run-to-completion mode, since interrupts may be off. */ + specific_poweroff_func(ipmi_user); +} + +/* Wait for an IPMI interface to be installed, the first one installed + will be grabbed by this code and used to perform the powerdown. 
*/ +static void ipmi_po_new_smi(int if_num, struct device *device) +{ + struct ipmi_system_interface_addr smi_addr; + struct kernel_ipmi_msg send_msg; + int rv; + int i; + + if (ready) + return; + + if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num)) + return; + + rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, + &ipmi_user); + if (rv) { + printk(KERN_ERR PFX "could not create IPMI user, error %d\n", + rv); + return; + } + + ipmi_ifnum = if_num; + + /* + * Do a get device id and store some results, since this is + * used by several functions. + */ + smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr.channel = IPMI_BMC_CHANNEL; + smi_addr.lun = 0; + + send_msg.netfn = IPMI_NETFN_APP_REQUEST; + send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; + send_msg.data = NULL; + send_msg.data_len = 0; + rv = ipmi_request_wait_for_response(ipmi_user, + (struct ipmi_addr *) &smi_addr, + &send_msg); + if (rv) { + printk(KERN_ERR PFX "Unable to send IPMI get device id info," + " IPMI error 0x%x\n", rv); + goto out_err; + } + + if (halt_recv_msg.msg.data_len < 12) { + printk(KERN_ERR PFX "(chassis) IPMI get device id info too" + " short, was %d bytes, needed %d bytes\n", + halt_recv_msg.msg.data_len, 12); + goto out_err; + } + + mfg_id = (halt_recv_msg.msg.data[7] + | (halt_recv_msg.msg.data[8] << 8) + | (halt_recv_msg.msg.data[9] << 16)); + prod_id = (halt_recv_msg.msg.data[10] + | (halt_recv_msg.msg.data[11] << 8)); + capabilities = halt_recv_msg.msg.data[6]; + ipmi_version = halt_recv_msg.msg.data[5]; + + + /* Scan for a poweroff method */ + for (i = 0; i < NUM_PO_FUNCS; i++) { + if (poweroff_functions[i].detect(ipmi_user)) + goto found; + } + + out_err: + printk(KERN_ERR PFX "Unable to find a poweroff function that" + " will work, giving up\n"); + ipmi_destroy_user(ipmi_user); + return; + + found: + printk(KERN_INFO PFX "Found a %s style poweroff function\n", + poweroff_functions[i].platform_type); + specific_poweroff_func = poweroff_functions[i].poweroff_func; + old_poweroff_func = pm_power_off; + pm_power_off = ipmi_poweroff_function; + ready = 1; +} + +static void ipmi_po_smi_gone(int if_num) +{ + if (!ready) + return; + + if (ipmi_ifnum != if_num) + return; + + ready = 0; + ipmi_destroy_user(ipmi_user); + pm_power_off = old_poweroff_func; +} + +static struct ipmi_smi_watcher smi_watcher = { + .owner = THIS_MODULE, + .new_smi = ipmi_po_new_smi, + .smi_gone = ipmi_po_smi_gone +}; + + +#ifdef CONFIG_PROC_FS +#include + +static struct ctl_table ipmi_table[] = { + { .procname = "poweroff_powercycle", + .data = &poweroff_powercycle, + .maxlen = sizeof(poweroff_powercycle), + .mode = 0644, + .proc_handler = proc_dointvec }, + { } +}; + +static struct ctl_table ipmi_dir_table[] = { + { .procname = "ipmi", + .mode = 0555, + .child = ipmi_table }, + { } +}; + +static struct ctl_table ipmi_root_table[] = { + { .procname = "dev", + .mode = 0555, + .child = ipmi_dir_table }, + { } +}; + +static struct ctl_table_header *ipmi_table_header; +#endif /* CONFIG_PROC_FS */ + +/* + * Startup and shutdown functions.
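+ * (For orientation: the nesting of ipmi_root_table -> ipmi_dir_table + * -> ipmi_table above is what ipmi_poweroff_init() registers, + * exposing the knob as /proc/sys/dev/ipmi/poweroff_powercycle.)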
+ */ +static int __init ipmi_poweroff_init(void) +{ + int rv; + + printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -" + " IPMI Powerdown via sys_reboot.\n"); + + if (poweroff_powercycle) + printk(KERN_INFO PFX "Power cycle is enabled.\n"); + +#ifdef CONFIG_PROC_FS + ipmi_table_header = register_sysctl_table(ipmi_root_table); + if (!ipmi_table_header) { + printk(KERN_ERR PFX "Unable to register powercycle sysctl\n"); + rv = -ENOMEM; + goto out_err; + } +#endif + + rv = ipmi_smi_watcher_register(&smi_watcher); + +#ifdef CONFIG_PROC_FS + if (rv) { + unregister_sysctl_table(ipmi_table_header); + printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); + goto out_err; + } + + out_err: +#endif + return rv; +} + +#ifdef MODULE +static void __exit ipmi_poweroff_cleanup(void) +{ + int rv; + +#ifdef CONFIG_PROC_FS + unregister_sysctl_table(ipmi_table_header); +#endif + + ipmi_smi_watcher_unregister(&smi_watcher); + + if (ready) { + rv = ipmi_destroy_user(ipmi_user); + if (rv) + printk(KERN_ERR PFX "could not cleanup the IPMI" + " user: 0x%x\n", rv); + pm_power_off = old_poweroff_func; + } +} +module_exit(ipmi_poweroff_cleanup); +#endif + +module_init(ipmi_poweroff_init); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Corey Minyard "); +MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot"); diff --git a/kernel/drivers/char/ipmi/ipmi_si_intf.c b/kernel/drivers/char/ipmi/ipmi_si_intf.c new file mode 100644 index 000000000..8a45e92ff --- /dev/null +++ b/kernel/drivers/char/ipmi/ipmi_si_intf.c @@ -0,0 +1,3874 @@ +/* + * ipmi_si.c + * + * The interface to the IPMI driver for the system interfaces (KCS, SMIC, + * BT). + * + * Author: MontaVista Software, Inc. + * Corey Minyard + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * Copyright 2006 IBM Corp., Christian Krafft + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * This file holds the "policy" for the interface to the SMI state + * machine. It does the configuration, handles timers and interrupts, + * and drives the real SMI state machine. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipmi_si_sm.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PARISC +#include /* for register_parisc_driver() stuff */ +#include +#endif + +#define PFX "ipmi_si: " + +/* Measure times between events in the driver. */ +#undef DEBUG_TIMING + +/* Call every 10 ms. */ +#define SI_TIMEOUT_TIME_USEC 10000 +#define SI_USEC_PER_JIFFY (1000000/HZ) +#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) +#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a + short timeout */ + +enum si_intf_state { + SI_NORMAL, + SI_GETTING_FLAGS, + SI_GETTING_EVENTS, + SI_CLEARING_FLAGS, + SI_GETTING_MESSAGES, + SI_CHECKING_ENABLES, + SI_SETTING_ENABLES + /* FIXME - add watchdog stuff. */ +}; + +/* Some BT-specific defines we need here. */ +#define IPMI_BT_INTMASK_REG 2 +#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 +#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 + +enum si_type { + SI_KCS, SI_SMIC, SI_BT +}; +static char *si_to_str[] = { "kcs", "smic", "bt" }; + +#define DEVICE_NAME "ipmi_si" + +static struct platform_driver ipmi_driver; + +/* + * Indexes into stats[] in smi_info below. + */ +enum si_stat_indexes { + /* + * Number of times the driver requested a timer while an operation + * was in progress. + */ + SI_STAT_short_timeouts = 0, + + /* + * Number of times the driver requested a timer while nothing was in + * progress. + */ + SI_STAT_long_timeouts, + + /* Number of times the interface was idle while being polled. */ + SI_STAT_idles, + + /* Number of interrupts the driver handled. */ + SI_STAT_interrupts, + + /* Number of times the driver got an ATTN from the hardware. */ + SI_STAT_attentions, + + /* Number of times the driver requested flags from the hardware. */ + SI_STAT_flag_fetches, + + /* Number of times the hardware didn't follow the state machine. */ + SI_STAT_hosed_count, + + /* Number of completed messages. */ + SI_STAT_complete_transactions, + + /* Number of IPMI events received from the hardware. */ + SI_STAT_events, + + /* Number of watchdog pretimeouts. */ + SI_STAT_watchdog_pretimeouts, + + /* Number of asynchronous messages received. */ + SI_STAT_incoming_messages, + + + /* This *must* remain last, add new values above this. */ + SI_NUM_STATS +}; + +struct smi_info { + int intf_num; + ipmi_smi_t intf; + struct si_sm_data *si_sm; + struct si_sm_handlers *handlers; + enum si_type si_type; + spinlock_t si_lock; + struct ipmi_smi_msg *waiting_msg; + struct ipmi_smi_msg *curr_msg; + enum si_intf_state si_state; + + /* + * Used to handle the various types of I/O that can occur with + * IPMI + */ + struct si_sm_io io; + int (*io_setup)(struct smi_info *info); + void (*io_cleanup)(struct smi_info *info); + int (*irq_setup)(struct smi_info *info); + void (*irq_cleanup)(struct smi_info *info); + unsigned int io_size; + enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ + void (*addr_source_cleanup)(struct smi_info *info); + void *addr_source_data; + + /* + * Per-OEM handler, called from handle_flags(). Returns 1 + * when handle_flags() needs to be re-run or 0 indicating it + * set si_state itself.
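+ * + * A hedged sketch of the usual shape of such a handler + * (illustrative, not a definition): it rewrites msg_flags, for + * instance replacing an OEM0_DATA_AVAIL bit with RECEIVE_MSG_AVAIL, + * and returns 1 so that handle_flags() re-evaluates the updated + * flags.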
+ */ + int (*oem_data_avail_handler)(struct smi_info *smi_info); + + /* + * Flags from the last GET_MSG_FLAGS command, used when an ATTN + * is set to hold the flags until we are done handling everything + * from the flags. + */ +#define RECEIVE_MSG_AVAIL 0x01 +#define EVENT_MSG_BUFFER_FULL 0x02 +#define WDT_PRE_TIMEOUT_INT 0x08 +#define OEM0_DATA_AVAIL 0x20 +#define OEM1_DATA_AVAIL 0x40 +#define OEM2_DATA_AVAIL 0x80 +#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ + OEM1_DATA_AVAIL | \ + OEM2_DATA_AVAIL) + unsigned char msg_flags; + + /* Does the BMC have an event buffer? */ + bool has_event_buffer; + + /* + * If set to true, this will request events the next time the + * state machine is idle. + */ + atomic_t req_events; + + /* + * If true, run the state machine to completion on every send + * call. Generally used after a panic to make sure stuff goes + * out. + */ + bool run_to_completion; + + /* The I/O port of an SI interface. */ + int port; + + /* + * The space between start addresses of the two ports. For + * instance, if the first port is 0xca2 and the spacing is 4, then + * the second port is 0xca6. + */ + unsigned int spacing; + + /* zero if no irq; */ + int irq; + + /* The timer for this si. */ + struct timer_list si_timer; + + /* This flag is set, if the timer is running (timer_pending() isn't enough) */ + bool timer_running; + + /* The time (in jiffies) the last timeout occurred at. */ + unsigned long last_timeout_jiffies; + + /* Are we waiting for the events, pretimeouts, received msgs? */ + atomic_t need_watch; + + /* + * The driver will disable interrupts when it gets into a + * situation where it cannot handle messages due to lack of + * memory. Once that situation clears up, it will re-enable + * interrupts. + */ + bool interrupt_disabled; + + /* + * Does the BMC support events? + */ + bool supports_event_msg_buff; + + /* + * Can we clear the global enables receive irq bit? + */ + bool cannot_clear_recv_irq_bit; + + /* + * Did we get an attention that we did not handle? + */ + bool got_attn; + + /* From the get device id response... */ + struct ipmi_device_id device_id; + + /* Driver model stuff. */ + struct device *dev; + struct platform_device *pdev; + + /* + * True if we allocated the device, false if it came from + * someplace else (like PCI). + */ + bool dev_registered; + + /* Slave address, could be reported from DMI. */ + unsigned char slave_addr; + + /* Counters and things for the proc filesystem. 
*/ + atomic_t stats[SI_NUM_STATS]; + + struct task_struct *thread; + + struct list_head link; + union ipmi_smi_info_union addr_info; +}; + +#define smi_inc_stat(smi, stat) \ + atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) +#define smi_get_stat(smi, stat) \ + ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) + +#define SI_MAX_PARMS 4 + +static int force_kipmid[SI_MAX_PARMS]; +static int num_force_kipmid; +#ifdef CONFIG_PCI +static bool pci_registered; +#endif +#ifdef CONFIG_ACPI +static bool pnp_registered; +#endif +#ifdef CONFIG_PARISC +static bool parisc_registered; +#endif + +static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; +static int num_max_busy_us; + +static bool unload_when_empty = true; + +static int add_smi(struct smi_info *smi); +static int try_smi_init(struct smi_info *smi); +static void cleanup_one_si(struct smi_info *to_clean); +static void cleanup_ipmi_si(void); + +#ifdef DEBUG_TIMING +void debug_timestamp(char *msg) +{ + struct timespec64 t; + + getnstimeofday64(&t); + pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec); +} +#else +#define debug_timestamp(x) +#endif + +static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); +static int register_xaction_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&xaction_notifier_list, nb); +} + +static void deliver_recv_msg(struct smi_info *smi_info, + struct ipmi_smi_msg *msg) +{ + /* Deliver the message to the upper layer. */ + if (smi_info->intf) + ipmi_smi_msg_received(smi_info->intf, msg); + else + ipmi_free_smi_msg(msg); +} + +static void return_hosed_msg(struct smi_info *smi_info, int cCode) +{ + struct ipmi_smi_msg *msg = smi_info->curr_msg; + + if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) + cCode = IPMI_ERR_UNSPECIFIED; + /* else use it as is */ + + /* Make it a response */ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = cCode; + msg->rsp_size = 3; + + smi_info->curr_msg = NULL; + deliver_recv_msg(smi_info, msg); +} + +static enum si_sm_result start_next_msg(struct smi_info *smi_info) +{ + int rv; + + if (!smi_info->waiting_msg) { + smi_info->curr_msg = NULL; + rv = SI_SM_IDLE; + } else { + int err; + + smi_info->curr_msg = smi_info->waiting_msg; + smi_info->waiting_msg = NULL; + debug_timestamp("Start2"); + err = atomic_notifier_call_chain(&xaction_notifier_list, + 0, smi_info); + if (err & NOTIFY_STOP_MASK) { + rv = SI_SM_CALL_WITHOUT_DELAY; + goto out; + } + err = smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + if (err) + return_hosed_msg(smi_info, err); + + rv = SI_SM_CALL_WITHOUT_DELAY; + } + out: + return rv; +} + +static void start_check_enables(struct smi_info *smi_info) +{ + unsigned char msg[2]; + + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; + + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + smi_info->si_state = SI_CHECKING_ENABLES; +} + +static void start_clear_flags(struct smi_info *smi_info) +{ + unsigned char msg[3]; + + /* Make sure the watchdog pre-timeout flag is not set at startup. 
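
The `| 4` in return_hosed_msg() is the IPMI request/response NetFn convention: the NetFn occupies bits 2..7 of the first message byte, and a response NetFn is the request NetFn plus one, which lands on bit 2 of the byte. A standalone sketch of that encoding; the helper names are invented for illustration, only the constant follows the IPMI spec:

#include <stdio.h>

#define IPMI_NETFN_APP_REQUEST 0x06	/* App request NetFn, per the IPMI spec */

/* Hypothetical helpers, for illustration only. */
static unsigned char req_byte0(unsigned char netfn) { return netfn << 2; }
static unsigned char rsp_byte0(unsigned char req0)  { return req0 | 4; }

int main(void)
{
	unsigned char req = req_byte0(IPMI_NETFN_APP_REQUEST);	/* 0x18 */

	/* 0x18 | 4 = 0x1c, i.e. NetFn 0x07, the App response. */
	printf("req[0]=0x%02x rsp[0]=0x%02x\n", req, rsp_byte0(req));
	return 0;
}
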
*/ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; + msg[2] = WDT_PRE_TIMEOUT_INT; + + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + smi_info->si_state = SI_CLEARING_FLAGS; +} + +static void start_getting_msg_queue(struct smi_info *smi_info) +{ + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; + smi_info->curr_msg->data_size = 2; + + smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_MESSAGES; +} + +static void start_getting_events(struct smi_info *smi_info) +{ + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; + smi_info->curr_msg->data_size = 2; + + smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_EVENTS; +} + +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) +{ + smi_info->last_timeout_jiffies = jiffies; + mod_timer(&smi_info->si_timer, new_val); + smi_info->timer_running = true; +} + +/* + * When we have a situtaion where we run out of memory and cannot + * allocate messages, we just leave them in the BMC and run the system + * polled until we can allocate some memory. Once we have some + * memory, we will re-enable the interrupt. + * + * Note that we cannot just use disable_irq(), since the interrupt may + * be shared. + */ +static inline bool disable_si_irq(struct smi_info *smi_info) +{ + if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { + smi_info->interrupt_disabled = true; + start_check_enables(smi_info); + return true; + } + return false; +} + +static inline bool enable_si_irq(struct smi_info *smi_info) +{ + if ((smi_info->irq) && (smi_info->interrupt_disabled)) { + smi_info->interrupt_disabled = false; + start_check_enables(smi_info); + return true; + } + return false; +} + +/* + * Allocate a message. If unable to allocate, start the interrupt + * disable process and return NULL. If able to allocate but + * interrupts are disabled, free the message and return NULL after + * starting the interrupt enable process. + */ +static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + if (!disable_si_irq(smi_info)) + smi_info->si_state = SI_NORMAL; + } else if (enable_si_irq(smi_info)) { + ipmi_free_smi_msg(msg); + msg = NULL; + } + return msg; +} + +static void handle_flags(struct smi_info *smi_info) +{ + retry: + if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { + /* Watchdog pre-timeout */ + smi_inc_stat(smi_info, watchdog_pretimeouts); + + start_clear_flags(smi_info); + smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; + if (smi_info->intf) + ipmi_smi_watchdog_pretimeout(smi_info->intf); + } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { + /* Messages available. */ + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) + return; + + start_getting_msg_queue(smi_info); + } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { + /* Events available. 
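
handle_flags() drains one condition per pass, in a fixed priority order: watchdog pre-timeout first, then queued messages, then the event buffer, then OEM data. A userspace sketch of that priority walk, reusing the flag values defined in the driver above:

#include <stdio.h>

#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM_DATA_AVAIL		0xe0	/* OEM0 | OEM1 | OEM2 */

int main(void)
{
	unsigned char flags = WDT_PRE_TIMEOUT_INT | RECEIVE_MSG_AVAIL;

	/* Same ordering as handle_flags(): one condition handled per pass. */
	while (flags) {
		if (flags & WDT_PRE_TIMEOUT_INT) {
			puts("watchdog pre-timeout");
			flags &= ~WDT_PRE_TIMEOUT_INT;
		} else if (flags & RECEIVE_MSG_AVAIL) {
			puts("fetch queued message");
			flags &= ~RECEIVE_MSG_AVAIL;
		} else if (flags & EVENT_MSG_BUFFER_FULL) {
			puts("fetch event");
			flags &= ~EVENT_MSG_BUFFER_FULL;
		} else {
			puts("OEM data handler");
			flags &= ~OEM_DATA_AVAIL;
		}
	}
	return 0;
}
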
*/ + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) + return; + + start_getting_events(smi_info); + } else if (smi_info->msg_flags & OEM_DATA_AVAIL && + smi_info->oem_data_avail_handler) { + if (smi_info->oem_data_avail_handler(smi_info)) + goto retry; + } else + smi_info->si_state = SI_NORMAL; +} + +/* + * Global enables we care about. + */ +#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ + IPMI_BMC_EVT_MSG_INTR) + +static u8 current_global_enables(struct smi_info *smi_info, u8 base, + bool *irq_on) +{ + u8 enables = 0; + + if (smi_info->supports_event_msg_buff) + enables |= IPMI_BMC_EVT_MSG_BUFF; + + if ((smi_info->irq && !smi_info->interrupt_disabled) || + smi_info->cannot_clear_recv_irq_bit) + enables |= IPMI_BMC_RCV_MSG_INTR; + + if (smi_info->supports_event_msg_buff && + smi_info->irq && !smi_info->interrupt_disabled) + + enables |= IPMI_BMC_EVT_MSG_INTR; + + *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); + + return enables; +} + +static void check_bt_irq(struct smi_info *smi_info, bool irq_on) +{ + u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); + + irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT; + + if ((bool)irqstate == irq_on) + return; + + if (irq_on) + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, + IPMI_BT_INTMASK_ENABLE_IRQ_BIT); + else + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); +} + +static void handle_transaction_done(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg; + + debug_timestamp("Done"); + switch (smi_info->si_state) { + case SI_NORMAL: + if (!smi_info->curr_msg) + break; + + smi_info->curr_msg->rsp_size + = smi_info->handlers->get_result( + smi_info->si_sm, + smi_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); + + /* + * Do this here becase deliver_recv_msg() releases the + * lock, and a new message can be put in during the + * time the lock is released. + */ + msg = smi_info->curr_msg; + smi_info->curr_msg = NULL; + deliver_recv_msg(smi_info, msg); + break; + + case SI_GETTING_FLAGS: + { + unsigned char msg[4]; + unsigned int len; + + /* We got the flags from the SMI, now handle them. */ + len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); + if (msg[2] != 0) { + /* Error fetching flags, just give up for now. */ + smi_info->si_state = SI_NORMAL; + } else if (len < 4) { + /* + * Hmm, no flags. That's technically illegal, but + * don't use uninitialized data. + */ + smi_info->si_state = SI_NORMAL; + } else { + smi_info->msg_flags = msg[3]; + handle_flags(smi_info); + } + break; + } + + case SI_CLEARING_FLAGS: + { + unsigned char msg[3]; + + /* We cleared the flags. */ + smi_info->handlers->get_result(smi_info->si_sm, msg, 3); + if (msg[2] != 0) { + /* Error clearing flags */ + dev_warn(smi_info->dev, + "Error clearing flags: %2.2x\n", msg[2]); + } + smi_info->si_state = SI_NORMAL; + break; + } + + case SI_GETTING_EVENTS: + { + smi_info->curr_msg->rsp_size + = smi_info->handlers->get_result( + smi_info->si_sm, + smi_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); + + /* + * Do this here becase deliver_recv_msg() releases the + * lock, and a new message can be put in during the + * time the lock is released. + */ + msg = smi_info->curr_msg; + smi_info->curr_msg = NULL; + if (msg->rsp[2] != 0) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the event flag. 
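
current_global_enables() computes the global-enables byte the driver wants and SI_CHECKING_ENABLES compares it against what GET_BMC_GLOBAL_ENABLES returned, rewriting it only on mismatch. A sketch of just that decision table; the IPMI_BMC_* bit values below follow the usual ipmi_msgdefs.h layout and should be treated as assumptions here:

#include <stdio.h>
#include <stdbool.h>

/* Bit values as in ipmi_msgdefs.h (stated here as assumptions). */
#define IPMI_BMC_RCV_MSG_INTR 0x01
#define IPMI_BMC_EVT_MSG_INTR 0x02
#define IPMI_BMC_EVT_MSG_BUFF 0x08

static unsigned char wanted_enables(bool ev_buff, bool irq_usable)
{
	unsigned char e = 0;

	if (ev_buff)
		e |= IPMI_BMC_EVT_MSG_BUFF;
	if (irq_usable)
		e |= IPMI_BMC_RCV_MSG_INTR;
	if (ev_buff && irq_usable)
		e |= IPMI_BMC_EVT_MSG_INTR;
	return e;
}

int main(void)
{
	/* Event buffer supported and a working interrupt: all three bits. */
	printf("enables = 0x%02x\n", wanted_enables(true, true)); /* 0x0b */
	return 0;
}
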
*/ + smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; + handle_flags(smi_info); + } else { + smi_inc_stat(smi_info, events); + + /* + * Do this before we deliver the message + * because delivering the message releases the + * lock and something else can mess with the + * state. + */ + handle_flags(smi_info); + + deliver_recv_msg(smi_info, msg); + } + break; + } + + case SI_GETTING_MESSAGES: + { + smi_info->curr_msg->rsp_size + = smi_info->handlers->get_result( + smi_info->si_sm, + smi_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); + + /* + * Do this here becase deliver_recv_msg() releases the + * lock, and a new message can be put in during the + * time the lock is released. + */ + msg = smi_info->curr_msg; + smi_info->curr_msg = NULL; + if (msg->rsp[2] != 0) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the msg flag. */ + smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; + handle_flags(smi_info); + } else { + smi_inc_stat(smi_info, incoming_messages); + + /* + * Do this before we deliver the message + * because delivering the message releases the + * lock and something else can mess with the + * state. + */ + handle_flags(smi_info); + + deliver_recv_msg(smi_info, msg); + } + break; + } + + case SI_CHECKING_ENABLES: + { + unsigned char msg[4]; + u8 enables; + bool irq_on; + + /* We got the flags from the SMI, now handle them. */ + smi_info->handlers->get_result(smi_info->si_sm, msg, 4); + if (msg[2] != 0) { + dev_warn(smi_info->dev, + "Couldn't get irq info: %x.\n", msg[2]); + dev_warn(smi_info->dev, + "Maybe ok, but ipmi might run very slowly.\n"); + smi_info->si_state = SI_NORMAL; + break; + } + enables = current_global_enables(smi_info, 0, &irq_on); + if (smi_info->si_type == SI_BT) + /* BT has its own interrupt enable bit. */ + check_bt_irq(smi_info, irq_on); + if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { + /* Enables are not correct, fix them. */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK); + smi_info->handlers->start_transaction( + smi_info->si_sm, msg, 3); + smi_info->si_state = SI_SETTING_ENABLES; + } else if (smi_info->supports_event_msg_buff) { + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + smi_info->si_state = SI_NORMAL; + break; + } + start_getting_msg_queue(smi_info); + } else { + smi_info->si_state = SI_NORMAL; + } + break; + } + + case SI_SETTING_ENABLES: + { + unsigned char msg[4]; + + smi_info->handlers->get_result(smi_info->si_sm, msg, 4); + if (msg[2] != 0) + dev_warn(smi_info->dev, + "Could not set the global enables: 0x%x.\n", + msg[2]); + + if (smi_info->supports_event_msg_buff) { + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + smi_info->si_state = SI_NORMAL; + break; + } + start_getting_msg_queue(smi_info); + } else { + smi_info->si_state = SI_NORMAL; + } + break; + } + } +} + +/* + * Called on timeouts and events. Timeouts should pass the elapsed + * time, interrupts should pass in zero. Must be called with + * si_lock held and interrupts disabled. + */ +static enum si_sm_result smi_event_handler(struct smi_info *smi_info, + int time) +{ + enum si_sm_result si_sm_result; + + restart: + /* + * There used to be a loop here that waited a little while + * (around 25us) before giving up. That turned out to be + * pointless, the minimum delays I was seeing were in the 300us + * range, which is far too long to wait in an interrupt. 
So
+	 * we just run until the state machine tells us something
+	 * happened or it needs a delay.
+	 */
+	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+	time = 0;
+	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+
+	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+		smi_inc_stat(smi_info, complete_transactions);
+
+		handle_transaction_done(smi_info);
+		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+	} else if (si_sm_result == SI_SM_HOSED) {
+		smi_inc_stat(smi_info, hosed_count);
+
+		/*
+		 * Do this before return_hosed_msg, because that
+		 * releases the lock.
+		 */
+		smi_info->si_state = SI_NORMAL;
+		if (smi_info->curr_msg != NULL) {
+			/*
+			 * If we were handling a user message, format
+			 * a response to send to the upper layer to
+			 * tell it about the error.
+			 */
+			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+		}
+		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+	}
+
+	/*
+	 * We prefer handling attn over new messages.  But don't do
+	 * this if there is not yet an upper layer to handle anything.
+	 */
+	if (likely(smi_info->intf) &&
+	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
+		unsigned char msg[2];
+
+		if (smi_info->si_state != SI_NORMAL) {
+			/*
+			 * We got an ATTN, but we are doing something else.
+			 * Handle the ATTN later.
+			 */
+			smi_info->got_attn = true;
+		} else {
+			smi_info->got_attn = false;
+			smi_inc_stat(smi_info, attentions);
+
+			/*
+			 * Got an attn, send down a get message flags to see
+			 * what's causing it.  It would be better to handle
+			 * this in the upper layer, but due to the way
+			 * interrupts work with the SMI, that's not really
+			 * possible.
+			 */
+			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+			msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+			smi_info->handlers->start_transaction(
+				smi_info->si_sm, msg, 2);
+			smi_info->si_state = SI_GETTING_FLAGS;
+			goto restart;
+		}
+	}
+
+	/* If we are currently idle, try to start the next message. */
+	if (si_sm_result == SI_SM_IDLE) {
+		smi_inc_stat(smi_info, idles);
+
+		si_sm_result = start_next_msg(smi_info);
+		if (si_sm_result != SI_SM_IDLE)
+			goto restart;
+	}
+
+	if ((si_sm_result == SI_SM_IDLE)
+	    && (atomic_read(&smi_info->req_events))) {
+		/*
+		 * We are idle and the upper layer requested that I fetch
+		 * events, so do so.
+		 */
+		atomic_set(&smi_info->req_events, 0);
+
+		/*
+		 * Take this opportunity to check the interrupt and
+		 * message enable state for the BMC.  The BMC can be
+		 * asynchronously reset, and may thus get interrupts
+		 * disabled and messages disabled.
+		 */
+		if (smi_info->supports_event_msg_buff || smi_info->irq) {
+			start_check_enables(smi_info);
+		} else {
+			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+			if (!smi_info->curr_msg)
+				goto out;
+
+			start_getting_events(smi_info);
+		}
+		goto restart;
+	}
+ out:
+	return si_sm_result;
+}
+
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
+static void sender(void *send_info,
+		   struct ipmi_smi_msg *msg)
+{
+	struct smi_info *smi_info = send_info;
+	enum si_sm_result result;
+	unsigned long flags;
+
+	debug_timestamp("Enqueue");
+
+	if (smi_info->run_to_completion) {
+		/*
+		 * If we are running to completion, start it and run
+		 * transactions until everything is clear.
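
smi_event_handler() is the single funnel: it cranks the low-level state machine until it stops asking to be re-run immediately, then dispatches on the result code. A stripped-down userspace skeleton of that crank-then-dispatch shape; the result names and the fake two-step machine are contrived for illustration:

#include <stdio.h>

enum result { CALL_WITHOUT_DELAY, TRANSACTION_COMPLETE, HOSED, IDLE };

/* A fake state machine standing in for the real KCS/SMIC/BT one. */
static int steps;
static enum result machine_event(void)
{
	return (++steps < 3) ? CALL_WITHOUT_DELAY : TRANSACTION_COMPLETE;
}

int main(void)
{
	enum result r = machine_event();

	while (r == CALL_WITHOUT_DELAY)		/* crank until it settles */
		r = machine_event();

	switch (r) {
	case TRANSACTION_COMPLETE:
		puts("deliver response, then maybe start the next message");
		break;
	case HOSED:
		puts("synthesize an error response for the user");
		break;
	default:
		puts("idle: start the next queued message, if any");
	}
	return 0;
}
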
+		 */
+		smi_info->waiting_msg = msg;
+
+		/*
+		 * Run to completion means we are single-threaded, no
+		 * need for locks.
+		 */
+
+		result = smi_event_handler(smi_info, 0);
+		while (result != SI_SM_IDLE) {
+			udelay(SI_SHORT_TIMEOUT_USEC);
+			result = smi_event_handler(smi_info,
+						   SI_SHORT_TIMEOUT_USEC);
+		}
+		return;
+	}
+
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	/*
+	 * The following two lines don't need to be under the lock for
+	 * the lock's sake, but they do need SMP memory barriers to
+	 * avoid getting things out of order.  We are already claiming
+	 * the lock, anyway, so just do it under the lock to avoid the
+	 * ordering problem.
+	 */
+	BUG_ON(smi_info->waiting_msg);
+	smi_info->waiting_msg = msg;
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
+{
+	struct smi_info *smi_info = send_info;
+	enum si_sm_result result;
+
+	smi_info->run_to_completion = i_run_to_completion;
+	if (i_run_to_completion) {
+		result = smi_event_handler(smi_info, 0);
+		while (result != SI_SM_IDLE) {
+			udelay(SI_SHORT_TIMEOUT_USEC);
+			result = smi_event_handler(smi_info,
+						   SI_SHORT_TIMEOUT_USEC);
+		}
+	}
+}
+
+/*
+ * Use -1 in the nsec value of the busy waiting timespec to tell that
+ * we are spinning in kipmid looking for something and not delaying
+ * between checks
+ */
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
+{
+	ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
+{
+	return ts->tv_nsec != -1;
+}
+
+static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+					const struct smi_info *smi_info,
+					struct timespec64 *busy_until)
+{
+	unsigned int max_busy_us = 0;
+
+	if (smi_info->intf_num < num_max_busy_us)
+		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
+	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+		ipmi_si_set_not_busy(busy_until);
+	else if (!ipmi_si_is_busy(busy_until)) {
+		getnstimeofday64(busy_until);
+		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+	} else {
+		struct timespec64 now;
+
+		getnstimeofday64(&now);
+		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
+			ipmi_si_set_not_busy(busy_until);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard.  This is only enabled for systems
+ * that are not BT and do not have interrupts.  It starts spinning
+ * when an operation is complete or until max_busy tells it to stop
+ * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
+static int ipmi_thread(void *data)
+{
+	struct smi_info *smi_info = data;
+	unsigned long flags;
+	enum si_sm_result smi_result;
+	struct timespec64 busy_until;
+
+	ipmi_si_set_not_busy(&busy_until);
+	set_user_nice(current, MAX_NICE);
+	while (!kthread_should_stop()) {
+		int busy_wait;
+
+		spin_lock_irqsave(&(smi_info->si_lock), flags);
+		smi_result = smi_event_handler(smi_info, 0);
+
+		/*
+		 * If the driver is doing something, there is a possible
+		 * race with the timer.  If the timer handler sees idle,
+		 * and the thread here sees something else, the timer
+		 * handler won't restart the timer even though it is
+		 * required.  So start it here if necessary.
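
ipmi_thread_busy_wait() encodes "not busy" as tv_nsec == -1 and otherwise spins until a precomputed deadline. The same deadline pattern in plain POSIX C; the clock and helper names here are userspace stand-ins, not the kernel's:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define MAX_BUSY_US 200		/* plays the role of kipmid_max_busy_us */

static bool deadline_passed(const struct timespec *dl)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > dl->tv_sec ||
	       (now.tv_sec == dl->tv_sec && now.tv_nsec > dl->tv_nsec);
}

int main(void)
{
	struct timespec busy_until;

	clock_gettime(CLOCK_MONOTONIC, &busy_until);
	busy_until.tv_nsec += MAX_BUSY_US * 1000L;
	if (busy_until.tv_nsec >= 1000000000L) {	/* normalize */
		busy_until.tv_sec++;
		busy_until.tv_nsec -= 1000000000L;
	}
	while (!deadline_passed(&busy_until))
		;	/* spin, as kipmid does between device checks */
	puts("busy window over, would schedule() now");
	return 0;
}
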
+ */ + if (smi_result != SI_SM_IDLE && !smi_info->timer_running) + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); + + spin_unlock_irqrestore(&(smi_info->si_lock), flags); + busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, + &busy_until); + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) + ; /* do nothing */ + else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) + schedule(); + else if (smi_result == SI_SM_IDLE) { + if (atomic_read(&smi_info->need_watch)) { + schedule_timeout_interruptible(100); + } else { + /* Wait to be woken up when we are needed. */ + __set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + } else + schedule_timeout_interruptible(1); + } + return 0; +} + + +static void poll(void *send_info) +{ + struct smi_info *smi_info = send_info; + unsigned long flags = 0; + bool run_to_completion = smi_info->run_to_completion; + + /* + * Make sure there is some delay in the poll loop so we can + * drive time forward and timeout things. + */ + udelay(10); + if (!run_to_completion) + spin_lock_irqsave(&smi_info->si_lock, flags); + smi_event_handler(smi_info, 10); + if (!run_to_completion) + spin_unlock_irqrestore(&smi_info->si_lock, flags); +} + +static void request_events(void *send_info) +{ + struct smi_info *smi_info = send_info; + + if (!smi_info->has_event_buffer) + return; + + atomic_set(&smi_info->req_events, 1); +} + +static void set_need_watch(void *send_info, bool enable) +{ + struct smi_info *smi_info = send_info; + unsigned long flags; + + atomic_set(&smi_info->need_watch, enable); + spin_lock_irqsave(&smi_info->si_lock, flags); + check_start_timer_thread(smi_info); + spin_unlock_irqrestore(&smi_info->si_lock, flags); +} + +static int initialized; + +static void smi_timeout(unsigned long data) +{ + struct smi_info *smi_info = (struct smi_info *) data; + enum si_sm_result smi_result; + unsigned long flags; + unsigned long jiffies_now; + long time_diff; + long timeout; + + spin_lock_irqsave(&(smi_info->si_lock), flags); + debug_timestamp("Timer"); + + jiffies_now = jiffies; + time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) + * SI_USEC_PER_JIFFY); + smi_result = smi_event_handler(smi_info, time_diff); + + if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { + /* Running with interrupts, only do long timeouts. */ + timeout = jiffies + SI_TIMEOUT_JIFFIES; + smi_inc_stat(smi_info, long_timeouts); + goto do_mod_timer; + } + + /* + * If the state machine asks for a short delay, then shorten + * the timer timeout. + */ + if (smi_result == SI_SM_CALL_WITH_DELAY) { + smi_inc_stat(smi_info, short_timeouts); + timeout = jiffies + 1; + } else { + smi_inc_stat(smi_info, long_timeouts); + timeout = jiffies + SI_TIMEOUT_JIFFIES; + } + + do_mod_timer: + if (smi_result != SI_SM_IDLE) + smi_mod_timer(smi_info, timeout); + else + smi_info->timer_running = false; + spin_unlock_irqrestore(&(smi_info->si_lock), flags); +} + +static irqreturn_t si_irq_handler(int irq, void *data) +{ + struct smi_info *smi_info = data; + unsigned long flags; + + spin_lock_irqsave(&(smi_info->si_lock), flags); + + smi_inc_stat(smi_info, interrupts); + + debug_timestamp("Interrupt"); + + smi_event_handler(smi_info, 0); + spin_unlock_irqrestore(&(smi_info->si_lock), flags); + return IRQ_HANDLED; +} + +static irqreturn_t si_bt_irq_handler(int irq, void *data) +{ + struct smi_info *smi_info = data; + /* We need to clear the IRQ flag for the BT interface. 
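
smi_timeout() converts elapsed jiffies into microseconds with a signed subtraction, which stays correct across jiffies wraparound as long as the real delta fits in a long. A compact demonstration of why the cast-to-long subtraction survives the wrap; the values are contrived and assume a 64-bit long:

#include <stdio.h>

#define SI_USEC_PER_JIFFY 4000	/* assuming HZ=250, as before */

int main(void)
{
	unsigned long last = 0xfffffffffffffffeUL;	/* just before wrap */
	unsigned long now = 1;				/* 3 ticks later */
	long diff = (long)now - (long)last;		/* = 3, not huge */

	printf("elapsed = %ld jiffies = %ld us\n",
	       diff, diff * SI_USEC_PER_JIFFY);
	return 0;
}
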
*/ + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, + IPMI_BT_INTMASK_CLEAR_IRQ_BIT + | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); + return si_irq_handler(irq, data); +} + +static int smi_start_processing(void *send_info, + ipmi_smi_t intf) +{ + struct smi_info *new_smi = send_info; + int enable = 0; + + new_smi->intf = intf; + + /* Try to claim any interrupts. */ + if (new_smi->irq_setup) + new_smi->irq_setup(new_smi); + + /* Set up the timer that drives the interface. */ + setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); + smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); + + /* + * Check if the user forcefully enabled the daemon. + */ + if (new_smi->intf_num < num_force_kipmid) + enable = force_kipmid[new_smi->intf_num]; + /* + * The BT interface is efficient enough to not need a thread, + * and there is no need for a thread if we have interrupts. + */ + else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) + enable = 1; + + if (enable) { + new_smi->thread = kthread_run(ipmi_thread, new_smi, + "kipmi%d", new_smi->intf_num); + if (IS_ERR(new_smi->thread)) { + dev_notice(new_smi->dev, "Could not start" + " kernel thread due to error %ld, only using" + " timers to drive the interface\n", + PTR_ERR(new_smi->thread)); + new_smi->thread = NULL; + } + } + + return 0; +} + +static int get_smi_info(void *send_info, struct ipmi_smi_info *data) +{ + struct smi_info *smi = send_info; + + data->addr_src = smi->addr_source; + data->dev = smi->dev; + data->addr_info = smi->addr_info; + get_device(smi->dev); + + return 0; +} + +static void set_maintenance_mode(void *send_info, bool enable) +{ + struct smi_info *smi_info = send_info; + + if (!enable) + atomic_set(&smi_info->req_events, 0); +} + +static struct ipmi_smi_handlers handlers = { + .owner = THIS_MODULE, + .start_processing = smi_start_processing, + .get_smi_info = get_smi_info, + .sender = sender, + .request_events = request_events, + .set_need_watch = set_need_watch, + .set_maintenance_mode = set_maintenance_mode, + .set_run_to_completion = set_run_to_completion, + .poll = poll, +}; + +/* + * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, + * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. 
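
smi_start_processing() only spawns kipmid when polling is actually useful: BT hardware is efficient enough without it, and an interrupt-driven interface does not need it either, unless force_kipmid overrides the heuristic. A sketch of that decision in isolation; the helper and the "-1 means unset" convention are illustrative stand-ins for the force_kipmid array handling:

#include <stdio.h>
#include <stdbool.h>

enum si_type { SI_KCS, SI_SMIC, SI_BT };

/* force < 0 means "not set by the user", mirroring num_force_kipmid. */
static bool want_kipmid(enum si_type type, int irq, int force)
{
	if (force >= 0)
		return force != 0;
	return type != SI_BT && !irq;
}

int main(void)
{
	printf("KCS, polled: %d\n", want_kipmid(SI_KCS, 0, -1));	/* 1 */
	printf("KCS, irq 10: %d\n", want_kipmid(SI_KCS, 10, -1));	/* 0 */
	printf("BT,  polled: %d\n", want_kipmid(SI_BT, 0, -1));	/* 0 */
	return 0;
}
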
+ */
+
+static LIST_HEAD(smi_infos);
+static DEFINE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
+
+#define DEFAULT_REGSPACING	1
+#define DEFAULT_REGSIZE		1
+
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = 1;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = 1;
+#endif
+static bool si_tryplatform = 1;
+#ifdef CONFIG_PCI
+static bool si_trypci = 1;
+#endif
+static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
+static char *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int irqs[SI_MAX_PARMS];
+static unsigned int num_irqs;
+static int regspacings[SI_MAX_PARMS];
+static unsigned int num_regspacings;
+static int regsizes[SI_MAX_PARMS];
+static unsigned int num_regsizes;
+static int regshifts[SI_MAX_PARMS];
+static unsigned int num_regshifts;
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
+static unsigned int num_slave_addrs;
+
+#define IPMI_IO_ADDR_SPACE  0
+#define IPMI_MEM_ADDR_SPACE 1
+static char *addr_space_to_str[] = { "i/o", "mem" };
+
+static int hotmod_handler(const char *val, struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+		 " Documentation/IPMI.txt in the kernel sources for the"
+		 " gory details.");
+
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via platform"
+		 " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via pci");
+#endif
+module_param_named(trydefaults, si_trydefaults, bool, 0);
+MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
+		 " default scan of the KCS and SMIC interface at the standard"
+		 " address");
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, each"
+		 " interface separated by commas.  The types are 'kcs',"
+		 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
+		 " the first interface to kcs and the second to bt");
+module_param_array(addrs, ulong, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " is in memory.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_array(ports, uint, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " is a port.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_array(irqs, int, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " has an interrupt.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_array(regspacings, int, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+		 " and each successive register used by the interface.  For"
+		 " instance, if the start address is 0xca2 and the spacing"
+		 " is 2, then the second address is at 0xca4.  Defaults"
+		 " to 1.");
+module_param_array(regsizes, int, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+		 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+		 " 16-bit, 32-bit, or 64-bit register.  Use this if"
+		 " the 8-bit IPMI register has to be read from a larger"
+		 " register.");
+module_param_array(regshifts, int, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+		 " IPMI register, in bits.  For instance, if the data"
+		 " is read from a 32-bit word and the IPMI data is in"
+		 " bit 8-15, then the shift would be 8");
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+		 " the controller.  Normally this is 0x20, but can be"
+		 " overridden by this parm.  This is an array indexed"
+		 " by interface number.");
+module_param_array(force_kipmid, int, &num_force_kipmid, 0);
+MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
+		 " disabled(0).  Normally the IPMI driver auto-detects"
+		 " this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, bool, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+		 " specified or found, default is 1.  Setting to 0"
+		 " is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+		 "Max time (in microseconds) to busy-wait for IPMI data before"
+		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
+		 " if kipmid is using up a lot of CPU time.");
+
+
+static void std_irq_cleanup(struct smi_info *info)
+{
+	if (info->si_type == SI_BT)
+		/* Disable the interrupt in the BT interface. */
+		info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
+	free_irq(info->irq, info);
+}
+
+static int std_irq_setup(struct smi_info *info)
+{
+	int rv;
+
+	if (!info->irq)
+		return 0;
+
+	if (info->si_type == SI_BT) {
+		rv = request_irq(info->irq,
+				 si_bt_irq_handler,
+				 IRQF_SHARED,
+				 DEVICE_NAME,
+				 info);
+		if (!rv)
+			/* Enable the interrupt in the BT interface.
*/ + info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, + IPMI_BT_INTMASK_ENABLE_IRQ_BIT); + } else + rv = request_irq(info->irq, + si_irq_handler, + IRQF_SHARED, + DEVICE_NAME, + info); + if (rv) { + dev_warn(info->dev, "%s unable to claim interrupt %d," + " running polled\n", + DEVICE_NAME, info->irq); + info->irq = 0; + } else { + info->irq_cleanup = std_irq_cleanup; + dev_info(info->dev, "Using irq %d\n", info->irq); + } + + return rv; +} + +static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) +{ + unsigned int addr = io->addr_data; + + return inb(addr + (offset * io->regspacing)); +} + +static void port_outb(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + unsigned int addr = io->addr_data; + + outb(b, addr + (offset * io->regspacing)); +} + +static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) +{ + unsigned int addr = io->addr_data; + + return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; +} + +static void port_outw(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + unsigned int addr = io->addr_data; + + outw(b << io->regshift, addr + (offset * io->regspacing)); +} + +static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) +{ + unsigned int addr = io->addr_data; + + return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; +} + +static void port_outl(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + unsigned int addr = io->addr_data; + + outl(b << io->regshift, addr+(offset * io->regspacing)); +} + +static void port_cleanup(struct smi_info *info) +{ + unsigned int addr = info->io.addr_data; + int idx; + + if (addr) { + for (idx = 0; idx < info->io_size; idx++) + release_region(addr + idx * info->io.regspacing, + info->io.regsize); + } +} + +static int port_setup(struct smi_info *info) +{ + unsigned int addr = info->io.addr_data; + int idx; + + if (!addr) + return -ENODEV; + + info->io_cleanup = port_cleanup; + + /* + * Figure out the actual inb/inw/inl/etc routine to use based + * upon the register size. + */ + switch (info->io.regsize) { + case 1: + info->io.inputb = port_inb; + info->io.outputb = port_outb; + break; + case 2: + info->io.inputb = port_inw; + info->io.outputb = port_outw; + break; + case 4: + info->io.inputb = port_inl; + info->io.outputb = port_outl; + break; + default: + dev_warn(info->dev, "Invalid register size: %d\n", + info->io.regsize); + return -EINVAL; + } + + /* + * Some BIOSes reserve disjoint I/O regions in their ACPI + * tables. This causes problems when trying to register the + * entire I/O region. Therefore we must register each I/O + * port separately. 
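
All of the port_in*/intf_mem_in* variants above reduce to the same arithmetic: the i-th logical register sits at base + i * regspacing, and when the 8-bit IPMI register is embedded in a wider word it is extracted with a shift and a mask. The addressing math on its own, with sample values that do not correspond to any particular board:

#include <stdio.h>

int main(void)
{
	unsigned int base = 0xca2, regspacing = 4, regshift = 8;
	unsigned long raw = 0x0000c200;	/* pretend result of a 32-bit read */

	/* Second register: base + 1 * regspacing = 0xca6. */
	printf("reg 1 at 0x%x\n", base + 1 * regspacing);
	/* The IPMI byte sits in bits 8..15 of the word: 0xc2. */
	printf("value 0x%02lx\n", (raw >> regshift) & 0xff);
	return 0;
}
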
+ */ + for (idx = 0; idx < info->io_size; idx++) { + if (request_region(addr + idx * info->io.regspacing, + info->io.regsize, DEVICE_NAME) == NULL) { + /* Undo allocations */ + while (idx--) { + release_region(addr + idx * info->io.regspacing, + info->io.regsize); + } + return -EIO; + } + } + return 0; +} + +static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) +{ + return readb((io->addr)+(offset * io->regspacing)); +} + +static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + writeb(b, (io->addr)+(offset * io->regspacing)); +} + +static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) +{ + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) + & 0xff; +} + +static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); +} + +static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) +{ + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) + & 0xff; +} + +static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); +} + +#ifdef readq +static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) +{ + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) + & 0xff; +} + +static void mem_outq(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); +} +#endif + +static void mem_cleanup(struct smi_info *info) +{ + unsigned long addr = info->io.addr_data; + int mapsize; + + if (info->io.addr) { + iounmap(info->io.addr); + + mapsize = ((info->io_size * info->io.regspacing) + - (info->io.regspacing - info->io.regsize)); + + release_mem_region(addr, mapsize); + } +} + +static int mem_setup(struct smi_info *info) +{ + unsigned long addr = info->io.addr_data; + int mapsize; + + if (!addr) + return -ENODEV; + + info->io_cleanup = mem_cleanup; + + /* + * Figure out the actual readb/readw/readl/etc routine to use based + * upon the register size. + */ + switch (info->io.regsize) { + case 1: + info->io.inputb = intf_mem_inb; + info->io.outputb = intf_mem_outb; + break; + case 2: + info->io.inputb = intf_mem_inw; + info->io.outputb = intf_mem_outw; + break; + case 4: + info->io.inputb = intf_mem_inl; + info->io.outputb = intf_mem_outl; + break; +#ifdef readq + case 8: + info->io.inputb = mem_inq; + info->io.outputb = mem_outq; + break; +#endif + default: + dev_warn(info->dev, "Invalid register size: %d\n", + info->io.regsize); + return -EINVAL; + } + + /* + * Calculate the total amount of memory to claim. This is an + * unusual looking calculation, but it avoids claiming any + * more memory than it has to. It will claim everything + * between the first address to the end of the last full + * register. + */ + mapsize = ((info->io_size * info->io.regspacing) + - (info->io.regspacing - info->io.regsize)); + + if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) + return -EIO; + + info->io.addr = ioremap(addr, mapsize); + if (info->io.addr == NULL) { + release_mem_region(addr, mapsize); + return -EIO; + } + return 0; +} + +/* + * Parms come in as [:op2[:op3...]]. ops are: + * add|remove,kcs|bt|smic,mem|i/o,
<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ *    rsp=<regspacing>
+ *    rsi=<regsize>
+ *    rsh=<regshift>
+ *    irq=<irq>
+ *    ipmb=<ipmb addr>
+ */
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+	char *name;
+	int val;
+};
+static struct hotmod_vals hotmod_ops[] = {
+	{ "add",	HM_ADD },
+	{ "remove",	HM_REMOVE },
+	{ NULL }
+};
+static struct hotmod_vals hotmod_si[] = {
+	{ "kcs",	SI_KCS },
+	{ "smic",	SI_SMIC },
+	{ "bt",		SI_BT },
+	{ NULL }
+};
+static struct hotmod_vals hotmod_as[] = {
+	{ "mem",	IPMI_MEM_ADDR_SPACE },
+	{ "i/o",	IPMI_IO_ADDR_SPACE },
+	{ NULL }
+};
+
+static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
+{
+	char *s;
+	int i;
+
+	s = strchr(*curr, ',');
+	if (!s) {
+		printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+		return -EINVAL;
+	}
+	*s = '\0';
+	s++;
+	for (i = 0; v[i].name; i++) {
+		if (strcmp(*curr, v[i].name) == 0) {
+			*val = v[i].val;
+			*curr = s;
+			return 0;
+		}
+	}
+
+	printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+	return -EINVAL;
+}
+
+static int check_hotmod_int_op(const char *curr, const char *option,
+			       const char *name, int *val)
+{
+	char *n;
+
+	if (strcmp(curr, name) == 0) {
+		if (!option) {
+			printk(KERN_WARNING PFX
+			       "No option given for '%s'\n",
+			       curr);
+			return -EINVAL;
+		}
+		*val = simple_strtoul(option, &n, 0);
+		if ((*n != '\0') || (*option == '\0')) {
+			printk(KERN_WARNING PFX
+			       "Bad option given for '%s'\n",
+			       curr);
+			return -EINVAL;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static struct smi_info *smi_info_alloc(void)
+{
+	struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (info)
+		spin_lock_init(&info->si_lock);
+	return info;
+}
+
+static int hotmod_handler(const char *val, struct kernel_param *kp)
+{
+	char *str = kstrdup(val, GFP_KERNEL);
+	int rv;
+	char *next, *curr, *s, *n, *o;
+	enum hotmod_op op;
+	enum si_type si_type;
+	int addr_space;
+	unsigned long addr;
+	int regspacing;
+	int regsize;
+	int regshift;
+	int irq;
+	int ipmb;
+	int ival;
+	int len;
+	struct smi_info *info;
+
+	if (!str)
+		return -ENOMEM;
+
+	/* Kill any trailing spaces, as we can get a "\n" from echo.
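
The hotmod grammar reconstructed above is parsed destructively: each parse_str() call cuts the string at the next comma with a '\0' and advances past it. A userspace miniature of the same comma-cutting walk over a typical request such as "add,kcs,i/o,0xca2,rsp=2,irq=9":

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "add,kcs,i/o,0xca2,rsp=2,irq=9";
	char *curr = buf, *s;

	while (curr) {
		s = strchr(curr, ',');
		if (s)
			*s++ = '\0';	/* cut, as parse_str() does */
		if (strchr(curr, '='))
			printf("option: %s\n", curr);
		else
			printf("field:  %s\n", curr);
		curr = s;
	}
	return 0;
}
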
*/ + len = strlen(str); + ival = len - 1; + while ((ival >= 0) && isspace(str[ival])) { + str[ival] = '\0'; + ival--; + } + + for (curr = str; curr; curr = next) { + regspacing = 1; + regsize = 1; + regshift = 0; + irq = 0; + ipmb = 0; /* Choose the default if not specified */ + + next = strchr(curr, ':'); + if (next) { + *next = '\0'; + next++; + } + + rv = parse_str(hotmod_ops, &ival, "operation", &curr); + if (rv) + break; + op = ival; + + rv = parse_str(hotmod_si, &ival, "interface type", &curr); + if (rv) + break; + si_type = ival; + + rv = parse_str(hotmod_as, &addr_space, "address space", &curr); + if (rv) + break; + + s = strchr(curr, ','); + if (s) { + *s = '\0'; + s++; + } + addr = simple_strtoul(curr, &n, 0); + if ((*n != '\0') || (*curr == '\0')) { + printk(KERN_WARNING PFX "Invalid hotmod address" + " '%s'\n", curr); + break; + } + + while (s) { + curr = s; + s = strchr(curr, ','); + if (s) { + *s = '\0'; + s++; + } + o = strchr(curr, '='); + if (o) { + *o = '\0'; + o++; + } + rv = check_hotmod_int_op(curr, o, "rsp", ®spacing); + if (rv < 0) + goto out; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "rsi", ®size); + if (rv < 0) + goto out; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "rsh", ®shift); + if (rv < 0) + goto out; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "irq", &irq); + if (rv < 0) + goto out; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb); + if (rv < 0) + goto out; + else if (rv) + continue; + + rv = -EINVAL; + printk(KERN_WARNING PFX + "Invalid hotmod option '%s'\n", + curr); + goto out; + } + + if (op == HM_ADD) { + info = smi_info_alloc(); + if (!info) { + rv = -ENOMEM; + goto out; + } + + info->addr_source = SI_HOTMOD; + info->si_type = si_type; + info->io.addr_data = addr; + info->io.addr_type = addr_space; + if (addr_space == IPMI_MEM_ADDR_SPACE) + info->io_setup = mem_setup; + else + info->io_setup = port_setup; + + info->io.addr = NULL; + info->io.regspacing = regspacing; + if (!info->io.regspacing) + info->io.regspacing = DEFAULT_REGSPACING; + info->io.regsize = regsize; + if (!info->io.regsize) + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = regshift; + info->irq = irq; + if (info->irq) + info->irq_setup = std_irq_setup; + info->slave_addr = ipmb; + + rv = add_smi(info); + if (rv) { + kfree(info); + goto out; + } + rv = try_smi_init(info); + if (rv) { + cleanup_one_si(info); + goto out; + } + } else { + /* remove */ + struct smi_info *e, *tmp_e; + + mutex_lock(&smi_infos_lock); + list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { + if (e->io.addr_type != addr_space) + continue; + if (e->si_type != si_type) + continue; + if (e->io.addr_data == addr) + cleanup_one_si(e); + } + mutex_unlock(&smi_infos_lock); + } + } + rv = len; + out: + kfree(str); + return rv; +} + +static int hardcode_find_bmc(void) +{ + int ret = -ENODEV; + int i; + struct smi_info *info; + + for (i = 0; i < SI_MAX_PARMS; i++) { + if (!ports[i] && !addrs[i]) + continue; + + info = smi_info_alloc(); + if (!info) + return -ENOMEM; + + info->addr_source = SI_HARDCODED; + printk(KERN_INFO PFX "probing via hardcoded address\n"); + + if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { + info->si_type = SI_KCS; + } else if (strcmp(si_type[i], "smic") == 0) { + info->si_type = SI_SMIC; + } else if (strcmp(si_type[i], "bt") == 0) { + info->si_type = SI_BT; + } else { + printk(KERN_WARNING PFX "Interface type specified " + "for interface %d, was invalid: %s\n", + i, si_type[i]); + kfree(info); 
+ continue; + } + + if (ports[i]) { + /* An I/O port */ + info->io_setup = port_setup; + info->io.addr_data = ports[i]; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else if (addrs[i]) { + /* A memory port */ + info->io_setup = mem_setup; + info->io.addr_data = addrs[i]; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } else { + printk(KERN_WARNING PFX "Interface type specified " + "for interface %d, but port and address were " + "not set or set to zero.\n", i); + kfree(info); + continue; + } + + info->io.addr = NULL; + info->io.regspacing = regspacings[i]; + if (!info->io.regspacing) + info->io.regspacing = DEFAULT_REGSPACING; + info->io.regsize = regsizes[i]; + if (!info->io.regsize) + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = regshifts[i]; + info->irq = irqs[i]; + if (info->irq) + info->irq_setup = std_irq_setup; + info->slave_addr = slave_addrs[i]; + + if (!add_smi(info)) { + if (try_smi_init(info)) + cleanup_one_si(info); + ret = 0; + } else { + kfree(info); + } + } + return ret; +} + +#ifdef CONFIG_ACPI + +#include + +/* + * Once we get an ACPI failure, we don't try any more, because we go + * through the tables sequentially. Once we don't find a table, there + * are no more. + */ +static int acpi_failure; + +/* For GPE-type interrupts. */ +static u32 ipmi_acpi_gpe(acpi_handle gpe_device, + u32 gpe_number, void *context) +{ + struct smi_info *smi_info = context; + unsigned long flags; + + spin_lock_irqsave(&(smi_info->si_lock), flags); + + smi_inc_stat(smi_info, interrupts); + + debug_timestamp("ACPI_GPE"); + + smi_event_handler(smi_info, 0); + spin_unlock_irqrestore(&(smi_info->si_lock), flags); + + return ACPI_INTERRUPT_HANDLED; +} + +static void acpi_gpe_irq_cleanup(struct smi_info *info) +{ + if (!info->irq) + return; + + acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); +} + +static int acpi_gpe_irq_setup(struct smi_info *info) +{ + acpi_status status; + + if (!info->irq) + return 0; + + status = acpi_install_gpe_handler(NULL, + info->irq, + ACPI_GPE_LEVEL_TRIGGERED, + &ipmi_acpi_gpe, + info); + if (status != AE_OK) { + dev_warn(info->dev, "%s unable to claim ACPI GPE %d," + " running polled\n", DEVICE_NAME, info->irq); + info->irq = 0; + return -EINVAL; + } else { + info->irq_cleanup = acpi_gpe_irq_cleanup; + dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); + return 0; + } +} + +/* + * Defined at + * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf + */ +struct SPMITable { + s8 Signature[4]; + u32 Length; + u8 Revision; + u8 Checksum; + s8 OEMID[6]; + s8 OEMTableID[8]; + s8 OEMRevision[4]; + s8 CreatorID[4]; + s8 CreatorRevision[4]; + u8 InterfaceType; + u8 IPMIlegacy; + s16 SpecificationRevision; + + /* + * Bit 0 - SCI interrupt supported + * Bit 1 - I/O APIC/SAPIC + */ + u8 InterruptType; + + /* + * If bit 0 of InterruptType is set, then this is the SCI + * interrupt in the GPEx_STS register. + */ + u8 GPE; + + s16 Reserved; + + /* + * If bit 1 of InterruptType is set, then this is the I/O + * APIC/SAPIC interrupt. + */ + u32 GlobalSystemInterrupt; + + /* The actual register address. */ + struct acpi_generic_address addr; + + u8 UID[4]; + + s8 spmi_id[1]; /* A '\0' terminated array starts here. 
*/ +}; + +static int try_init_spmi(struct SPMITable *spmi) +{ + struct smi_info *info; + int rv; + + if (spmi->IPMIlegacy != 1) { + printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); + return -ENODEV; + } + + info = smi_info_alloc(); + if (!info) { + printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); + return -ENOMEM; + } + + info->addr_source = SI_SPMI; + printk(KERN_INFO PFX "probing via SPMI\n"); + + /* Figure out the interface type. */ + switch (spmi->InterfaceType) { + case 1: /* KCS */ + info->si_type = SI_KCS; + break; + case 2: /* SMIC */ + info->si_type = SI_SMIC; + break; + case 3: /* BT */ + info->si_type = SI_BT; + break; + case 4: /* SSIF, just ignore */ + kfree(info); + return -EIO; + default: + printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", + spmi->InterfaceType); + kfree(info); + return -EIO; + } + + if (spmi->InterruptType & 1) { + /* We've got a GPE interrupt. */ + info->irq = spmi->GPE; + info->irq_setup = acpi_gpe_irq_setup; + } else if (spmi->InterruptType & 2) { + /* We've got an APIC/SAPIC interrupt. */ + info->irq = spmi->GlobalSystemInterrupt; + info->irq_setup = std_irq_setup; + } else { + /* Use the default interrupt setting. */ + info->irq = 0; + info->irq_setup = NULL; + } + + if (spmi->addr.bit_width) { + /* A (hopefully) properly formed register bit width. */ + info->io.regspacing = spmi->addr.bit_width / 8; + } else { + info->io.regspacing = DEFAULT_REGSPACING; + } + info->io.regsize = info->io.regspacing; + info->io.regshift = spmi->addr.bit_offset; + + if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else { + kfree(info); + printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); + return -EIO; + } + info->io.addr_data = spmi->addr.address; + + pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n", + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 
"io" : "mem", + info->io.addr_data, info->io.regsize, info->io.regspacing, + info->irq); + + rv = add_smi(info); + if (rv) + kfree(info); + + return rv; +} + +static void spmi_find_bmc(void) +{ + acpi_status status; + struct SPMITable *spmi; + int i; + + if (acpi_disabled) + return; + + if (acpi_failure) + return; + + for (i = 0; ; i++) { + status = acpi_get_table(ACPI_SIG_SPMI, i+1, + (struct acpi_table_header **)&spmi); + if (status != AE_OK) + return; + + try_init_spmi(spmi); + } +} + +static int ipmi_pnp_probe(struct pnp_dev *dev, + const struct pnp_device_id *dev_id) +{ + struct acpi_device *acpi_dev; + struct smi_info *info; + struct resource *res, *res_second; + acpi_handle handle; + acpi_status status; + unsigned long long tmp; + int rv = -EINVAL; + + acpi_dev = pnp_acpi_device(dev); + if (!acpi_dev) + return -ENODEV; + + info = smi_info_alloc(); + if (!info) + return -ENOMEM; + + info->addr_source = SI_ACPI; + printk(KERN_INFO PFX "probing via ACPI\n"); + + handle = acpi_dev->handle; + info->addr_info.acpi_info.acpi_handle = handle; + + /* _IFT tells us the interface type: KCS, BT, etc */ + status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); + if (ACPI_FAILURE(status)) { + dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n"); + goto err_free; + } + + switch (tmp) { + case 1: + info->si_type = SI_KCS; + break; + case 2: + info->si_type = SI_SMIC; + break; + case 3: + info->si_type = SI_BT; + break; + case 4: /* SSIF, just ignore */ + rv = -ENODEV; + goto err_free; + default: + dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); + goto err_free; + } + + res = pnp_get_resource(dev, IORESOURCE_IO, 0); + if (res) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else { + res = pnp_get_resource(dev, IORESOURCE_MEM, 0); + if (res) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + } + if (!res) { + dev_err(&dev->dev, "no I/O or memory address\n"); + goto err_free; + } + info->io.addr_data = res->start; + + info->io.regspacing = DEFAULT_REGSPACING; + res_second = pnp_get_resource(dev, + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 
+ IORESOURCE_IO : IORESOURCE_MEM, + 1); + if (res_second) { + if (res_second->start > info->io.addr_data) + info->io.regspacing = res_second->start - info->io.addr_data; + } + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = 0; + + /* If _GPE exists, use it; otherwise use standard interrupts */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) { + info->irq = tmp; + info->irq_setup = acpi_gpe_irq_setup; + } else if (pnp_irq_valid(dev, 0)) { + info->irq = pnp_irq(dev, 0); + info->irq_setup = std_irq_setup; + } + + info->dev = &dev->dev; + pnp_set_drvdata(dev, info); + + dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", + res, info->io.regsize, info->io.regspacing, + info->irq); + + rv = add_smi(info); + if (rv) + kfree(info); + + return rv; + +err_free: + kfree(info); + return rv; +} + +static void ipmi_pnp_remove(struct pnp_dev *dev) +{ + struct smi_info *info = pnp_get_drvdata(dev); + + cleanup_one_si(info); +} + +static const struct pnp_device_id pnp_dev_table[] = { + {"IPI0001", 0}, + {"", 0}, +}; + +static struct pnp_driver ipmi_pnp_driver = { + .name = DEVICE_NAME, + .probe = ipmi_pnp_probe, + .remove = ipmi_pnp_remove, + .id_table = pnp_dev_table, +}; + +MODULE_DEVICE_TABLE(pnp, pnp_dev_table); +#endif + +#ifdef CONFIG_DMI +struct dmi_ipmi_data { + u8 type; + u8 addr_space; + unsigned long base_addr; + u8 irq; + u8 offset; + u8 slave_addr; +}; + +static int decode_dmi(const struct dmi_header *dm, + struct dmi_ipmi_data *dmi) +{ + const u8 *data = (const u8 *)dm; + unsigned long base_addr; + u8 reg_spacing; + u8 len = dm->length; + + dmi->type = data[4]; + + memcpy(&base_addr, data+8, sizeof(unsigned long)); + if (len >= 0x11) { + if (base_addr & 1) { + /* I/O */ + base_addr &= 0xFFFE; + dmi->addr_space = IPMI_IO_ADDR_SPACE; + } else + /* Memory */ + dmi->addr_space = IPMI_MEM_ADDR_SPACE; + + /* If bit 4 of byte 0x10 is set, then the lsb for the address + is odd. */ + dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); + + dmi->irq = data[0x11]; + + /* The top two bits of byte 0x10 hold the register spacing. */ + reg_spacing = (data[0x10] & 0xC0) >> 6; + switch (reg_spacing) { + case 0x00: /* Byte boundaries */ + dmi->offset = 1; + break; + case 0x01: /* 32-bit boundaries */ + dmi->offset = 4; + break; + case 0x02: /* 16-byte boundaries */ + dmi->offset = 16; + break; + default: + /* Some other interface, just ignore it. */ + return -EIO; + } + } else { + /* Old DMI spec. */ + /* + * Note that technically, the lower bit of the base + * address should be 1 if the address is I/O and 0 if + * the address is in memory. So many systems get that + * wrong (and all that I have seen are I/O) so we just + * ignore that bit and assume I/O. Systems that use + * memory should use the newer spec, anyway. 
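
decode_dmi() packs two tricks into byte 0x10 of the SMBIOS type-38 record: bit 4 restores the least-significant address bit that was stolen for the I/O-vs-memory flag, and bits 6..7 select the register spacing. Decoding a sample record byte; the record values here are invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long base_addr = 0x0ca3;	/* bit 0 flags I/O space */
	unsigned char byte10 = 0x50;		/* spacing = 01b, odd-lsb bit set */
	static const int spacing[] = { 1, 4, 16 };

	/* Mask off the flag bit, then restore the real lsb from bit 4. */
	unsigned long addr = (base_addr & 0xFFFE) | ((byte10 & 0x10) >> 4);

	printf("I/O addr 0x%lx, register spacing %d\n",
	       addr, spacing[(byte10 & 0xC0) >> 6]);
	return 0;
}
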
+ */ + dmi->base_addr = base_addr & 0xfffe; + dmi->addr_space = IPMI_IO_ADDR_SPACE; + dmi->offset = 1; + } + + dmi->slave_addr = data[6]; + + return 0; +} + +static void try_init_dmi(struct dmi_ipmi_data *ipmi_data) +{ + struct smi_info *info; + + info = smi_info_alloc(); + if (!info) { + printk(KERN_ERR PFX "Could not allocate SI data\n"); + return; + } + + info->addr_source = SI_SMBIOS; + printk(KERN_INFO PFX "probing via SMBIOS\n"); + + switch (ipmi_data->type) { + case 0x01: /* KCS */ + info->si_type = SI_KCS; + break; + case 0x02: /* SMIC */ + info->si_type = SI_SMIC; + break; + case 0x03: /* BT */ + info->si_type = SI_BT; + break; + default: + kfree(info); + return; + } + + switch (ipmi_data->addr_space) { + case IPMI_MEM_ADDR_SPACE: + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + break; + + case IPMI_IO_ADDR_SPACE: + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + break; + + default: + kfree(info); + printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", + ipmi_data->addr_space); + return; + } + info->io.addr_data = ipmi_data->base_addr; + + info->io.regspacing = ipmi_data->offset; + if (!info->io.regspacing) + info->io.regspacing = DEFAULT_REGSPACING; + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = 0; + + info->slave_addr = ipmi_data->slave_addr; + + info->irq = ipmi_data->irq; + if (info->irq) + info->irq_setup = std_irq_setup; + + pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", + info->io.addr_data, info->io.regsize, info->io.regspacing, + info->irq); + + if (add_smi(info)) + kfree(info); +} + +static void dmi_find_bmc(void) +{ + const struct dmi_device *dev = NULL; + struct dmi_ipmi_data data; + int rv; + + while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { + memset(&data, 0, sizeof(data)); + rv = decode_dmi((const struct dmi_header *) dev->device_data, + &data); + if (!rv) + try_init_dmi(&data); + } +} +#endif /* CONFIG_DMI */ + +#ifdef CONFIG_PCI + +#define PCI_ERMC_CLASSCODE 0x0C0700 +#define PCI_ERMC_CLASSCODE_MASK 0xffffff00 +#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff +#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 +#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 +#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 + +#define PCI_HP_VENDOR_ID 0x103C +#define PCI_MMC_DEVICE_ID 0x121A +#define PCI_MMC_ADDR_CW 0x10 + +static void ipmi_pci_cleanup(struct smi_info *info) +{ + struct pci_dev *pdev = info->addr_source_data; + + pci_disable_device(pdev); +} + +static int ipmi_pci_probe_regspacing(struct smi_info *info) +{ + if (info->si_type == SI_KCS) { + unsigned char status; + int regspacing; + + info->io.regsize = DEFAULT_REGSIZE; + info->io.regshift = 0; + info->io_size = 2; + info->handlers = &kcs_smi_handlers; + + /* detect 1, 4, 16byte spacing */ + for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { + info->io.regspacing = regspacing; + if (info->io_setup(info)) { + dev_err(info->dev, + "Could not setup I/O space\n"); + return DEFAULT_REGSPACING; + } + /* write invalid cmd */ + info->io.outputb(&info->io, 1, 0x10); + /* read status back */ + status = info->io.inputb(&info->io, 1); + info->io_cleanup(info); + if (status) + return regspacing; + regspacing *= 4; + } + } + return DEFAULT_REGSPACING; +} + +static int ipmi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rv; + int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; + struct smi_info *info; + + info = smi_info_alloc(); + if (!info) 
+ return -ENOMEM; + + info->addr_source = SI_PCI; + dev_info(&pdev->dev, "probing via PCI"); + + switch (class_type) { + case PCI_ERMC_CLASSCODE_TYPE_SMIC: + info->si_type = SI_SMIC; + break; + + case PCI_ERMC_CLASSCODE_TYPE_KCS: + info->si_type = SI_KCS; + break; + + case PCI_ERMC_CLASSCODE_TYPE_BT: + info->si_type = SI_BT; + break; + + default: + kfree(info); + dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); + return -ENOMEM; + } + + rv = pci_enable_device(pdev); + if (rv) { + dev_err(&pdev->dev, "couldn't enable PCI device\n"); + kfree(info); + return rv; + } + + info->addr_source_cleanup = ipmi_pci_cleanup; + info->addr_source_data = pdev; + + if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + info->io.addr_data = pci_resource_start(pdev, 0); + + info->io.regspacing = ipmi_pci_probe_regspacing(info); + info->io.regsize = DEFAULT_REGSIZE; + info->io.regshift = 0; + + info->irq = pdev->irq; + if (info->irq) + info->irq_setup = std_irq_setup; + + info->dev = &pdev->dev; + pci_set_drvdata(pdev, info); + + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], info->io.regsize, info->io.regspacing, + info->irq); + + rv = add_smi(info); + if (rv) { + kfree(info); + pci_disable_device(pdev); + } + + return rv; +} + +static void ipmi_pci_remove(struct pci_dev *pdev) +{ + struct smi_info *info = pci_get_drvdata(pdev); + cleanup_one_si(info); + pci_disable_device(pdev); +} + +static struct pci_device_id ipmi_pci_devices[] = { + { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, + { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); + +static struct pci_driver ipmi_pci_driver = { + .name = DEVICE_NAME, + .id_table = ipmi_pci_devices, + .probe = ipmi_pci_probe, + .remove = ipmi_pci_remove, +}; +#endif /* CONFIG_PCI */ + +static const struct of_device_id ipmi_match[]; +static int ipmi_probe(struct platform_device *dev) +{ +#ifdef CONFIG_OF + const struct of_device_id *match; + struct smi_info *info; + struct resource resource; + const __be32 *regsize, *regspacing, *regshift; + struct device_node *np = dev->dev.of_node; + int ret; + int proplen; + + dev_info(&dev->dev, "probing via device tree\n"); + + match = of_match_device(ipmi_match, &dev->dev); + if (!match) + return -EINVAL; + + if (!of_device_is_available(np)) + return -EINVAL; + + ret = of_address_to_resource(np, 0, &resource); + if (ret) { + dev_warn(&dev->dev, PFX "invalid address from OF\n"); + return ret; + } + + regsize = of_get_property(np, "reg-size", &proplen); + if (regsize && proplen != 4) { + dev_warn(&dev->dev, PFX "invalid regsize from OF\n"); + return -EINVAL; + } + + regspacing = of_get_property(np, "reg-spacing", &proplen); + if (regspacing && proplen != 4) { + dev_warn(&dev->dev, PFX "invalid regspacing from OF\n"); + return -EINVAL; + } + + regshift = of_get_property(np, "reg-shift", &proplen); + if (regshift && proplen != 4) { + dev_warn(&dev->dev, PFX "invalid regshift from OF\n"); + return -EINVAL; + } + + info = smi_info_alloc(); + + if (!info) { + dev_err(&dev->dev, + "could not allocate memory for OF probe\n"); + return -ENOMEM; + } + + info->si_type = (enum si_type) match->data; + info->addr_source = SI_DEVICETREE; + info->irq_setup = std_irq_setup; + + if (resource.flags & IORESOURCE_IO) { + info->io_setup = port_setup; + info->io.addr_type = 
IPMI_IO_ADDR_SPACE; + } else { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + + info->io.addr_data = resource.start; + + info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE; + info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING; + info->io.regshift = regshift ? be32_to_cpup(regshift) : 0; + + info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); + info->dev = &dev->dev; + + dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", + info->io.addr_data, info->io.regsize, info->io.regspacing, + info->irq); + + dev_set_drvdata(&dev->dev, info); + + ret = add_smi(info); + if (ret) { + kfree(info); + return ret; + } +#endif + return 0; +} + +static int ipmi_remove(struct platform_device *dev) +{ +#ifdef CONFIG_OF + cleanup_one_si(dev_get_drvdata(&dev->dev)); +#endif + return 0; +} + +static const struct of_device_id ipmi_match[] = +{ + { .type = "ipmi", .compatible = "ipmi-kcs", + .data = (void *)(unsigned long) SI_KCS }, + { .type = "ipmi", .compatible = "ipmi-smic", + .data = (void *)(unsigned long) SI_SMIC }, + { .type = "ipmi", .compatible = "ipmi-bt", + .data = (void *)(unsigned long) SI_BT }, + {}, +}; + +static struct platform_driver ipmi_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = ipmi_match, + }, + .probe = ipmi_probe, + .remove = ipmi_remove, +}; + +#ifdef CONFIG_PARISC +static int ipmi_parisc_probe(struct parisc_device *dev) +{ + struct smi_info *info; + int rv; + + info = smi_info_alloc(); + + if (!info) { + dev_err(&dev->dev, + "could not allocate memory for PARISC probe\n"); + return -ENOMEM; + } + + info->si_type = SI_KCS; + info->addr_source = SI_DEVICETREE; + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + info->io.addr_data = dev->hpa.start; + info->io.regsize = 1; + info->io.regspacing = 1; + info->io.regshift = 0; + info->irq = 0; /* no interrupt */ + info->irq_setup = NULL; + info->dev = &dev->dev; + + dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data); + + dev_set_drvdata(&dev->dev, info); + + rv = add_smi(info); + if (rv) { + kfree(info); + return rv; + } + + return 0; +} + +static int ipmi_parisc_remove(struct parisc_device *dev) +{ + cleanup_one_si(dev_get_drvdata(&dev->dev)); + return 0; +} + +static struct parisc_device_id ipmi_parisc_tbl[] = { + { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 }, + { 0, } +}; + +static struct parisc_driver ipmi_parisc_driver = { + .name = "ipmi", + .id_table = ipmi_parisc_tbl, + .probe = ipmi_parisc_probe, + .remove = ipmi_parisc_remove, +}; +#endif /* CONFIG_PARISC */ + +static int wait_for_msg_done(struct smi_info *smi_info) +{ + enum si_sm_result smi_result; + + smi_result = smi_info->handlers->event(smi_info->si_sm, 0); + for (;;) { + if (smi_result == SI_SM_CALL_WITH_DELAY || + smi_result == SI_SM_CALL_WITH_TICK_DELAY) { + schedule_timeout_uninterruptible(1); + smi_result = smi_info->handlers->event( + smi_info->si_sm, jiffies_to_usecs(1)); + } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { + smi_result = smi_info->handlers->event( + smi_info->si_sm, 0); + } else + break; + } + if (smi_result == SI_SM_HOSED) + /* + * We couldn't get the state machine to run, so whatever's at + * the port is probably not an IPMI SMI interface. 
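+		 * Callers such as try_get_dev_id() treat that as "no
+		 * BMC present" and abort the probe of this address.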
+ */ + return -ENODEV; + + return 0; +} + +static int try_get_dev_id(struct smi_info *smi_info) +{ + unsigned char msg[2]; + unsigned char *resp; + unsigned long resp_len; + int rv = 0; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + /* + * Do a Get Device ID command, since it comes back with some + * useful info. + */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_DEVICE_ID_CMD; + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + + rv = wait_for_msg_done(smi_info); + if (rv) + goto out; + + resp_len = smi_info->handlers->get_result(smi_info->si_sm, + resp, IPMI_MAX_MSG_LENGTH); + + /* Check and record info from the get device id, in case we need it. */ + rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id); + + out: + kfree(resp); + return rv; +} + +/* + * Some BMCs do not support clearing the receive irq bit in the global + * enables (even if they don't support interrupts on the BMC). Check + * for this and handle it properly. + */ +static void check_clr_rcv_irq(struct smi_info *smi_info) +{ + unsigned char msg[3]; + unsigned char *resp; + unsigned long resp_len; + int rv; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) { + printk(KERN_WARNING PFX "Out of memory allocating response for" + " global enables command, cannot check recv irq bit" + " handling.\n"); + return; + } + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + + rv = wait_for_msg_done(smi_info); + if (rv) { + printk(KERN_WARNING PFX "Error getting response from get" + " global enables command, cannot check recv irq bit" + " handling.\n"); + goto out; + } + + resp_len = smi_info->handlers->get_result(smi_info->si_sm, + resp, IPMI_MAX_MSG_LENGTH); + + if (resp_len < 4 || + resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || + resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || + resp[2] != 0) { + printk(KERN_WARNING PFX "Invalid return from get global" + " enables command, cannot check recv irq bit" + " handling.\n"); + rv = -EINVAL; + goto out; + } + + if ((resp[3] & IPMI_BMC_RCV_MSG_INTR) == 0) + /* Already clear, should work ok. */ + goto out; + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = resp[3] & ~IPMI_BMC_RCV_MSG_INTR; + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + + rv = wait_for_msg_done(smi_info); + if (rv) { + printk(KERN_WARNING PFX "Error getting response from set" + " global enables command, cannot check recv irq bit" + " handling.\n"); + goto out; + } + + resp_len = smi_info->handlers->get_result(smi_info->si_sm, + resp, IPMI_MAX_MSG_LENGTH); + + if (resp_len < 3 || + resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || + resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { + printk(KERN_WARNING PFX "Invalid return from get global" + " enables command, cannot check recv irq bit" + " handling.\n"); + rv = -EINVAL; + goto out; + } + + if (resp[2] != 0) { + /* + * An error when setting the event buffer bit means + * clearing the bit is not supported. 
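+		 * (The bit being cleared here is IPMI_BMC_RCV_MSG_INTR
+		 * in the global enables, not the event buffer bit.)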
+		 */
+		printk(KERN_WARNING PFX "The BMC does not support clearing"
+		       " the recv irq bit, compensating, but the BMC needs to"
+		       " be fixed.\n");
+		smi_info->cannot_clear_recv_irq_bit = true;
+	}
+ out:
+	kfree(resp);
+}
+
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+	unsigned char msg[3];
+	unsigned char *resp;
+	unsigned long resp_len;
+	int           rv = 0;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		printk(KERN_WARNING PFX "Error getting response from get"
+		       " global enables command, the event buffer is not"
+		       " enabled.\n");
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 4 ||
+	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+	    resp[2] != 0) {
+		printk(KERN_WARNING PFX "Invalid return from get global"
+		       " enables command, cannot enable the event buffer.\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+		/* buffer is already enabled, nothing to do. */
+		smi_info->supports_event_msg_buff = true;
+		goto out;
+	}
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		printk(KERN_WARNING PFX "Error getting response from set"
+		       " global enables command, the event buffer is not"
+		       " enabled.\n");
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 3 ||
+	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		printk(KERN_WARNING PFX "Invalid return from set global"
+		       " enables command, cannot enable the event buffer.\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[2] != 0)
+		/*
+		 * An error when setting the event buffer bit means
+		 * that the event buffer is not supported.
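+		 * We return -ENOENT for that case, distinct from the
+		 * -EINVAL used above for malformed responses.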
+ */ + rv = -ENOENT; + else + smi_info->supports_event_msg_buff = true; + + out: + kfree(resp); + return rv; +} + +static int smi_type_proc_show(struct seq_file *m, void *v) +{ + struct smi_info *smi = m->private; + + seq_printf(m, "%s\n", si_to_str[smi->si_type]); + + return 0; +} + +static int smi_type_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_type_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_type_proc_ops = { + .open = smi_type_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smi_si_stats_proc_show(struct seq_file *m, void *v) +{ + struct smi_info *smi = m->private; + + seq_printf(m, "interrupts_enabled: %d\n", + smi->irq && !smi->interrupt_disabled); + seq_printf(m, "short_timeouts: %u\n", + smi_get_stat(smi, short_timeouts)); + seq_printf(m, "long_timeouts: %u\n", + smi_get_stat(smi, long_timeouts)); + seq_printf(m, "idles: %u\n", + smi_get_stat(smi, idles)); + seq_printf(m, "interrupts: %u\n", + smi_get_stat(smi, interrupts)); + seq_printf(m, "attentions: %u\n", + smi_get_stat(smi, attentions)); + seq_printf(m, "flag_fetches: %u\n", + smi_get_stat(smi, flag_fetches)); + seq_printf(m, "hosed_count: %u\n", + smi_get_stat(smi, hosed_count)); + seq_printf(m, "complete_transactions: %u\n", + smi_get_stat(smi, complete_transactions)); + seq_printf(m, "events: %u\n", + smi_get_stat(smi, events)); + seq_printf(m, "watchdog_pretimeouts: %u\n", + smi_get_stat(smi, watchdog_pretimeouts)); + seq_printf(m, "incoming_messages: %u\n", + smi_get_stat(smi, incoming_messages)); + return 0; +} + +static int smi_si_stats_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_si_stats_proc_ops = { + .open = smi_si_stats_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smi_params_proc_show(struct seq_file *m, void *v) +{ + struct smi_info *smi = m->private; + + seq_printf(m, + "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", + si_to_str[smi->si_type], + addr_space_to_str[smi->io.addr_type], + smi->io.addr_data, + smi->io.regspacing, + smi->io.regsize, + smi->io.regshift, + smi->irq, + smi->slave_addr); + + return 0; +} + +static int smi_params_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_params_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_params_proc_ops = { + .open = smi_params_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * oem_data_avail_to_receive_msg_avail + * @info - smi_info structure with msg_flags set + * + * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL + * Returns 1 indicating need to re-run handle_flags(). + */ +static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) +{ + smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | + RECEIVE_MSG_AVAIL); + return 1; +} + +/* + * setup_dell_poweredge_oem_data_handler + * @info - smi_info.device_id must be populated + * + * Systems that match, but have firmware version < 1.40 may assert + * OEM0_DATA_AVAIL on their own, without being told via Set Flags that + * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL + * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags + * as RECEIVE_MSG_AVAIL instead. 
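+ * (That translation is what oem_data_avail_to_receive_msg_avail()
+ * above implements: clear OEM_DATA_AVAIL, set RECEIVE_MSG_AVAIL, and
+ * have handle_flags() run again.)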
+ * + * As Dell has no plans to release IPMI 1.5 firmware that *ever* + * assert the OEM[012] bits, and if it did, the driver would have to + * change to handle that properly, we don't actually check for the + * firmware version. + * Device ID = 0x20 BMC on PowerEdge 8G servers + * Device Revision = 0x80 + * Firmware Revision1 = 0x01 BMC version 1.40 + * Firmware Revision2 = 0x40 BCD encoded + * IPMI Version = 0x51 IPMI 1.5 + * Manufacturer ID = A2 02 00 Dell IANA + * + * Additionally, PowerEdge systems with IPMI < 1.5 may also assert + * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. + * + */ +#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 +#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 +#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 +#define DELL_IANA_MFR_ID 0x0002a2 +static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) +{ + struct ipmi_device_id *id = &smi_info->device_id; + if (id->manufacturer_id == DELL_IANA_MFR_ID) { + if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && + id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && + id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { + smi_info->oem_data_avail_handler = + oem_data_avail_to_receive_msg_avail; + } else if (ipmi_version_major(id) < 1 || + (ipmi_version_major(id) == 1 && + ipmi_version_minor(id) < 5)) { + smi_info->oem_data_avail_handler = + oem_data_avail_to_receive_msg_avail; + } + } +} + +#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA +static void return_hosed_msg_badsize(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg = smi_info->curr_msg; + + /* Make it a response */ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; + msg->rsp_size = 3; + smi_info->curr_msg = NULL; + deliver_recv_msg(smi_info, msg); +} + +/* + * dell_poweredge_bt_xaction_handler + * @info - smi_info.device_id must be populated + * + * Dell PowerEdge servers with the BT interface (x6xx and 1750) will + * not respond to a Get SDR command if the length of the data + * requested is exactly 0x3A, which leads to command timeouts and no + * data returned. This intercepts such commands, and causes userspace + * callers to try again with a different-sized buffer, which succeeds. + */ + +#define STORAGE_NETFN 0x0A +#define STORAGE_CMD_GET_SDR 0x23 +static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, + unsigned long unused, + void *in) +{ + struct smi_info *smi_info = in; + unsigned char *data = smi_info->curr_msg->data; + unsigned int size = smi_info->curr_msg->data_size; + if (size >= 8 && + (data[0]>>2) == STORAGE_NETFN && + data[1] == STORAGE_CMD_GET_SDR && + data[7] == 0x3A) { + return_hosed_msg_badsize(smi_info); + return NOTIFY_STOP; + } + return NOTIFY_DONE; +} + +static struct notifier_block dell_poweredge_bt_xaction_notifier = { + .notifier_call = dell_poweredge_bt_xaction_handler, +}; + +/* + * setup_dell_poweredge_bt_xaction_handler + * @info - smi_info.device_id must be filled in already + * + * Fills in smi_info.device_id.start_transaction_pre_hook + * when we know what function to use there. 
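+ * (Here the hook takes the form of a notifier:
+ * dell_poweredge_bt_xaction_notifier is added to the chain via
+ * register_xaction_notifier().)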
+ */ +static void +setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) +{ + struct ipmi_device_id *id = &smi_info->device_id; + if (id->manufacturer_id == DELL_IANA_MFR_ID && + smi_info->si_type == SI_BT) + register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); +} + +/* + * setup_oem_data_handler + * @info - smi_info.device_id must be filled in already + * + * Fills in smi_info.device_id.oem_data_available_handler + * when we know what function to use there. + */ + +static void setup_oem_data_handler(struct smi_info *smi_info) +{ + setup_dell_poweredge_oem_data_handler(smi_info); +} + +static void setup_xaction_handlers(struct smi_info *smi_info) +{ + setup_dell_poweredge_bt_xaction_handler(smi_info); +} + +static inline void wait_for_timer_and_thread(struct smi_info *smi_info) +{ + if (smi_info->thread != NULL) + kthread_stop(smi_info->thread); + if (smi_info->timer_running) + del_timer_sync(&smi_info->si_timer); +} + +static struct ipmi_default_vals +{ + int type; + int port; +} ipmi_defaults[] = +{ + { .type = SI_KCS, .port = 0xca2 }, + { .type = SI_SMIC, .port = 0xca9 }, + { .type = SI_BT, .port = 0xe4 }, + { .port = 0 } +}; + +static void default_find_bmc(void) +{ + struct smi_info *info; + int i; + + for (i = 0; ; i++) { + if (!ipmi_defaults[i].port) + break; +#ifdef CONFIG_PPC + if (check_legacy_ioport(ipmi_defaults[i].port)) + continue; +#endif + info = smi_info_alloc(); + if (!info) + return; + + info->addr_source = SI_DEFAULT; + + info->si_type = ipmi_defaults[i].type; + info->io_setup = port_setup; + info->io.addr_data = ipmi_defaults[i].port; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + + info->io.addr = NULL; + info->io.regspacing = DEFAULT_REGSPACING; + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = 0; + + if (add_smi(info) == 0) { + if ((try_smi_init(info)) == 0) { + /* Found one... */ + printk(KERN_INFO PFX "Found default %s" + " state machine at %s address 0x%lx\n", + si_to_str[info->si_type], + addr_space_to_str[info->io.addr_type], + info->io.addr_data); + } else + cleanup_one_si(info); + } else { + kfree(info); + } + } +} + +static int is_new_interface(struct smi_info *info) +{ + struct smi_info *e; + + list_for_each_entry(e, &smi_infos, link) { + if (e->io.addr_type != info->io.addr_type) + continue; + if (e->io.addr_data == info->io.addr_data) + return 0; + } + + return 1; +} + +static int add_smi(struct smi_info *new_smi) +{ + int rv = 0; + + printk(KERN_INFO PFX "Adding %s-specified %s state machine", + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type]); + mutex_lock(&smi_infos_lock); + if (!is_new_interface(new_smi)) { + printk(KERN_CONT " duplicate interface\n"); + rv = -EBUSY; + goto out_err; + } + + printk(KERN_CONT "\n"); + + /* So we know not to free it unless we have allocated one. 
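+	 * The fields are cleared here; try_smi_init() fills them in
+	 * later, and its error path frees only what was allocated.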
*/ + new_smi->intf = NULL; + new_smi->si_sm = NULL; + new_smi->handlers = NULL; + + list_add_tail(&new_smi->link, &smi_infos); + +out_err: + mutex_unlock(&smi_infos_lock); + return rv; +} + +static int try_smi_init(struct smi_info *new_smi) +{ + int rv = 0; + int i; + + printk(KERN_INFO PFX "Trying %s-specified %s state" + " machine at %s address 0x%lx, slave address 0x%x," + " irq %d\n", + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type], + addr_space_to_str[new_smi->io.addr_type], + new_smi->io.addr_data, + new_smi->slave_addr, new_smi->irq); + + switch (new_smi->si_type) { + case SI_KCS: + new_smi->handlers = &kcs_smi_handlers; + break; + + case SI_SMIC: + new_smi->handlers = &smic_smi_handlers; + break; + + case SI_BT: + new_smi->handlers = &bt_smi_handlers; + break; + + default: + /* No support for anything else yet. */ + rv = -EIO; + goto out_err; + } + + /* Allocate the state machine's data and initialize it. */ + new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); + if (!new_smi->si_sm) { + printk(KERN_ERR PFX + "Could not allocate state machine memory\n"); + rv = -ENOMEM; + goto out_err; + } + new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm, + &new_smi->io); + + /* Now that we know the I/O size, we can set up the I/O. */ + rv = new_smi->io_setup(new_smi); + if (rv) { + printk(KERN_ERR PFX "Could not set up I/O space\n"); + goto out_err; + } + + /* Do low-level detection first. */ + if (new_smi->handlers->detect(new_smi->si_sm)) { + if (new_smi->addr_source) + printk(KERN_INFO PFX "Interface detection failed\n"); + rv = -ENODEV; + goto out_err; + } + + /* + * Attempt a get device id command. If it fails, we probably + * don't have a BMC here. + */ + rv = try_get_dev_id(new_smi); + if (rv) { + if (new_smi->addr_source) + printk(KERN_INFO PFX "There appears to be no BMC" + " at this location\n"); + goto out_err; + } + + check_clr_rcv_irq(new_smi); + + setup_oem_data_handler(new_smi); + setup_xaction_handlers(new_smi); + + new_smi->waiting_msg = NULL; + new_smi->curr_msg = NULL; + atomic_set(&new_smi->req_events, 0); + new_smi->run_to_completion = false; + for (i = 0; i < SI_NUM_STATS; i++) + atomic_set(&new_smi->stats[i], 0); + + new_smi->interrupt_disabled = true; + atomic_set(&new_smi->need_watch, 0); + new_smi->intf_num = smi_num; + smi_num++; + + rv = try_enable_event_buffer(new_smi); + if (rv == 0) + new_smi->has_event_buffer = true; + + /* + * Start clearing the flags before we enable interrupts or the + * timer to avoid racing with the timer. + */ + start_clear_flags(new_smi); + + /* + * IRQ is defined to be set when non-zero. req_events will + * cause a global flags check that will enable interrupts. + */ + if (new_smi->irq) { + new_smi->interrupt_disabled = false; + atomic_set(&new_smi->req_events, 1); + } + + if (!new_smi->dev) { + /* + * If we don't already have a device from something + * else (like PCI), then register a new one. 
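+		 * The platform device mainly gives the interface a
+		 * struct device to register with and log through.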
+ */ + new_smi->pdev = platform_device_alloc("ipmi_si", + new_smi->intf_num); + if (!new_smi->pdev) { + printk(KERN_ERR PFX + "Unable to allocate platform device\n"); + goto out_err; + } + new_smi->dev = &new_smi->pdev->dev; + new_smi->dev->driver = &ipmi_driver.driver; + + rv = platform_device_add(new_smi->pdev); + if (rv) { + printk(KERN_ERR PFX + "Unable to register system interface device:" + " %d\n", + rv); + goto out_err; + } + new_smi->dev_registered = true; + } + + rv = ipmi_register_smi(&handlers, + new_smi, + &new_smi->device_id, + new_smi->dev, + new_smi->slave_addr); + if (rv) { + dev_err(new_smi->dev, "Unable to register device: error %d\n", + rv); + goto out_err_stop_timer; + } + + rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", + &smi_type_proc_ops, + new_smi); + if (rv) { + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); + goto out_err_stop_timer; + } + + rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", + &smi_si_stats_proc_ops, + new_smi); + if (rv) { + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); + goto out_err_stop_timer; + } + + rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", + &smi_params_proc_ops, + new_smi); + if (rv) { + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); + goto out_err_stop_timer; + } + + dev_info(new_smi->dev, "IPMI %s interface initialized\n", + si_to_str[new_smi->si_type]); + + return 0; + + out_err_stop_timer: + wait_for_timer_and_thread(new_smi); + + out_err: + new_smi->interrupt_disabled = true; + + if (new_smi->intf) { + ipmi_smi_t intf = new_smi->intf; + new_smi->intf = NULL; + ipmi_unregister_smi(intf); + } + + if (new_smi->irq_cleanup) { + new_smi->irq_cleanup(new_smi); + new_smi->irq_cleanup = NULL; + } + + /* + * Wait until we know that we are out of any interrupt + * handlers might have been running before we freed the + * interrupt. + */ + synchronize_sched(); + + if (new_smi->si_sm) { + if (new_smi->handlers) + new_smi->handlers->cleanup(new_smi->si_sm); + kfree(new_smi->si_sm); + new_smi->si_sm = NULL; + } + if (new_smi->addr_source_cleanup) { + new_smi->addr_source_cleanup(new_smi); + new_smi->addr_source_cleanup = NULL; + } + if (new_smi->io_cleanup) { + new_smi->io_cleanup(new_smi); + new_smi->io_cleanup = NULL; + } + + if (new_smi->dev_registered) { + platform_device_unregister(new_smi->pdev); + new_smi->dev_registered = false; + } + + return rv; +} + +static int init_ipmi_si(void) +{ + int i; + char *str; + int rv; + struct smi_info *e; + enum ipmi_addr_src type = SI_INVALID; + + if (initialized) + return 0; + initialized = 1; + + if (si_tryplatform) { + rv = platform_driver_register(&ipmi_driver); + if (rv) { + printk(KERN_ERR PFX "Unable to register " + "driver: %d\n", rv); + return rv; + } + } + + /* Parse out the si_type string into its components. 
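+	 * e.g. si_type_str = "kcs,bt" yields si_type[0] = "kcs" and
+	 * si_type[1] = "bt"; the commas are overwritten with NULs.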
*/ + str = si_type_str; + if (*str != '\0') { + for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) { + si_type[i] = str; + str = strchr(str, ','); + if (str) { + *str = '\0'; + str++; + } else { + break; + } + } + } + + printk(KERN_INFO "IPMI System Interface driver.\n"); + + /* If the user gave us a device, they presumably want us to use it */ + if (!hardcode_find_bmc()) + return 0; + +#ifdef CONFIG_PCI + if (si_trypci) { + rv = pci_register_driver(&ipmi_pci_driver); + if (rv) + printk(KERN_ERR PFX "Unable to register " + "PCI driver: %d\n", rv); + else + pci_registered = true; + } +#endif + +#ifdef CONFIG_ACPI + if (si_tryacpi) { + pnp_register_driver(&ipmi_pnp_driver); + pnp_registered = true; + } +#endif + +#ifdef CONFIG_DMI + if (si_trydmi) + dmi_find_bmc(); +#endif + +#ifdef CONFIG_ACPI + if (si_tryacpi) + spmi_find_bmc(); +#endif + +#ifdef CONFIG_PARISC + register_parisc_driver(&ipmi_parisc_driver); + parisc_registered = true; + /* poking PC IO addresses will crash machine, don't do it */ + si_trydefaults = 0; +#endif + + /* We prefer devices with interrupts, but in the case of a machine + with multiple BMCs we assume that there will be several instances + of a given type so if we succeed in registering a type then also + try to register everything else of the same type */ + + mutex_lock(&smi_infos_lock); + list_for_each_entry(e, &smi_infos, link) { + /* Try to register a device if it has an IRQ and we either + haven't successfully registered a device yet or this + device has the same type as one we successfully registered */ + if (e->irq && (!type || e->addr_source == type)) { + if (!try_smi_init(e)) { + type = e->addr_source; + } + } + } + + /* type will only have been set if we successfully registered an si */ + if (type) { + mutex_unlock(&smi_infos_lock); + return 0; + } + + /* Fall back to the preferred device */ + + list_for_each_entry(e, &smi_infos, link) { + if (!e->irq && (!type || e->addr_source == type)) { + if (!try_smi_init(e)) { + type = e->addr_source; + } + } + } + mutex_unlock(&smi_infos_lock); + + if (type) + return 0; + + if (si_trydefaults) { + mutex_lock(&smi_infos_lock); + if (list_empty(&smi_infos)) { + /* No BMC was found, try defaults. */ + mutex_unlock(&smi_infos_lock); + default_find_bmc(); + } else + mutex_unlock(&smi_infos_lock); + } + + mutex_lock(&smi_infos_lock); + if (unload_when_empty && list_empty(&smi_infos)) { + mutex_unlock(&smi_infos_lock); + cleanup_ipmi_si(); + printk(KERN_WARNING PFX + "Unable to find any System Interface(s)\n"); + return -ENODEV; + } else { + mutex_unlock(&smi_infos_lock); + return 0; + } +} +module_init(init_ipmi_si); + +static void cleanup_one_si(struct smi_info *to_clean) +{ + int rv = 0; + + if (!to_clean) + return; + + if (to_clean->intf) { + ipmi_smi_t intf = to_clean->intf; + + to_clean->intf = NULL; + rv = ipmi_unregister_smi(intf); + if (rv) { + pr_err(PFX "Unable to unregister device: errno=%d\n", + rv); + } + } + + if (to_clean->dev) + dev_set_drvdata(to_clean->dev, NULL); + + list_del(&to_clean->link); + + /* + * Make sure that interrupts, the timer and the thread are + * stopped and will not run again. + */ + if (to_clean->irq_cleanup) + to_clean->irq_cleanup(to_clean); + wait_for_timer_and_thread(to_clean); + + /* + * Timeouts are stopped, now make sure the interrupts are off + * in the BMC. Note that timers and CPU interrupts are off, + * so no need for locks. 
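+	 * The polling loops below flush any transaction in progress
+	 * until the state machine settles back to SI_NORMAL.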
+	 */
+	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+		poll(to_clean);
+		schedule_timeout_uninterruptible(1);
+	}
+	disable_si_irq(to_clean);
+	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+		poll(to_clean);
+		schedule_timeout_uninterruptible(1);
+	}
+
+	if (to_clean->handlers)
+		to_clean->handlers->cleanup(to_clean->si_sm);
+
+	kfree(to_clean->si_sm);
+
+	if (to_clean->addr_source_cleanup)
+		to_clean->addr_source_cleanup(to_clean);
+	if (to_clean->io_cleanup)
+		to_clean->io_cleanup(to_clean);
+
+	if (to_clean->dev_registered)
+		platform_device_unregister(to_clean->pdev);
+
+	kfree(to_clean);
+}
+
+static void cleanup_ipmi_si(void)
+{
+	struct smi_info *e, *tmp_e;
+
+	if (!initialized)
+		return;
+
+#ifdef CONFIG_PCI
+	if (pci_registered)
+		pci_unregister_driver(&ipmi_pci_driver);
+#endif
+#ifdef CONFIG_ACPI
+	if (pnp_registered)
+		pnp_unregister_driver(&ipmi_pnp_driver);
+#endif
+#ifdef CONFIG_PARISC
+	if (parisc_registered)
+		unregister_parisc_driver(&ipmi_parisc_driver);
+#endif
+
+	platform_driver_unregister(&ipmi_driver);
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+		cleanup_one_si(e);
+	mutex_unlock(&smi_infos_lock);
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+		   " system interfaces.");
diff --git a/kernel/drivers/char/ipmi/ipmi_si_sm.h b/kernel/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 000000000..df89f7347
--- /dev/null
+++ b/kernel/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,141 @@
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines.  This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This is defined by the state machines themselves; it is an opaque
+ * data type for them to use.
+ */
+struct si_sm_data;
+
+/*
+ * The structure for doing I/O in the state machine.  The state
+ * machine doesn't have the actual I/O routines; they are done through
+ * this interface.
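+ * (In ipmi_si_intf.c, port_setup() and mem_setup() are the routines
+ * that fill in inputb/outputb and apply regspacing, regsize and
+ * regshift.)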
+ */
+struct si_sm_io {
+	unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
+	void (*outputb)(struct si_sm_io *io,
+			unsigned int offset,
+			unsigned char b);
+
+	/*
+	 * Generic info used by the actual handling routines, the
+	 * state machine shouldn't touch these.
+	 */
+	void __iomem *addr;
+	int regspacing;
+	int regsize;
+	int regshift;
+	int addr_type;
+	long addr_data;
+};
+
+/* Results of SMI events. */
+enum si_sm_result {
+	SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+	SI_SM_CALL_WITH_DELAY,	/* Delay some before calling again. */
+	SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
+	SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+	SI_SM_IDLE,		/* The SM is in idle state. */
+	SI_SM_HOSED,		/* The hardware violated the state machine. */
+
+	/*
+	 * The hardware is asserting attn and the state machine is
+	 * idle.
+	 */
+	SI_SM_ATTN
+};
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers {
+	/*
+	 * Put the version number of the state machine here so the
+	 * upper layer can print it.
+	 */
+	char *version;
+
+	/*
+	 * Initialize the data and return the amount of I/O space to
+	 * reserve for the space.
+	 */
+	unsigned int (*init_data)(struct si_sm_data *smi,
+				  struct si_sm_io *io);
+
+	/*
+	 * Start a new transaction in the state machine.  This will
+	 * return -2 if the state machine is not idle, -1 if the size
+	 * is invalid (too large or too small), or 0 if the transaction
+	 * is successfully completed.
+	 */
+	int (*start_transaction)(struct si_sm_data *smi,
+				 unsigned char *data, unsigned int size);
+
+	/*
+	 * Return the results after the transaction.  This will return
+	 * -1 if the buffer is too small, zero if no transaction is
+	 * present, or the actual length of the result data.
+	 */
+	int (*get_result)(struct si_sm_data *smi,
+			  unsigned char *data, unsigned int length);
+
+	/*
+	 * Call this periodically (for a polled interface) or upon
+	 * receiving an interrupt (for an interrupt-driven interface).
+	 * If interrupt driven, you should probably poll this
+	 * periodically when not in idle state.  This should be called
+	 * with the time that passed since the last call, if it is
+	 * significant.  Time is in microseconds.
+	 */
+	enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+	/*
+	 * Attempt to detect an SMI.  Returns 0 on success or nonzero
+	 * on failure.
+	 */
+	int (*detect)(struct si_sm_data *smi);
+
+	/* The interface is shutting down, so clean it up. */
+	void (*cleanup)(struct si_sm_data *smi);
+
+	/* Return the size of the SMI structure in bytes. */
+	int (*size)(void);
+};
+
+/* Current state machines that we can use. */
+extern struct si_sm_handlers kcs_smi_handlers;
+extern struct si_sm_handlers smic_smi_handlers;
+extern struct si_sm_handlers bt_smi_handlers;
+
diff --git a/kernel/drivers/char/ipmi/ipmi_smic_sm.c b/kernel/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 000000000..c8e77afa8
--- /dev/null
+++ b/kernel/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,600 @@
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for an IPMI SMIC interface
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by:	Hannes Schulz <schulz@schwaar.com>
+ *		ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ *   Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler (c) Copyright
+ * 2001 Hewlett-Packard Company
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* smic_debug is a bit-field
+ *	SMIC_DEBUG_ENABLE -	turned on for now
+ *	SMIC_DEBUG_MSG -	commands and their responses
+ *	SMIC_DEBUG_STATES -	state machine
+*/
+#define SMIC_DEBUG_STATES	4
+#define SMIC_DEBUG_MSG		2
+#define	SMIC_DEBUG_ENABLE	1
+
+static int smic_debug = 1;
+module_param(smic_debug, int, 0644);
+MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+enum smic_states {
+	SMIC_IDLE,
+	SMIC_START_OP,
+	SMIC_OP_OK,
+	SMIC_WRITE_START,
+	SMIC_WRITE_NEXT,
+	SMIC_WRITE_END,
+	SMIC_WRITE2READ,
+	SMIC_READ_START,
+	SMIC_READ_NEXT,
+	SMIC_READ_END,
+	SMIC_HOSED
+};
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY	0x80
+#define SMIC_TX_DATA_READY	0x40
+
+/*
+ * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
+ * a few systems, and then only by Systems Management
+ * Interrupts, not by the OS.  Always ignore these bits.
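+ * (Of the remaining flags, the state machine below only tests
+ * SMIC_SMS_DATA_AVAIL, SMIC_RX_DATA_READY, SMIC_TX_DATA_READY and
+ * SMIC_FLAG_BSY.)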
+ * + */ +#define SMIC_SMI 0x10 +#define SMIC_EVM_DATA_AVAIL 0x08 +#define SMIC_SMS_DATA_AVAIL 0x04 +#define SMIC_FLAG_BSY 0x01 + +/* SMIC Error Codes */ +#define EC_NO_ERROR 0x00 +#define EC_ABORTED 0x01 +#define EC_ILLEGAL_CONTROL 0x02 +#define EC_NO_RESPONSE 0x03 +#define EC_ILLEGAL_COMMAND 0x04 +#define EC_BUFFER_FULL 0x05 + +struct si_sm_data { + enum smic_states state; + struct si_sm_io *io; + unsigned char write_data[MAX_SMIC_WRITE_SIZE]; + int write_pos; + int write_count; + int orig_write_count; + unsigned char read_data[MAX_SMIC_READ_SIZE]; + int read_pos; + int truncated; + unsigned int error_retries; + long smic_timeout; +}; + +static unsigned int init_smic_data(struct si_sm_data *smic, + struct si_sm_io *io) +{ + smic->state = SMIC_IDLE; + smic->io = io; + smic->write_pos = 0; + smic->write_count = 0; + smic->orig_write_count = 0; + smic->read_pos = 0; + smic->error_retries = 0; + smic->truncated = 0; + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + + /* We use 3 bytes of I/O. */ + return 3; +} + +static int start_smic_transaction(struct si_sm_data *smic, + unsigned char *data, unsigned int size) +{ + unsigned int i; + + if (size < 2) + return IPMI_REQ_LEN_INVALID_ERR; + if (size > MAX_SMIC_WRITE_SIZE) + return IPMI_REQ_LEN_EXCEEDED_ERR; + + if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) + return IPMI_NOT_IN_MY_STATE_ERR; + + if (smic_debug & SMIC_DEBUG_MSG) { + printk(KERN_DEBUG "start_smic_transaction -"); + for (i = 0; i < size; i++) + printk(" %02x", (unsigned char) data[i]); + printk("\n"); + } + smic->error_retries = 0; + memcpy(smic->write_data, data, size); + smic->write_count = size; + smic->orig_write_count = size; + smic->write_pos = 0; + smic->read_pos = 0; + smic->state = SMIC_START_OP; + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + return 0; +} + +static int smic_get_result(struct si_sm_data *smic, + unsigned char *data, unsigned int length) +{ + int i; + + if (smic_debug & SMIC_DEBUG_MSG) { + printk(KERN_DEBUG "smic_get result -"); + for (i = 0; i < smic->read_pos; i++) + printk(" %02x", smic->read_data[i]); + printk("\n"); + } + if (length < smic->read_pos) { + smic->read_pos = length; + smic->truncated = 1; + } + memcpy(data, smic->read_data, smic->read_pos); + + if ((length >= 3) && (smic->read_pos < 3)) { + data[2] = IPMI_ERR_UNSPECIFIED; + smic->read_pos = 3; + } + if (smic->truncated) { + data[2] = IPMI_ERR_MSG_TRUNCATED; + smic->truncated = 0; + } + return smic->read_pos; +} + +static inline unsigned char read_smic_flags(struct si_sm_data *smic) +{ + return smic->io->inputb(smic->io, 2); +} + +static inline unsigned char read_smic_status(struct si_sm_data *smic) +{ + return smic->io->inputb(smic->io, 1); +} + +static inline unsigned char read_smic_data(struct si_sm_data *smic) +{ + return smic->io->inputb(smic->io, 0); +} + +static inline void write_smic_flags(struct si_sm_data *smic, + unsigned char flags) +{ + smic->io->outputb(smic->io, 2, flags); +} + +static inline void write_smic_control(struct si_sm_data *smic, + unsigned char control) +{ + smic->io->outputb(smic->io, 1, control); +} + +static inline void write_si_sm_data(struct si_sm_data *smic, + unsigned char data) +{ + smic->io->outputb(smic->io, 0, data); +} + +static inline void start_error_recovery(struct si_sm_data *smic, char *reason) +{ + (smic->error_retries)++; + if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { + if (smic_debug & SMIC_DEBUG_ENABLE) + printk(KERN_WARNING + "ipmi_smic_drv: smic hosed: %s\n", reason); + smic->state = SMIC_HOSED; + } else { + smic->write_count 
= smic->orig_write_count; + smic->write_pos = 0; + smic->read_pos = 0; + smic->state = SMIC_START_OP; + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + } +} + +static inline void write_next_byte(struct si_sm_data *smic) +{ + write_si_sm_data(smic, smic->write_data[smic->write_pos]); + (smic->write_pos)++; + (smic->write_count)--; +} + +static inline void read_next_byte(struct si_sm_data *smic) +{ + if (smic->read_pos >= MAX_SMIC_READ_SIZE) { + read_smic_data(smic); + smic->truncated = 1; + } else { + smic->read_data[smic->read_pos] = read_smic_data(smic); + smic->read_pos++; + } +} + +/* SMIC Control/Status Code Components */ +#define SMIC_GET_STATUS 0x00 /* Control form's name */ +#define SMIC_READY 0x00 /* Status form's name */ +#define SMIC_WR_START 0x01 /* Unified Control/Status names... */ +#define SMIC_WR_NEXT 0x02 +#define SMIC_WR_END 0x03 +#define SMIC_RD_START 0x04 +#define SMIC_RD_NEXT 0x05 +#define SMIC_RD_END 0x06 +#define SMIC_CODE_MASK 0x0f + +#define SMIC_CONTROL 0x00 +#define SMIC_STATUS 0x80 +#define SMIC_CS_MASK 0x80 + +#define SMIC_SMS 0x40 +#define SMIC_SMM 0x60 +#define SMIC_STREAM_MASK 0x60 + +/* SMIC Control Codes */ +#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS) +#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START) +#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT) +#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END) +#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START) +#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT) +#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END) + +#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS) +#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START) +#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT) +#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END) +#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START) +#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT) +#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END) + +/* SMIC Status Codes */ +#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY) +#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START) +#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT) +#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END) +#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START) +#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT) +#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END) + +#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY) +#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START) +#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT) +#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END) +#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START) +#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT) +#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END) + +/* these are the control/status codes we actually use + SMIC_CC_SMS_GET_STATUS 0x40 + SMIC_CC_SMS_WR_START 0x41 + SMIC_CC_SMS_WR_NEXT 0x42 + SMIC_CC_SMS_WR_END 0x43 + SMIC_CC_SMS_RD_START 0x44 + SMIC_CC_SMS_RD_NEXT 0x45 + SMIC_CC_SMS_RD_END 0x46 + + SMIC_SC_SMS_READY 0xC0 + SMIC_SC_SMS_WR_START 0xC1 + SMIC_SC_SMS_WR_NEXT 0xC2 + SMIC_SC_SMS_WR_END 0xC3 + SMIC_SC_SMS_RD_START 0xC4 + SMIC_SC_SMS_RD_NEXT 0xC5 + SMIC_SC_SMS_RD_END 0xC6 +*/ + +static enum si_sm_result smic_event(struct si_sm_data *smic, long time) +{ + unsigned 
char status; + unsigned char flags; + unsigned char data; + + if (smic->state == SMIC_HOSED) { + init_smic_data(smic, smic->io); + return SI_SM_HOSED; + } + if (smic->state != SMIC_IDLE) { + if (smic_debug & SMIC_DEBUG_STATES) + printk(KERN_DEBUG + "smic_event - smic->smic_timeout = %ld," + " time = %ld\n", + smic->smic_timeout, time); + /* + * FIXME: smic_event is sometimes called with time > + * SMIC_RETRY_TIMEOUT + */ + if (time < SMIC_RETRY_TIMEOUT) { + smic->smic_timeout -= time; + if (smic->smic_timeout < 0) { + start_error_recovery(smic, "smic timed out."); + return SI_SM_CALL_WITH_DELAY; + } + } + } + flags = read_smic_flags(smic); + if (flags & SMIC_FLAG_BSY) + return SI_SM_CALL_WITH_DELAY; + + status = read_smic_status(smic); + if (smic_debug & SMIC_DEBUG_STATES) + printk(KERN_DEBUG + "smic_event - state = %d, flags = 0x%02x," + " status = 0x%02x\n", + smic->state, flags, status); + + switch (smic->state) { + case SMIC_IDLE: + /* in IDLE we check for available messages */ + if (flags & SMIC_SMS_DATA_AVAIL) + return SI_SM_ATTN; + return SI_SM_IDLE; + + case SMIC_START_OP: + /* sanity check whether smic is really idle */ + write_smic_control(smic, SMIC_CC_SMS_GET_STATUS); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_OP_OK; + break; + + case SMIC_OP_OK: + if (status != SMIC_SC_SMS_READY) { + /* this should not happen */ + start_error_recovery(smic, + "state = SMIC_OP_OK," + " status != SMIC_SC_SMS_READY"); + return SI_SM_CALL_WITH_DELAY; + } + /* OK so far; smic is idle let us start ... */ + write_smic_control(smic, SMIC_CC_SMS_WR_START); + write_next_byte(smic); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_WRITE_START; + break; + + case SMIC_WRITE_START: + if (status != SMIC_SC_SMS_WR_START) { + start_error_recovery(smic, + "state = SMIC_WRITE_START, " + "status != SMIC_SC_SMS_WR_START"); + return SI_SM_CALL_WITH_DELAY; + } + /* + * we must not issue WR_(NEXT|END) unless + * TX_DATA_READY is set + * */ + if (flags & SMIC_TX_DATA_READY) { + if (smic->write_count == 1) { + /* last byte */ + write_smic_control(smic, SMIC_CC_SMS_WR_END); + smic->state = SMIC_WRITE_END; + } else { + write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); + smic->state = SMIC_WRITE_NEXT; + } + write_next_byte(smic); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + } else + return SI_SM_CALL_WITH_DELAY; + break; + + case SMIC_WRITE_NEXT: + if (status != SMIC_SC_SMS_WR_NEXT) { + start_error_recovery(smic, + "state = SMIC_WRITE_NEXT, " + "status != SMIC_SC_SMS_WR_NEXT"); + return SI_SM_CALL_WITH_DELAY; + } + /* this is the same code as in SMIC_WRITE_START */ + if (flags & SMIC_TX_DATA_READY) { + if (smic->write_count == 1) { + write_smic_control(smic, SMIC_CC_SMS_WR_END); + smic->state = SMIC_WRITE_END; + } else { + write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); + smic->state = SMIC_WRITE_NEXT; + } + write_next_byte(smic); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + } else + return SI_SM_CALL_WITH_DELAY; + break; + + case SMIC_WRITE_END: + if (status != SMIC_SC_SMS_WR_END) { + start_error_recovery(smic, + "state = SMIC_WRITE_END, " + "status != SMIC_SC_SMS_WR_END"); + return SI_SM_CALL_WITH_DELAY; + } + /* data register holds an error code */ + data = read_smic_data(smic); + if (data != 0) { + if (smic_debug & SMIC_DEBUG_ENABLE) + printk(KERN_DEBUG + "SMIC_WRITE_END: data = %02x\n", data); + start_error_recovery(smic, + "state = SMIC_WRITE_END, " + "data != SUCCESS"); + return SI_SM_CALL_WITH_DELAY; + } else + smic->state = SMIC_WRITE2READ; + break; + + case 
SMIC_WRITE2READ: + /* + * we must wait for RX_DATA_READY to be set before we + * can continue + */ + if (flags & SMIC_RX_DATA_READY) { + write_smic_control(smic, SMIC_CC_SMS_RD_START); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_START; + } else + return SI_SM_CALL_WITH_DELAY; + break; + + case SMIC_READ_START: + if (status != SMIC_SC_SMS_RD_START) { + start_error_recovery(smic, + "state = SMIC_READ_START, " + "status != SMIC_SC_SMS_RD_START"); + return SI_SM_CALL_WITH_DELAY; + } + if (flags & SMIC_RX_DATA_READY) { + read_next_byte(smic); + write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_NEXT; + } else + return SI_SM_CALL_WITH_DELAY; + break; + + case SMIC_READ_NEXT: + switch (status) { + /* + * smic tells us that this is the last byte to be read + * --> clean up + */ + case SMIC_SC_SMS_RD_END: + read_next_byte(smic); + write_smic_control(smic, SMIC_CC_SMS_RD_END); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_END; + break; + case SMIC_SC_SMS_RD_NEXT: + if (flags & SMIC_RX_DATA_READY) { + read_next_byte(smic); + write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_NEXT; + } else + return SI_SM_CALL_WITH_DELAY; + break; + default: + start_error_recovery( + smic, + "state = SMIC_READ_NEXT, " + "status != SMIC_SC_SMS_RD_(NEXT|END)"); + return SI_SM_CALL_WITH_DELAY; + } + break; + + case SMIC_READ_END: + if (status != SMIC_SC_SMS_READY) { + start_error_recovery(smic, + "state = SMIC_READ_END, " + "status != SMIC_SC_SMS_READY"); + return SI_SM_CALL_WITH_DELAY; + } + data = read_smic_data(smic); + /* data register holds an error code */ + if (data != 0) { + if (smic_debug & SMIC_DEBUG_ENABLE) + printk(KERN_DEBUG + "SMIC_READ_END: data = %02x\n", data); + start_error_recovery(smic, + "state = SMIC_READ_END, " + "data != SUCCESS"); + return SI_SM_CALL_WITH_DELAY; + } else { + smic->state = SMIC_IDLE; + return SI_SM_TRANSACTION_COMPLETE; + } + + case SMIC_HOSED: + init_smic_data(smic, smic->io); + return SI_SM_HOSED; + + default: + if (smic_debug & SMIC_DEBUG_ENABLE) { + printk(KERN_DEBUG "smic->state = %d\n", smic->state); + start_error_recovery(smic, "state = UNKNOWN"); + return SI_SM_CALL_WITH_DELAY; + } + } + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + return SI_SM_CALL_WITHOUT_DELAY; +} + +static int smic_detect(struct si_sm_data *smic) +{ + /* + * It's impossible for the SMIC fnags register to be all 1's, + * (assuming a properly functioning, self-initialized BMC) + * but that's what you get from reading a bogus address, so we + * test that first. + */ + if (read_smic_flags(smic) == 0xff) + return 1; + + return 0; +} + +static void smic_cleanup(struct si_sm_data *kcs) +{ +} + +static int smic_size(void) +{ + return sizeof(struct si_sm_data); +} + +struct si_sm_handlers smic_smi_handlers = { + .init_data = init_smic_data, + .start_transaction = start_smic_transaction, + .get_result = smic_get_result, + .event = smic_event, + .detect = smic_detect, + .cleanup = smic_cleanup, + .size = smic_size, +}; diff --git a/kernel/drivers/char/ipmi/ipmi_ssif.c b/kernel/drivers/char/ipmi/ipmi_ssif.c new file mode 100644 index 000000000..207689c44 --- /dev/null +++ b/kernel/drivers/char/ipmi/ipmi_ssif.c @@ -0,0 +1,2017 @@ +/* + * ipmi_ssif.c + * + * The interface to the IPMI driver for SMBus access to a SMBus + * compliant device. Called SSIF by the IPMI spec. 
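+ * (SSIF is short for SMBus System Interface.)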
+ *
+ * Author: Intel Corporation
+ *         Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine.  It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+/*
+ * TODO: Figure out how to use SMB alerts.  This will require a new
+ * interface into the I2C driver, I believe.
+ */
+
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+
+#define PFX "ipmi_ssif: "
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD	0x57
+
+#define	SSIF_IPMI_REQUEST			2
+#define	SSIF_IPMI_MULTI_PART_REQUEST_START	6
+#define	SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE	7
+#define	SSIF_IPMI_RESPONSE			3
+#define	SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE	9
+
+/* ssif_debug is a bit-field
+ *	SSIF_DEBUG_MSG -	commands and their responses
+ *	SSIF_DEBUG_STATES -	message states
+ *	SSIF_DEBUG_TIMING -	Measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING	4
+#define SSIF_DEBUG_STATE	2
+#define SSIF_DEBUG_MSG		1
+#define SSIF_NODEBUG		0
+#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC		20000	/* 20ms between message tries. */
+#define SSIF_MSG_PART_USEC	5000	/* 5ms for a message part */
+
+/* How many times do we retry sending/receiving the message. */
+#define	SSIF_SEND_RETRIES	5
+#define	SSIF_RECV_RETRIES	250
+
+#define SSIF_MSG_MSEC		(SSIF_MSG_USEC / 1000)
+#define SSIF_MSG_JIFFIES	((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES	((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+
+enum ssif_intf_state {
+	SSIF_NORMAL,
+	SSIF_GETTING_FLAGS,
+	SSIF_GETTING_EVENTS,
+	SSIF_CLEARING_FLAGS,
+	SSIF_GETTING_MESSAGES,
+	/* FIXME - add watchdog stuff. */
+};
+
+#define SSIF_IDLE(ssif)	((ssif)->ssif_state == SSIF_NORMAL \
+			 && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+	/* Number of total messages sent. */
+	SSIF_STAT_sent_messages = 0,
+
+	/*
+	 * Number of message parts sent.  Messages may be broken into
+	 * parts if they are long.
+	 */
+	SSIF_STAT_sent_messages_parts,
+
+	/*
+	 * Number of times a message was retried.
+	 */
+	SSIF_STAT_send_retries,
+
+	/*
+	 * Number of times the send of a message failed.
+	 */
+	SSIF_STAT_send_errors,
+
+	/*
+	 * Number of message responses received.
+	 */
+	SSIF_STAT_received_messages,
+
+	/*
+	 * Number of message fragments received.
+	 */
+	SSIF_STAT_received_message_parts,
+
+	/*
+	 * Number of times the receive of a message was retried.
+	 */
+	SSIF_STAT_receive_retries,
+
+	/*
+	 * Number of errors receiving messages.
+	 */
+	SSIF_STAT_receive_errors,
+
+	/*
+	 * Number of times a flag fetch was requested.
+	 */
+	SSIF_STAT_flag_fetches,
+
+	/*
+	 * Number of times the hardware didn't follow the state machine.
+	 */
+	SSIF_STAT_hosed,
+
+	/*
+	 * Number of received events.
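+	 * These are entries read out of the BMC's event message buffer.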
+ */ + SSIF_STAT_events, + + /* Number of asynchronous messages received. */ + SSIF_STAT_incoming_messages, + + /* Number of watchdog pretimeouts. */ + SSIF_STAT_watchdog_pretimeouts, + + /* Number of alerts received. */ + SSIF_STAT_alerts, + + /* Always add statistics before this value, it must be last. */ + SSIF_NUM_STATS +}; + +struct ssif_addr_info { + unsigned short addr; + struct i2c_board_info binfo; + char *adapter_name; + int debug; + int slave_addr; + enum ipmi_addr_src addr_src; + union ipmi_smi_info_union addr_info; + + struct mutex clients_mutex; + struct list_head clients; + + struct list_head link; +}; + +struct ssif_info; + +typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len); + +struct ssif_info { + ipmi_smi_t intf; + int intf_num; + spinlock_t lock; + struct ipmi_smi_msg *waiting_msg; + struct ipmi_smi_msg *curr_msg; + enum ssif_intf_state ssif_state; + unsigned long ssif_debug; + + struct ipmi_smi_handlers handlers; + + enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ + union ipmi_smi_info_union addr_info; + + /* + * Flags from the last GET_MSG_FLAGS command, used when an ATTN + * is set to hold the flags until we are done handling everything + * from the flags. + */ +#define RECEIVE_MSG_AVAIL 0x01 +#define EVENT_MSG_BUFFER_FULL 0x02 +#define WDT_PRE_TIMEOUT_INT 0x08 + unsigned char msg_flags; + + u8 global_enables; + bool has_event_buffer; + bool supports_alert; + + /* + * Used to tell what we should do with alerts. If we are + * waiting on a response, read the data immediately. + */ + bool got_alert; + bool waiting_alert; + + /* + * If set to true, this will request events the next time the + * state machine is idle. + */ + bool req_events; + + /* + * If set to true, this will request flags the next time the + * state machine is idle. + */ + bool req_flags; + + /* + * Used to perform timer operations when run-to-completion + * mode is on. This is a countdown timer. + */ + int rtc_us_timer; + + /* Used for sending/receiving data. +1 for the length. */ + unsigned char data[IPMI_MAX_MSG_LENGTH + 1]; + unsigned int data_len; + + /* Temp receive buffer, gets copied into data. */ + unsigned char recv[I2C_SMBUS_BLOCK_MAX]; + + struct i2c_client *client; + ssif_i2c_done done_handler; + + /* Thread interface handling */ + struct task_struct *thread; + struct completion wake_thread; + bool stopping; + int i2c_read_write; + int i2c_command; + unsigned char *i2c_data; + unsigned int i2c_size; + + /* From the device id response.
*/ + struct ipmi_device_id device_id; + + struct timer_list retry_timer; + int retries_left; + + /* Info from SSIF cmd */ + unsigned char max_xmit_msg_size; + unsigned char max_recv_msg_size; + unsigned int multi_support; + int supports_pec; + +#define SSIF_NO_MULTI 0 +#define SSIF_MULTI_2_PART 1 +#define SSIF_MULTI_n_PART 2 + unsigned char *multi_data; + unsigned int multi_len; + unsigned int multi_pos; + + atomic_t stats[SSIF_NUM_STATS]; +}; + +#define ssif_inc_stat(ssif, stat) \ + atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat]) +#define ssif_get_stat(ssif, stat) \ + ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat])) + +static bool initialized; + +static atomic_t next_intf = ATOMIC_INIT(0); + +static void return_hosed_msg(struct ssif_info *ssif_info, + struct ipmi_smi_msg *msg); +static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags); +static int start_send(struct ssif_info *ssif_info, + unsigned char *data, + unsigned int len); + +static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info, + unsigned long *flags) +{ + spin_lock_irqsave(&ssif_info->lock, *flags); + return flags; +} + +static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info, + unsigned long *flags) +{ + spin_unlock_irqrestore(&ssif_info->lock, *flags); +} + +static void deliver_recv_msg(struct ssif_info *ssif_info, + struct ipmi_smi_msg *msg) +{ + ipmi_smi_t intf = ssif_info->intf; + + if (!intf) { + ipmi_free_smi_msg(msg); + } else if (msg->rsp_size < 0) { + return_hosed_msg(ssif_info, msg); + pr_err(PFX + "Malformed message in deliver_recv_msg: rsp_size = %d\n", + msg->rsp_size); + } else { + ipmi_smi_msg_received(intf, msg); + } +} + +static void return_hosed_msg(struct ssif_info *ssif_info, + struct ipmi_smi_msg *msg) +{ + ssif_inc_stat(ssif_info, hosed); + + /* Make it a response */ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = 0xFF; /* Unknown error. */ + msg->rsp_size = 3; + + deliver_recv_msg(ssif_info, msg); +} + +/* + * Must be called with the message lock held. This will release the + * message lock. Note that the caller will check SSIF_IDLE and start a + * new operation, so there is no need to check for new messages to + * start in here. + */ +static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) +{ + unsigned char msg[3]; + + ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; + ssif_info->ssif_state = SSIF_CLEARING_FLAGS; + ipmi_ssif_unlock_cond(ssif_info, flags); + + /* Make sure the watchdog pre-timeout flag is not set at startup. */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; + msg[2] = WDT_PRE_TIMEOUT_INT; + + if (start_send(ssif_info, msg, 3) != 0) { + /* Error, just go to normal state. 
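+ * + * (Editor's note on the locking idiom used throughout this file: the + * caller keeps a pointer to its saved IRQ flags so that helpers like + * this one can drop the spinlock on the caller's behalf: + * + * unsigned long oflags, *flags; + * flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + * ... examine or update the driver state ... + * ipmi_ssif_unlock_cond(ssif_info, flags); + * + * Functions documented as "must be called with the message lock held" + * release it through that same pointer before returning.)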
*/ + ssif_info->ssif_state = SSIF_NORMAL; + } +} + +static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) +{ + unsigned char mb[2]; + + ssif_info->req_flags = false; + ssif_info->ssif_state = SSIF_GETTING_FLAGS; + ipmi_ssif_unlock_cond(ssif_info, flags); + + mb[0] = (IPMI_NETFN_APP_REQUEST << 2); + mb[1] = IPMI_GET_MSG_FLAGS_CMD; + if (start_send(ssif_info, mb, 2) != 0) + ssif_info->ssif_state = SSIF_NORMAL; +} + +static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, + struct ipmi_smi_msg *msg) +{ + if (start_send(ssif_info, msg->data, msg->data_size) != 0) { + unsigned long oflags; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + ssif_info->curr_msg = NULL; + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + ipmi_free_smi_msg(msg); + } +} + +static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) +{ + struct ipmi_smi_msg *msg; + + ssif_info->req_events = false; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + ssif_info->ssif_state = SSIF_NORMAL; + return; + } + + ssif_info->curr_msg = msg; + ssif_info->ssif_state = SSIF_GETTING_EVENTS; + ipmi_ssif_unlock_cond(ssif_info, flags); + + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; + msg->data_size = 2; + + check_start_send(ssif_info, flags, msg); +} + +static void start_recv_msg_fetch(struct ssif_info *ssif_info, + unsigned long *flags) +{ + struct ipmi_smi_msg *msg; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + ssif_info->ssif_state = SSIF_NORMAL; + return; + } + + ssif_info->curr_msg = msg; + ssif_info->ssif_state = SSIF_GETTING_MESSAGES; + ipmi_ssif_unlock_cond(ssif_info, flags); + + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_GET_MSG_CMD; + msg->data_size = 2; + + check_start_send(ssif_info, flags, msg); +} + +/* + * Must be called with the message lock held. This will release the + * message lock. Note that the caller will check SSIF_IDLE and start a + * new operation, so there is no need to check for new messages to + * start in here. + */ +static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) +{ + if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) { + ipmi_smi_t intf = ssif_info->intf; + /* Watchdog pre-timeout */ + ssif_inc_stat(ssif_info, watchdog_pretimeouts); + start_clear_flags(ssif_info, flags); + if (intf) + ipmi_smi_watchdog_pretimeout(intf); + } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL) + /* Messages available. */ + start_recv_msg_fetch(ssif_info, flags); + else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL) + /* Events available. 
*/ + start_event_fetch(ssif_info, flags); + else { + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + } +} + +static int ipmi_ssif_thread(void *data) +{ + struct ssif_info *ssif_info = data; + + while (!kthread_should_stop()) { + int result; + + /* Wait for something to do */ + result = wait_for_completion_interruptible( + &ssif_info->wake_thread); + if (ssif_info->stopping) + break; + if (result == -ERESTARTSYS) + continue; + init_completion(&ssif_info->wake_thread); + + if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { + result = i2c_smbus_write_block_data( + ssif_info->client, ssif_info->i2c_command, + ssif_info->i2c_data[0], + ssif_info->i2c_data + 1); + ssif_info->done_handler(ssif_info, result, NULL, 0); + } else { + result = i2c_smbus_read_block_data( + ssif_info->client, ssif_info->i2c_command, + ssif_info->i2c_data); + if (result < 0) + ssif_info->done_handler(ssif_info, result, + NULL, 0); + else + ssif_info->done_handler(ssif_info, 0, + ssif_info->i2c_data, + result); + } + } + + return 0; +} + +static int ssif_i2c_send(struct ssif_info *ssif_info, + ssif_i2c_done handler, + int read_write, int command, + unsigned char *data, unsigned int size) +{ + ssif_info->done_handler = handler; + + ssif_info->i2c_read_write = read_write; + ssif_info->i2c_command = command; + ssif_info->i2c_data = data; + ssif_info->i2c_size = size; + complete(&ssif_info->wake_thread); + return 0; +} + + +static void msg_done_handler(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len); + +static void start_get(struct ssif_info *ssif_info) +{ + int rv; + + ssif_info->rtc_us_timer = 0; + ssif_info->multi_pos = 0; + + rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, + SSIF_IPMI_RESPONSE, + ssif_info->recv, I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + /* request failed, just return the error. */ + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error from i2c_non_blocking_op(5)\n"); + + msg_done_handler(ssif_info, -EIO, NULL, 0); + } +} + +static void retry_timeout(unsigned long data) +{ + struct ssif_info *ssif_info = (void *) data; + unsigned long oflags, *flags; + bool waiting; + + if (ssif_info->stopping) + return; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + waiting = ssif_info->waiting_alert; + ssif_info->waiting_alert = false; + ipmi_ssif_unlock_cond(ssif_info, flags); + + if (waiting) + start_get(ssif_info); +} + + +static void ssif_alert(struct i2c_client *client, unsigned int data) +{ + struct ssif_info *ssif_info = i2c_get_clientdata(client); + unsigned long oflags, *flags; + bool do_get = false; + + ssif_inc_stat(ssif_info, alerts); + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (ssif_info->waiting_alert) { + ssif_info->waiting_alert = false; + del_timer(&ssif_info->retry_timer); + do_get = true; + } else if (ssif_info->curr_msg) { + ssif_info->got_alert = true; + } + ipmi_ssif_unlock_cond(ssif_info, flags); + if (do_get) + start_get(ssif_info); +} + +static int start_resend(struct ssif_info *ssif_info); + +static void msg_done_handler(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len) +{ + struct ipmi_smi_msg *msg; + unsigned long oflags, *flags; + int rv; + + /* + * We are single-threaded here, so no need for a lock until we + * start messing with driver states or the queues. 
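+ * + * (Editor's note: "single-threaded" holds because only one I2C + * transaction is ever outstanding; each callback runs either from the + * ipmi_ssif_thread() kthread when an SMBus transfer finishes or from + * the retry/alert paths, and a new transfer is only started from + * within those paths.)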
+ */ + + if (result < 0) { + ssif_info->retries_left--; + if (ssif_info->retries_left > 0) { + ssif_inc_stat(ssif_info, receive_retries); + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + ssif_info->waiting_alert = true; + ssif_info->rtc_us_timer = SSIF_MSG_USEC; + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_JIFFIES); + ipmi_ssif_unlock_cond(ssif_info, flags); + return; + } + + ssif_inc_stat(ssif_info, receive_errors); + + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error in msg_done_handler: %d\n", result); + len = 0; + goto continue_op; + } + + if ((len > 1) && (ssif_info->multi_pos == 0) + && (data[0] == 0x00) && (data[1] == 0x01)) { + /* Start of multi-part read. Start the next transaction. */ + int i; + + ssif_inc_stat(ssif_info, received_message_parts); + + /* Remove the multi-part read marker. */ + len -= 2; + for (i = 0; i < len; i++) + ssif_info->data[i] = data[i+2]; + ssif_info->multi_len = len; + ssif_info->multi_pos = 1; + + rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, + SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE, + ssif_info->recv, I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error from i2c_non_blocking_op(1)\n"); + + result = -EIO; + } else + return; + } else if (ssif_info->multi_pos) { + /* Middle of multi-part read. Start the next transaction. */ + int i; + unsigned char blocknum; + + if (len == 0) { + result = -EIO; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info(PFX "Middle message with no data\n"); + + goto continue_op; + } + + blocknum = data[0]; + + if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { + /* Received message too big, abort the operation. */ + result = -E2BIG; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Received message too big\n"); + + goto continue_op; + } + + /* Remove the blocknum from the data. */ + len--; + for (i = 0; i < len; i++) + ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; + ssif_info->multi_len += len; + if (blocknum == 0xff) { + /* End of read */ + len = ssif_info->multi_len; + data = ssif_info->data; + } else if (blocknum + 1 != ssif_info->multi_pos) { + /* + * Out of sequence block, just abort. Block + * numbers start at zero for the second block, + * but multi_pos starts at one, so the +1. 
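+ * + * (Editor's worked example: with multi_pos == 1 the first middle + * chunk must carry blocknum 0, since 0 + 1 == 1. If the BMC resent + * block 0 while multi_pos was already 2, the test would fail and the + * read would abort with -EIO instead of assembling a corrupt + * message.)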
+ */ + result = -EIO; + } else { + ssif_inc_stat(ssif_info, received_message_parts); + + ssif_info->multi_pos++; + + rv = ssif_i2c_send(ssif_info, msg_done_handler, + I2C_SMBUS_READ, + SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE, + ssif_info->recv, + I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info(PFX + "Error from ssif_i2c_send\n"); + + result = -EIO; + } else + return; + } + } + + if (result < 0) { + ssif_inc_stat(ssif_info, receive_errors); + } else { + ssif_inc_stat(ssif_info, received_messages); + ssif_inc_stat(ssif_info, received_message_parts); + } + + + continue_op: + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) + pr_info(PFX "DONE 1: state = %d, result=%d.\n", + ssif_info->ssif_state, result); + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + msg = ssif_info->curr_msg; + if (msg) { + msg->rsp_size = len; + if (msg->rsp_size > IPMI_MAX_MSG_LENGTH) + msg->rsp_size = IPMI_MAX_MSG_LENGTH; + memcpy(msg->rsp, data, msg->rsp_size); + ssif_info->curr_msg = NULL; + } + + switch (ssif_info->ssif_state) { + case SSIF_NORMAL: + ipmi_ssif_unlock_cond(ssif_info, flags); + if (!msg) + break; + + if (result < 0) + return_hosed_msg(ssif_info, msg); + else + deliver_recv_msg(ssif_info, msg); + break; + + case SSIF_GETTING_FLAGS: + /* We got the flags from the SSIF, now handle them. */ + if ((result < 0) || (len < 4) || (data[2] != 0)) { + /* + * Error fetching flags, or invalid length, + * just give up for now. + */ + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + pr_warn(PFX "Error getting flags: %d %d, %x\n", + result, len, data[2]); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) { + pr_warn(PFX "Invalid response getting flags: %x %x\n", + data[0], data[1]); + } else { + ssif_inc_stat(ssif_info, flag_fetches); + ssif_info->msg_flags = data[3]; + handle_flags(ssif_info, flags); + } + break; + + case SSIF_CLEARING_FLAGS: + /* We cleared the flags. */ + if ((result < 0) || (len < 3) || (data[2] != 0)) { + /* Error clearing flags */ + pr_warn(PFX "Error clearing flags: %d %d, %x\n", + result, len, data[2]); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { + pr_warn(PFX "Invalid response clearing flags: %x %x\n", + data[0], data[1]); + } + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + break; + + case SSIF_GETTING_EVENTS: + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the event flag. */ + ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; + handle_flags(ssif_info, flags); + } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) { + pr_warn(PFX "Invalid response getting events: %x %x\n", + msg->rsp[0], msg->rsp[1]); + msg->done(msg); + /* Take off the event flag. */ + ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; + handle_flags(ssif_info, flags); + } else { + handle_flags(ssif_info, flags); + ssif_inc_stat(ssif_info, events); + deliver_recv_msg(ssif_info, msg); + } + break; + + case SSIF_GETTING_MESSAGES: + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the msg flag. 
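+ * + * (Editor's note: every response in this switch is validated by + * checking that byte 0 echoes the request netfn with the response bit + * set - for IPMI_NETFN_APP_REQUEST 0x06 that is (0x06 | 1) << 2 == + * 0x1c - and that byte 1 echoes the command. The "Invalid response + * clearing flags" text just below looks copied from the flags path; + * the condition it reports is really a bad IPMI_GET_MSG_CMD + * response.)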
*/ + ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL; + handle_flags(ssif_info, flags); + } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || msg->rsp[1] != IPMI_GET_MSG_CMD) { + pr_warn(PFX "Invalid response clearing flags: %x %x\n", + msg->rsp[0], msg->rsp[1]); + msg->done(msg); + + /* Take off the msg flag. */ + ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL; + handle_flags(ssif_info, flags); + } else { + ssif_inc_stat(ssif_info, incoming_messages); + handle_flags(ssif_info, flags); + deliver_recv_msg(ssif_info, msg); + } + break; + } + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) { + if (ssif_info->req_events) + start_event_fetch(ssif_info, flags); + else if (ssif_info->req_flags) + start_flag_fetch(ssif_info, flags); + else + start_next_msg(ssif_info, flags); + } else + ipmi_ssif_unlock_cond(ssif_info, flags); + + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) + pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state); +} + +static void msg_written_handler(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len) +{ + int rv; + + /* We are single-threaded here, so no need for a lock. */ + if (result < 0) { + ssif_info->retries_left--; + if (ssif_info->retries_left > 0) { + if (!start_resend(ssif_info)) { + ssif_inc_stat(ssif_info, send_retries); + return; + } + /* request failed, just return the error. */ + ssif_inc_stat(ssif_info, send_errors); + + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info(PFX + "Out of retries in msg_written_handler\n"); + msg_done_handler(ssif_info, -EIO, NULL, 0); + return; + } + + ssif_inc_stat(ssif_info, send_errors); + + /* + * Got an error on transmit, let the done routine + * handle it. + */ + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error in msg_written_handler: %d\n", result); + + msg_done_handler(ssif_info, result, NULL, 0); + return; + } + + if (ssif_info->multi_data) { + /* + * In the middle of a multi-data write. See the comment + * in the SSIF_MULTI_n_PART case in the probe function + * for details on the intricacies of this. + */ + int left; + + ssif_inc_stat(ssif_info, sent_messages_parts); + + left = ssif_info->multi_len - ssif_info->multi_pos; + if (left > 32) + left = 32; + /* Length byte. */ + ssif_info->multi_data[ssif_info->multi_pos] = left; + ssif_info->multi_pos += left; + if (left < 32) + /* + * Write is finished. Note that we must end + * with a write of less than 32 bytes to + * complete the transaction, even if it is + * zero bytes. + */ + ssif_info->multi_data = NULL; + + rv = ssif_i2c_send(ssif_info, msg_written_handler, + I2C_SMBUS_WRITE, + SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE, + ssif_info->multi_data + ssif_info->multi_pos, + I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + /* request failed, just return the error. */ + ssif_inc_stat(ssif_info, send_errors); + + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error from i2c_non_blocking_op(3)\n"); + msg_done_handler(ssif_info, -EIO, NULL, 0); + } + } else { + unsigned long oflags, *flags; + bool got_alert; + + ssif_inc_stat(ssif_info, sent_messages); + ssif_inc_stat(ssif_info, sent_messages_parts); + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + got_alert = ssif_info->got_alert; + if (got_alert) { + ssif_info->got_alert = false; + ssif_info->waiting_alert = false; + } + + if (got_alert) { + ipmi_ssif_unlock_cond(ssif_info, flags); + /* The alert already happened, try now. 
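+ * + * (Editor's worked example of the multi-part send path above: a + * 64-byte request goes out as a 32-byte MULTI_PART_REQUEST_START, one + * 32-byte MULTI_PART_REQUEST_MIDDLE, and finally a zero-length middle + * chunk, because only a chunk shorter than 32 bytes ends the + * transaction; see the left < 32 test above.)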
*/ + retry_timeout((unsigned long) ssif_info); + } else { + /* Wait a jiffie then request the next message */ + ssif_info->waiting_alert = true; + ssif_info->retries_left = SSIF_RECV_RETRIES; + ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_PART_JIFFIES); + ipmi_ssif_unlock_cond(ssif_info, flags); + } + } +} + +static int start_resend(struct ssif_info *ssif_info) +{ + int rv; + int command; + + ssif_info->got_alert = false; + + if (ssif_info->data_len > 32) { + command = SSIF_IPMI_MULTI_PART_REQUEST_START; + ssif_info->multi_data = ssif_info->data; + ssif_info->multi_len = ssif_info->data_len; + /* + * Subtle thing, this is 32, not 33, because we will + * overwrite the thing at position 32 (which was just + * transmitted) with the new length. + */ + ssif_info->multi_pos = 32; + ssif_info->data[0] = 32; + } else { + ssif_info->multi_data = NULL; + command = SSIF_IPMI_REQUEST; + ssif_info->data[0] = ssif_info->data_len; + } + + rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE, + command, ssif_info->data, I2C_SMBUS_BLOCK_DATA); + if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG)) + pr_info("Error from i2c_non_blocking_op(4)\n"); + return rv; +} + +static int start_send(struct ssif_info *ssif_info, + unsigned char *data, + unsigned int len) +{ + if (len > IPMI_MAX_MSG_LENGTH) + return -E2BIG; + if (len > ssif_info->max_xmit_msg_size) + return -E2BIG; + + ssif_info->retries_left = SSIF_SEND_RETRIES; + memcpy(ssif_info->data + 1, data, len); + ssif_info->data_len = len; + return start_resend(ssif_info); +} + +/* Must be called with the message lock held. */ +static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) +{ + struct ipmi_smi_msg *msg; + unsigned long oflags; + + restart: + if (!SSIF_IDLE(ssif_info)) { + ipmi_ssif_unlock_cond(ssif_info, flags); + return; + } + + if (!ssif_info->waiting_msg) { + ssif_info->curr_msg = NULL; + ipmi_ssif_unlock_cond(ssif_info, flags); + } else { + int rv; + + ssif_info->curr_msg = ssif_info->waiting_msg; + ssif_info->waiting_msg = NULL; + ipmi_ssif_unlock_cond(ssif_info, flags); + rv = start_send(ssif_info, + ssif_info->curr_msg->data, + ssif_info->curr_msg->data_size); + if (rv) { + msg = ssif_info->curr_msg; + ssif_info->curr_msg = NULL; + return_hosed_msg(ssif_info, msg); + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + goto restart; + } + } +} + +static void sender(void *send_info, + struct ipmi_smi_msg *msg) +{ + struct ssif_info *ssif_info = (struct ssif_info *) send_info; + unsigned long oflags, *flags; + + BUG_ON(ssif_info->waiting_msg); + ssif_info->waiting_msg = msg; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + start_next_msg(ssif_info, flags); + + if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) { + struct timeval t; + + do_gettimeofday(&t); + pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", + msg->data[0], msg->data[1], + (long) t.tv_sec, (long) t.tv_usec); + } +} + +static int get_smi_info(void *send_info, struct ipmi_smi_info *data) +{ + struct ssif_info *ssif_info = send_info; + + data->addr_src = ssif_info->addr_source; + data->dev = &ssif_info->client->dev; + data->addr_info = ssif_info->addr_info; + get_device(data->dev); + + return 0; +} + +/* + * Instead of having our own timer to periodically check the message + * flags, we let the message handler drive us. 
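+ * + * (Editor's note: the IPMI message handler calls request_events() + * periodically on behalf of interfaces that cannot raise an + * attention signal themselves; this driver answers each call with a + * GET_MSG_FLAGS poll first, since polling the flags is the only way + * SSIF learns of pending events and messages.)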
+ */ +static void request_events(void *send_info) +{ + struct ssif_info *ssif_info = (struct ssif_info *) send_info; + unsigned long oflags, *flags; + + if (!ssif_info->has_event_buffer) + return; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + /* + * Request flags first, not events, because the lower layer + * doesn't have a way to send an attention. But make sure + * event checking still happens. + */ + ssif_info->req_events = true; + if (SSIF_IDLE(ssif_info)) + start_flag_fetch(ssif_info, flags); + else { + ssif_info->req_flags = true; + ipmi_ssif_unlock_cond(ssif_info, flags); + } +} + +static int inc_usecount(void *send_info) +{ + struct ssif_info *ssif_info = send_info; + + if (!i2c_get_adapter(ssif_info->client->adapter->nr)) + return -ENODEV; + + i2c_use_client(ssif_info->client); + return 0; +} + +static void dec_usecount(void *send_info) +{ + struct ssif_info *ssif_info = send_info; + + i2c_release_client(ssif_info->client); + i2c_put_adapter(ssif_info->client->adapter); +} + +static int ssif_start_processing(void *send_info, + ipmi_smi_t intf) +{ + struct ssif_info *ssif_info = send_info; + + ssif_info->intf = intf; + + return 0; +} + +#define MAX_SSIF_BMCS 4 + +static unsigned short addr[MAX_SSIF_BMCS]; +static int num_addrs; +module_param_array(addr, ushort, &num_addrs, 0); +MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs."); + +static char *adapter_name[MAX_SSIF_BMCS]; +static int num_adapter_names; +module_param_array(adapter_name, charp, &num_adapter_names, 0); +MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned."); + +static int slave_addrs[MAX_SSIF_BMCS]; +static int num_slave_addrs; +module_param_array(slave_addrs, int, &num_slave_addrs, 0); +MODULE_PARM_DESC(slave_addrs, + "The default IPMB slave address for the controller."); + +/* + * Bit 0 enables message debugging, bit 1 enables state debugging, and + * bit 2 enables timing debugging. This is an array indexed by + * interface number. + */ +static int dbg[MAX_SSIF_BMCS]; +static int num_dbg; +module_param_array(dbg, int, &num_dbg, 0); +MODULE_PARM_DESC(dbg, "Turn on debugging."); + +static bool ssif_dbg_probe; +module_param_named(dbg_probe, ssif_dbg_probe, bool, 0); +MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters."); + +static int use_thread; +module_param(use_thread, int, 0); +MODULE_PARM_DESC(use_thread, "Use the thread interface."); + +static bool ssif_tryacpi = 1; +module_param_named(tryacpi, ssif_tryacpi, bool, 0); +MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI"); + +static bool ssif_trydmi = 1; +module_param_named(trydmi, ssif_trydmi, bool, 0); +MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)"); + +static DEFINE_MUTEX(ssif_infos_mutex); +static LIST_HEAD(ssif_infos); + +static int ssif_remove(struct i2c_client *client) +{ + struct ssif_info *ssif_info = i2c_get_clientdata(client); + int rv; + + if (!ssif_info) + return 0; + + /* + * After this point, we won't deliver anything asynchronously + * to the message handler. We can unregister ourself. + */ + rv = ipmi_unregister_smi(ssif_info->intf); + if (rv) { + pr_err(PFX "Unable to unregister device: errno=%d\n", rv); + return rv; + } + ssif_info->intf = NULL; + + /* make sure the driver is not looking for flags any more.
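+ * + * (Editor's note on the module parameters defined above: a BMC that + * ACPI and DMI do not describe can be attached by hand, e.g. - the + * address values below are made up for illustration: + * + * modprobe ipmi_ssif addr=0x20 slave_addrs=0x20 dbg=1 + * + * Each array slot describes one BMC, up to MAX_SSIF_BMCS of them.)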
*/ + while (ssif_info->ssif_state != SSIF_NORMAL) + schedule_timeout(1); + + ssif_info->stopping = true; + del_timer_sync(&ssif_info->retry_timer); + if (ssif_info->thread) { + complete(&ssif_info->wake_thread); + kthread_stop(ssif_info->thread); + } + + /* + * No message can be outstanding now, we have removed the + * upper layer and it permitted us to do so. + */ + kfree(ssif_info); + return 0; +} + +static int do_cmd(struct i2c_client *client, int len, unsigned char *msg, + int *resp_len, unsigned char *resp) +{ + int retry_cnt; + int ret; + + retry_cnt = SSIF_SEND_RETRIES; + retry1: + ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg); + if (ret) { + retry_cnt--; + if (retry_cnt > 0) + goto retry1; + return -ENODEV; + } + + ret = -ENODEV; + retry_cnt = SSIF_RECV_RETRIES; + while (retry_cnt > 0) { + ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE, + resp); + if (ret > 0) + break; + msleep(SSIF_MSG_MSEC); + retry_cnt--; + if (retry_cnt <= 0) + break; + } + + if (ret > 0) { + /* Validate that the response is correct. */ + if (ret < 3 || + (resp[0] != (msg[0] | (1 << 2))) || + (resp[1] != msg[1])) + ret = -EINVAL; + else { + *resp_len = ret; + ret = 0; + } + } + + return ret; +} + +static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) +{ + unsigned char *resp; + unsigned char msg[3]; + int rv; + int len; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + /* Do a Get Device ID command, since it is required. */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_DEVICE_ID_CMD; + rv = do_cmd(client, 2, msg, &len, resp); + if (rv) + rv = -ENODEV; + else + strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE); + kfree(resp); + return rv; +} + +static int smi_type_proc_show(struct seq_file *m, void *v) +{ + seq_puts(m, "ssif\n"); + + return 0; +} + +static int smi_type_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_type_proc_show, inode->i_private); +} + +static const struct file_operations smi_type_proc_ops = { + .open = smi_type_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smi_stats_proc_show(struct seq_file *m, void *v) +{ + struct ssif_info *ssif_info = m->private; + + seq_printf(m, "sent_messages: %u\n", + ssif_get_stat(ssif_info, sent_messages)); + seq_printf(m, "sent_messages_parts: %u\n", + ssif_get_stat(ssif_info, sent_messages_parts)); + seq_printf(m, "send_retries: %u\n", + ssif_get_stat(ssif_info, send_retries)); + seq_printf(m, "send_errors: %u\n", + ssif_get_stat(ssif_info, send_errors)); + seq_printf(m, "received_messages: %u\n", + ssif_get_stat(ssif_info, received_messages)); + seq_printf(m, "received_message_parts: %u\n", + ssif_get_stat(ssif_info, received_message_parts)); + seq_printf(m, "receive_retries: %u\n", + ssif_get_stat(ssif_info, receive_retries)); + seq_printf(m, "receive_errors: %u\n", + ssif_get_stat(ssif_info, receive_errors)); + seq_printf(m, "flag_fetches: %u\n", + ssif_get_stat(ssif_info, flag_fetches)); + seq_printf(m, "hosed: %u\n", + ssif_get_stat(ssif_info, hosed)); + seq_printf(m, "events: %u\n", + ssif_get_stat(ssif_info, events)); + seq_printf(m, "watchdog_pretimeouts: %u\n", + ssif_get_stat(ssif_info, watchdog_pretimeouts)); + seq_printf(m, "alerts: %u\n", + ssif_get_stat(ssif_info, alerts)); + return 0; +} + +static int smi_stats_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_stats_proc_show, PDE_DATA(inode)); +} + +static const 
struct file_operations smi_stats_proc_ops = { + .open = smi_stats_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int strcmp_nospace(char *s1, char *s2) +{ + while (*s1 && *s2) { + while (isspace(*s1)) + s1++; + while (isspace(*s2)) + s2++; + if (*s1 > *s2) + return 1; + if (*s1 < *s2) + return -1; + s1++; + s2++; + } + return 0; +} + +static struct ssif_addr_info *ssif_info_find(unsigned short addr, + char *adapter_name, + bool match_null_name) +{ + struct ssif_addr_info *info, *found = NULL; + +restart: + list_for_each_entry(info, &ssif_infos, link) { + if (info->binfo.addr == addr) { + if (info->adapter_name || adapter_name) { + if (!info->adapter_name != !adapter_name) { + /* One is NULL and one is not */ + continue; + } + if (adapter_name && + strcmp_nospace(info->adapter_name, + adapter_name)) + /* Names do not match */ + continue; + } + found = info; + break; + } + } + + if (!found && match_null_name) { + /* Try to get an exact match first, then try with a NULL name */ + adapter_name = NULL; + match_null_name = false; + goto restart; + } + + return found; +} + +static bool check_acpi(struct ssif_info *ssif_info, struct device *dev) +{ +#ifdef CONFIG_ACPI + acpi_handle acpi_handle; + + acpi_handle = ACPI_HANDLE(dev); + if (acpi_handle) { + ssif_info->addr_source = SI_ACPI; + ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle; + return true; + } +#endif + return false; +} + +/* + * Global enables we care about. + */ +#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ + IPMI_BMC_EVT_MSG_INTR) + +static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + unsigned char msg[3]; + unsigned char *resp; + struct ssif_info *ssif_info; + int rv = 0; + int len; + int i; + u8 slave_addr = 0; + struct ssif_addr_info *addr_info = NULL; + + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL); + if (!ssif_info) { + kfree(resp); + return -ENOMEM; + } + + if (!check_acpi(ssif_info, &client->dev)) { + addr_info = ssif_info_find(client->addr, client->adapter->name, + true); + if (!addr_info) { + /* Must have come in through sysfs. */ + ssif_info->addr_source = SI_HOTMOD; + } else { + ssif_info->addr_source = addr_info->addr_src; + ssif_info->ssif_debug = addr_info->debug; + ssif_info->addr_info = addr_info->addr_info; + slave_addr = addr_info->slave_addr; + } + } + + pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n", + ipmi_addr_src_to_str(ssif_info->addr_source), + client->addr, client->adapter->name, slave_addr); + + /* + * Do a Get Device ID command, since it comes back with some + * useful info. + */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_DEVICE_ID_CMD; + rv = do_cmd(client, 2, msg, &len, resp); + if (rv) + goto out; + + rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id); + if (rv) + goto out; + + ssif_info->client = client; + i2c_set_clientdata(client, ssif_info); + + /* Now check for system interface capabilities */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; + msg[2] = 0; /* SSIF */ + rv = do_cmd(client, 3, msg, &len, resp); + if (!rv && (len >= 3) && (resp[2] == 0)) { + if (len < 7) { + if (ssif_dbg_probe) + pr_info(PFX "SSIF info too short: %d\n", len); + goto no_support; + } + + /* Got a good SSIF response, handle it. 
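+ * + * (Editor's note on the byte layout used below: resp[2] is the IPMI + * completion code, bits 6-7 of resp[4] select the multi-part support + * level, bit 3 of resp[4] advertises PEC, and resp[5]/resp[6] carry + * the maximum request and response sizes.)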
*/ + ssif_info->max_xmit_msg_size = resp[5]; + ssif_info->max_recv_msg_size = resp[6]; + ssif_info->multi_support = (resp[4] >> 6) & 0x3; + ssif_info->supports_pec = (resp[4] >> 3) & 0x1; + + /* Sanitize the data */ + switch (ssif_info->multi_support) { + case SSIF_NO_MULTI: + if (ssif_info->max_xmit_msg_size > 32) + ssif_info->max_xmit_msg_size = 32; + if (ssif_info->max_recv_msg_size > 32) + ssif_info->max_recv_msg_size = 32; + break; + + case SSIF_MULTI_2_PART: + if (ssif_info->max_xmit_msg_size > 63) + ssif_info->max_xmit_msg_size = 63; + if (ssif_info->max_recv_msg_size > 62) + ssif_info->max_recv_msg_size = 62; + break; + + case SSIF_MULTI_n_PART: + /* + * The specification is rather confusing at + * this point, but I think I understand what + * is meant. At least I have a workable + * solution. With multi-part messages, you + * cannot send a message that is a multiple of + * 32-bytes in length, because the start and + * middle messages are 32-bytes and the end + * message must be at least one byte. You + * can't fudge on an extra byte, that would + * screw up things like fru data writes. So + * we limit the length to 63 bytes. That way + * a 32-byte message gets sent as a single + * part. A larger message will be a 32-byte + * start and the next message is always going + * to be 1-31 bytes in length. Not ideal, but + * it should work. + */ + if (ssif_info->max_xmit_msg_size > 63) + ssif_info->max_xmit_msg_size = 63; + break; + + default: + /* Data is not sane, just give up. */ + goto no_support; + } + } else { + no_support: + /* Assume no multi-part or PEC support */ + pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", + rv, len, resp[2]); + + ssif_info->max_xmit_msg_size = 32; + ssif_info->max_recv_msg_size = 32; + ssif_info->multi_support = SSIF_NO_MULTI; + ssif_info->supports_pec = 0; + } + + /* Make sure the NMI timeout is cleared. */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; + msg[2] = WDT_PRE_TIMEOUT_INT; + rv = do_cmd(client, 3, msg, &len, resp); + if (rv || (len < 3) || (resp[2] != 0)) + pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n", + rv, len, resp[2]); + + /* Attempt to enable the event buffer. */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; + rv = do_cmd(client, 2, msg, &len, resp); + if (rv || (len < 4) || (resp[2] != 0)) { + pr_warn(PFX "Error getting global enables: %d %d %2.2x\n", + rv, len, resp[2]); + rv = 0; /* Not fatal */ + goto found; + } + + ssif_info->global_enables = resp[3]; + + if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { + ssif_info->has_event_buffer = true; + /* buffer is already enabled, nothing to do. */ + goto found; + } + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF; + rv = do_cmd(client, 3, msg, &len, resp); + if (rv || (len < 2)) { + pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", + rv, len, resp[2]); + rv = 0; /* Not fatal */ + goto found; + } + + if (resp[2] == 0) { + /* A successful return means the event buffer is supported. 
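+ * + * (Editor's note: feature discovery here is a read-modify-write of + * the global enables - the probe ORs in one bit at a time, + * IPMI_BMC_EVT_MSG_BUFF above and IPMI_BMC_RCV_MSG_INTR next, and + * takes a zero completion code as proof the BMC implements the + * feature; a nonzero code just leaves the feature off.)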
*/ + ssif_info->has_event_buffer = true; + ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF; + } + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; + rv = do_cmd(client, 3, msg, &len, resp); + if (rv || (len < 2)) { + pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", + rv, len, resp[2]); + rv = 0; /* Not fatal */ + goto found; + } + + if (resp[2] == 0) { + /* A successful return means the alert is supported. */ + ssif_info->supports_alert = true; + ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR; + } + + found: + ssif_info->intf_num = atomic_inc_return(&next_intf); + + if (ssif_dbg_probe) { + pr_info("ssif_probe: i2c_probe found device at i2c address %x\n", + client->addr); + } + + spin_lock_init(&ssif_info->lock); + ssif_info->ssif_state = SSIF_NORMAL; + init_timer(&ssif_info->retry_timer); + ssif_info->retry_timer.data = (unsigned long) ssif_info; + ssif_info->retry_timer.function = retry_timeout; + + for (i = 0; i < SSIF_NUM_STATS; i++) + atomic_set(&ssif_info->stats[i], 0); + + if (ssif_info->supports_pec) + ssif_info->client->flags |= I2C_CLIENT_PEC; + + ssif_info->handlers.owner = THIS_MODULE; + ssif_info->handlers.start_processing = ssif_start_processing; + ssif_info->handlers.get_smi_info = get_smi_info; + ssif_info->handlers.sender = sender; + ssif_info->handlers.request_events = request_events; + ssif_info->handlers.inc_usecount = inc_usecount; + ssif_info->handlers.dec_usecount = dec_usecount; + + { + unsigned int thread_num; + + thread_num = ((ssif_info->client->adapter->nr << 8) | + ssif_info->client->addr); + init_completion(&ssif_info->wake_thread); + ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info, + "kssif%4.4x", thread_num); + if (IS_ERR(ssif_info->thread)) { + rv = PTR_ERR(ssif_info->thread); + dev_notice(&ssif_info->client->dev, + "Could not start kernel thread: error %d\n", + rv); + goto out; + } + } + + rv = ipmi_register_smi(&ssif_info->handlers, + ssif_info, + &ssif_info->device_id, + &ssif_info->client->dev, + slave_addr); + if (rv) { + pr_err(PFX "Unable to register device: error %d\n", rv); + goto out; + } + + rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type", + &smi_type_proc_ops, + ssif_info); + if (rv) { + pr_err(PFX "Unable to create proc entry: %d\n", rv); + goto out_err_unreg; + } + + rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats", + &smi_stats_proc_ops, + ssif_info); + if (rv) { + pr_err(PFX "Unable to create proc entry: %d\n", rv); + goto out_err_unreg; + } + + out: + if (rv) + kfree(ssif_info); + kfree(resp); + return rv; + + out_err_unreg: + ipmi_unregister_smi(ssif_info->intf); + goto out; +} + +static int ssif_adapter_handler(struct device *adev, void *opaque) +{ + struct ssif_addr_info *addr_info = opaque; + + if (adev->type != &i2c_adapter_type) + return 0; + + i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); + + if (!addr_info->adapter_name) + return 1; /* Only try the first I2C adapter by default. 
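+ * + * (Editor's note: this callback runs under i2c_for_each_dev(), where + * a nonzero return value stops the underlying bus iteration; + * returning 1 here is what limits an address registered without an + * adapter name to the first adapter found.)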
*/ + return 0; +} + +static int new_ssif_client(int addr, char *adapter_name, + int debug, int slave_addr, + enum ipmi_addr_src addr_src) +{ + struct ssif_addr_info *addr_info; + int rv = 0; + + mutex_lock(&ssif_infos_mutex); + if (ssif_info_find(addr, adapter_name, false)) { + rv = -EEXIST; + goto out_unlock; + } + + addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL); + if (!addr_info) { + rv = -ENOMEM; + goto out_unlock; + } + + if (adapter_name) { + addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL); + if (!addr_info->adapter_name) { + kfree(addr_info); + rv = -ENOMEM; + goto out_unlock; + } + } + + strncpy(addr_info->binfo.type, DEVICE_NAME, + sizeof(addr_info->binfo.type)); + addr_info->binfo.addr = addr; + addr_info->binfo.platform_data = addr_info; + addr_info->debug = debug; + addr_info->slave_addr = slave_addr; + addr_info->addr_src = addr_src; + + list_add_tail(&addr_info->link, &ssif_infos); + + if (initialized) + i2c_for_each_dev(addr_info, ssif_adapter_handler); + /* Otherwise address list will get it */ + +out_unlock: + mutex_unlock(&ssif_infos_mutex); + return rv; +} + +static void free_ssif_clients(void) +{ + struct ssif_addr_info *info, *tmp; + + mutex_lock(&ssif_infos_mutex); + list_for_each_entry_safe(info, tmp, &ssif_infos, link) { + list_del(&info->link); + kfree(info->adapter_name); + kfree(info); + } + mutex_unlock(&ssif_infos_mutex); +} + +static unsigned short *ssif_address_list(void) +{ + struct ssif_addr_info *info; + unsigned int count = 0, i; + unsigned short *address_list; + + list_for_each_entry(info, &ssif_infos, link) + count++; + + address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL); + if (!address_list) + return NULL; + + i = 0; + list_for_each_entry(info, &ssif_infos, link) { + unsigned short addr = info->binfo.addr; + int j; + + for (j = 0; j < i; j++) { + if (address_list[j] == addr) + goto skip_addr; + } + address_list[i] = addr; +skip_addr: + i++; + } + address_list[i] = I2C_CLIENT_END; + + return address_list; +} + +#ifdef CONFIG_ACPI +static struct acpi_device_id ssif_acpi_match[] = { + { "IPI0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, ssif_acpi_match); + +/* + * Once we get an ACPI failure, we don't try any more, because we go + * through the tables sequentially. Once we don't find a table, there + * are no more. + */ +static int acpi_failure; + +/* + * Defined in the IPMI 2.0 spec. + */ +struct SPMITable { + s8 Signature[4]; + u32 Length; + u8 Revision; + u8 Checksum; + s8 OEMID[6]; + s8 OEMTableID[8]; + s8 OEMRevision[4]; + s8 CreatorID[4]; + s8 CreatorRevision[4]; + u8 InterfaceType; + u8 IPMIlegacy; + s16 SpecificationRevision; + + /* + * Bit 0 - SCI interrupt supported + * Bit 1 - I/O APIC/SAPIC + */ + u8 InterruptType; + + /* + * If bit 0 of InterruptType is set, then this is the SCI + * interrupt in the GPEx_STS register. + */ + u8 GPE; + + s16 Reserved; + + /* + * If bit 1 of InterruptType is set, then this is the I/O + * APIC/SAPIC interrupt. + */ + u32 GlobalSystemInterrupt; + + /* The actual register address. */ + struct acpi_generic_address addr; + + u8 UID[4]; + + s8 spmi_id[1]; /* A '\0' terminated array starts here. 
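+ * + * (Editor's note: addresses in the ACPI and DMI tables are 8-bit + * SMBus addresses with the read/write bit in bit 0, which is why + * try_init_spmi() and decode_dmi() below shift right by one; a table + * value of 0x40, say, becomes the 7-bit I2C address 0x20.)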
*/ +}; + +static int try_init_spmi(struct SPMITable *spmi) +{ + unsigned short myaddr; + + if (num_addrs >= MAX_SSIF_BMCS) + return -1; + + if (spmi->IPMIlegacy != 1) { + pr_warn("IPMI: Bad SPMI legacy: %d\n", spmi->IPMIlegacy); + return -ENODEV; + } + + if (spmi->InterfaceType != 4) + return -ENODEV; + + if (spmi->addr.space_id != ACPI_ADR_SPACE_SMBUS) { + pr_warn(PFX "Invalid ACPI SSIF I/O Address type: %d\n", + spmi->addr.space_id); + return -EIO; + } + + myaddr = spmi->addr.address >> 1; + + return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI); +} + +static void spmi_find_bmc(void) +{ + acpi_status status; + struct SPMITable *spmi; + int i; + + if (acpi_disabled) + return; + + if (acpi_failure) + return; + + for (i = 0; ; i++) { + status = acpi_get_table(ACPI_SIG_SPMI, i+1, + (struct acpi_table_header **)&spmi); + if (status != AE_OK) + return; + + try_init_spmi(spmi); + } +} +#else +static void spmi_find_bmc(void) { } +#endif + +#ifdef CONFIG_DMI +static int decode_dmi(const struct dmi_device *dmi_dev) +{ + struct dmi_header *dm = dmi_dev->device_data; + u8 *data = (u8 *) dm; + u8 len = dm->length; + unsigned short myaddr; + int slave_addr; + + if (num_addrs >= MAX_SSIF_BMCS) + return -1; + + if (len < 9) + return -1; + + if (data[0x04] != 4) /* Not SSIF */ + return -1; + + if ((data[8] >> 1) == 0) { + /* + * Some broken systems put the I2C address in + * the slave address field. We try to + * accommodate them here. + */ + myaddr = data[6] >> 1; + slave_addr = 0; + } else { + myaddr = data[8] >> 1; + slave_addr = data[6]; + } + + return new_ssif_client(myaddr, NULL, 0, 0, SI_SMBIOS); +} + +static void dmi_iterator(void) +{ + const struct dmi_device *dev = NULL; + + while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) + decode_dmi(dev); +} +#else +static void dmi_iterator(void) { } +#endif + +static const struct i2c_device_id ssif_id[] = { + { DEVICE_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ssif_id); + +static struct i2c_driver ssif_i2c_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .owner = THIS_MODULE, + .name = DEVICE_NAME + }, + .probe = ssif_probe, + .remove = ssif_remove, + .alert = ssif_alert, + .id_table = ssif_id, + .detect = ssif_detect +}; + +static int init_ipmi_ssif(void) +{ + int i; + int rv; + + if (initialized) + return 0; + + pr_info("IPMI SSIF Interface driver\n"); + + /* build list for i2c from addr list */ + for (i = 0; i < num_addrs; i++) { + rv = new_ssif_client(addr[i], adapter_name[i], + dbg[i], slave_addrs[i], + SI_HARDCODED); + if (rv) + pr_err(PFX + "Couldn't add hardcoded device at addr 0x%x\n", + addr[i]); + } + + if (ssif_tryacpi) + ssif_i2c_driver.driver.acpi_match_table = + ACPI_PTR(ssif_acpi_match); + if (ssif_trydmi) + dmi_iterator(); + if (ssif_tryacpi) + spmi_find_bmc(); + + ssif_i2c_driver.address_list = ssif_address_list(); + + rv = i2c_add_driver(&ssif_i2c_driver); + if (!rv) + initialized = true; + + return rv; +} +module_init(init_ipmi_ssif); + +static void cleanup_ipmi_ssif(void) +{ + if (!initialized) + return; + + initialized = false; + + i2c_del_driver(&ssif_i2c_driver); + + free_ssif_clients(); +} +module_exit(cleanup_ipmi_ssif); + +MODULE_AUTHOR("Todd C Davis , Corey Minyard "); +MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/ipmi/ipmi_watchdog.c b/kernel/drivers/char/ipmi/ipmi_watchdog.c new file mode 100644 index 000000000..37b8be7cb --- /dev/null +++ b/kernel/drivers/char/ipmi/ipmi_watchdog.c @@ -0,0 +1,1385 @@ +/* + * 
ipmi_watchdog.c + * + * A watchdog timer based upon the IPMI interface. + * + * Author: MontaVista Software, Inc. + * Corey Minyard + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/ipmi.h> +#include <linux/ipmi_smi.h> +#include <linux/mutex.h> +#include <linux/watchdog.h> +#include <linux/miscdevice.h> +#include <linux/init.h> +#include <linux/completion.h> +#include <linux/kdebug.h> +#include <linux/rwsem.h> +#include <linux/errno.h> +#include <asm/uaccess.h> +#include <linux/notifier.h> +#include <linux/nmi.h> +#include <linux/reboot.h> +#include <linux/wait.h> +#include <linux/poll.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/atomic.h> + +#ifdef CONFIG_X86 +/* + * This is ugly, but I've determined that x86 is the only architecture + * that can reasonably support the IPMI NMI watchdog timeout at this + * time. If another architecture adds this capability somehow, it + * will have to be a somewhat different mechanism and I have no idea + * how it will work. So in the unlikely event that another + * architecture supports this, we can figure out a good generic + * mechanism for it at that time. + */ +#include <asm/kdebug.h> +#include <asm/nmi.h> +#define HAVE_DIE_NMI +#endif + +#define PFX "IPMI Watchdog: " + +/* + * The IPMI command/response information for the watchdog timer. + */ + +/* values for byte 1 of the set command, byte 2 of the get response. */ +#define WDOG_DONT_LOG (1 << 7) +#define WDOG_DONT_STOP_ON_SET (1 << 6) +#define WDOG_SET_TIMER_USE(byte, use) \ + byte = ((byte) & 0xf8) | ((use) & 0x7) +#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7) +#define WDOG_TIMER_USE_BIOS_FRB2 1 +#define WDOG_TIMER_USE_BIOS_POST 2 +#define WDOG_TIMER_USE_OS_LOAD 3 +#define WDOG_TIMER_USE_SMS_OS 4 +#define WDOG_TIMER_USE_OEM 5 + +/* values for byte 2 of the set command, byte 3 of the get response. */ +#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \ + byte = ((byte) & 0x8f) | (((use) & 0x7) << 4) +#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7) +#define WDOG_PRETIMEOUT_NONE 0 +#define WDOG_PRETIMEOUT_SMI 1 +#define WDOG_PRETIMEOUT_NMI 2 +#define WDOG_PRETIMEOUT_MSG_INT 3 + +/* Operations that can be performed on a pretimeout. */ +#define WDOG_PREOP_NONE 0 +#define WDOG_PREOP_PANIC 1 +/* Cause data to be available to read. Doesn't work in NMI mode. */ +#define WDOG_PREOP_GIVE_DATA 2 + +/* Actions to perform on a full timeout.
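+ * + * (Editor's note: the action travels in the low three bits via + * WDOG_SET_TIMEOUT_ACT() below, while the timeout value itself is + * sent in 100 ms units split across two bytes by WDOG_SET_TIMEOUT(); + * e.g. the default 10 second timeout becomes 100 == 0x64, so + * byte1 = 0x64 and byte2 = 0x00, and WDOG_GET_TIMEOUT() reverses the + * arithmetic.)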
*/ +#define WDOG_SET_TIMEOUT_ACT(byte, use) \ + byte = ((byte) & 0xf8) | ((use) & 0x7) +#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7) +#define WDOG_TIMEOUT_NONE 0 +#define WDOG_TIMEOUT_RESET 1 +#define WDOG_TIMEOUT_POWER_DOWN 2 +#define WDOG_TIMEOUT_POWER_CYCLE 3 + +/* + * Byte 3 of the get command, byte 4 of the get response is the + * pre-timeout in seconds. + */ + +/* Bits for setting byte 4 of the set command, byte 5 of the get response. */ +#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) +#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2) +#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3) +#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) +#define WDOG_EXPIRE_CLEAR_OEM (1 << 5) + +/* + * Setting/getting the watchdog timer value. This is for bytes 5 and + * 6 (the timeout time) of the set command, and bytes 6 and 7 (the + * timeout time) and 8 and 9 (the current countdown value) of the + * response. The timeout value is given in seconds (in the command it + * is 100ms intervals). + */ +#define WDOG_SET_TIMEOUT(byte1, byte2, val) \ + (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) +#define WDOG_GET_TIMEOUT(byte1, byte2) \ + (((byte1) | ((byte2) << 8)) / 10) + +#define IPMI_WDOG_RESET_TIMER 0x22 +#define IPMI_WDOG_SET_TIMER 0x24 +#define IPMI_WDOG_GET_TIMER 0x25 + +#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80 + +static DEFINE_MUTEX(ipmi_watchdog_mutex); +static bool nowayout = WATCHDOG_NOWAYOUT; + +static ipmi_user_t watchdog_user; +static int watchdog_ifnum; + +/* Default the timeout to 10 seconds. */ +static int timeout = 10; + +/* The pre-timeout is disabled by default. */ +static int pretimeout; + +/* Default action is to reset the board on a timeout. */ +static unsigned char action_val = WDOG_TIMEOUT_RESET; + +static char action[16] = "reset"; + +static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE; + +static char preaction[16] = "pre_none"; + +static unsigned char preop_val = WDOG_PREOP_NONE; + +static char preop[16] = "preop_none"; +static DEFINE_SPINLOCK(ipmi_read_lock); +static char data_to_read; +static DECLARE_WAIT_QUEUE_HEAD(read_q); +static struct fasync_struct *fasync_q; +static char pretimeout_since_last_heartbeat; +static char expect_close; + +static int ifnum_to_use = -1; + +/* Parameters to ipmi_set_timeout */ +#define IPMI_SET_TIMEOUT_NO_HB 0 +#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1 +#define IPMI_SET_TIMEOUT_FORCE_HB 2 + +static int ipmi_set_timeout(int do_heartbeat); +static void ipmi_register_watchdog(int ipmi_intf); +static void ipmi_unregister_watchdog(int ipmi_intf); + +/* + * If true, the driver will start running as soon as it is configured + * and ready. 
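+ * + * (Editor's note: most of the parameters below are created with + * permission 0644, so they can be rewritten at runtime through sysfs + * and, thanks to the custom kernel_param_ops, pushed straight to the + * BMC; e.g. - a hypothetical session: + * + * echo 20 > /sys/module/ipmi_watchdog/parameters/timeout + * + * set_param_timeout() parses the value and, once a watchdog user is + * registered, immediately issues IPMI_WDOG_SET_TIMER.)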
+ */ +static int start_now; + +static int set_param_timeout(const char *val, const struct kernel_param *kp) +{ + char *endp; + int l; + int rv = 0; + + if (!val) + return -EINVAL; + l = simple_strtoul(val, &endp, 0); + if (endp == val) + return -EINVAL; + + *((int *)kp->arg) = l; + if (watchdog_user) + rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + + return rv; +} + +static struct kernel_param_ops param_ops_timeout = { + .set = set_param_timeout, + .get = param_get_int, +}; +#define param_check_timeout param_check_int + +typedef int (*action_fn)(const char *intval, char *outval); + +static int action_op(const char *inval, char *outval); +static int preaction_op(const char *inval, char *outval); +static int preop_op(const char *inval, char *outval); +static void check_parms(void); + +static int set_param_str(const char *val, const struct kernel_param *kp) +{ + action_fn fn = (action_fn) kp->arg; + int rv = 0; + char valcp[16]; + char *s; + + strncpy(valcp, val, 16); + valcp[15] = '\0'; + + s = strstrip(valcp); + + rv = fn(s, NULL); + if (rv) + goto out; + + check_parms(); + if (watchdog_user) + rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + + out: + return rv; +} + +static int get_param_str(char *buffer, const struct kernel_param *kp) +{ + action_fn fn = (action_fn) kp->arg; + int rv; + + rv = fn(NULL, buffer); + if (rv) + return rv; + return strlen(buffer); +} + + +static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp) +{ + int rv = param_set_int(val, kp); + if (rv) + return rv; + if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum)) + return 0; + + ipmi_unregister_watchdog(watchdog_ifnum); + ipmi_register_watchdog(ifnum_to_use); + return 0; +} + +static struct kernel_param_ops param_ops_wdog_ifnum = { + .set = set_param_wdog_ifnum, + .get = param_get_int, +}; + +#define param_check_wdog_ifnum param_check_int + +static struct kernel_param_ops param_ops_str = { + .set = set_param_str, + .get = get_param_str, +}; + +module_param(ifnum_to_use, wdog_ifnum, 0644); +MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " + "timer. Setting to -1 defaults to the first registered " + "interface"); + +module_param(timeout, timeout, 0644); +MODULE_PARM_DESC(timeout, "Timeout value in seconds."); + +module_param(pretimeout, timeout, 0644); +MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); + +module_param_cb(action, &param_ops_str, action_op, 0644); +MODULE_PARM_DESC(action, "Timeout action. One of: " + "reset, none, power_cycle, power_off."); + +module_param_cb(preaction, &param_ops_str, preaction_op, 0644); +MODULE_PARM_DESC(preaction, "Pretimeout action. One of: " + "pre_none, pre_smi, pre_nmi, pre_int."); + +module_param_cb(preop, &param_ops_str, preop_op, 0644); +MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " + "preop_none, preop_panic, preop_give_data."); + +module_param(start_now, int, 0444); +MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as " + "soon as the driver is loaded."); + +module_param(nowayout, bool, 0644); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " + "(default=CONFIG_WATCHDOG_NOWAYOUT)"); + +/* Default state of the timer. */ +static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; + +/* If shutting down via IPMI, we ignore the heartbeat. */ +static int ipmi_ignore_heartbeat; + +/* Is someone using the watchdog? Only one user is allowed.
*/ +static unsigned long ipmi_wdog_open; + +/* + * If set to 1, the heartbeat command will set the state to reset and + * start the timer. The timer doesn't normally run when the driver is + * first opened until the heartbeat is set the first time, this + * variable is used to accomplish this. + */ +static int ipmi_start_timer_on_heartbeat; + +/* IPMI version of the BMC. */ +static unsigned char ipmi_version_major; +static unsigned char ipmi_version_minor; + +/* If a pretimeout occurs, this is used to allow only one panic to happen. */ +static atomic_t preop_panic_excl = ATOMIC_INIT(-1); + +#ifdef HAVE_DIE_NMI +static int testing_nmi; +static int nmi_handler_registered; +#endif + +static int ipmi_heartbeat(void); + +/* + * We use a mutex to make sure that only one thing can send a set + * timeout at one time, because we only have one copy of the data. + * The mutex is claimed when the set_timeout is sent and freed + * when both messages are free. + */ +static atomic_t set_timeout_tofree = ATOMIC_INIT(0); +static DEFINE_MUTEX(set_timeout_lock); +static DECLARE_COMPLETION(set_timeout_wait); +static void set_timeout_free_smi(struct ipmi_smi_msg *msg) +{ + if (atomic_dec_and_test(&set_timeout_tofree)) + complete(&set_timeout_wait); +} +static void set_timeout_free_recv(struct ipmi_recv_msg *msg) +{ + if (atomic_dec_and_test(&set_timeout_tofree)) + complete(&set_timeout_wait); +} +static struct ipmi_smi_msg set_timeout_smi_msg = { + .done = set_timeout_free_smi +}; +static struct ipmi_recv_msg set_timeout_recv_msg = { + .done = set_timeout_free_recv +}; + +static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, + struct ipmi_recv_msg *recv_msg, + int *send_heartbeat_now) +{ + struct kernel_ipmi_msg msg; + unsigned char data[6]; + int rv; + struct ipmi_system_interface_addr addr; + int hbnow = 0; + + + /* These can be cleared as we are setting the timeout. */ + pretimeout_since_last_heartbeat = 0; + + data[0] = 0; + WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); + + if ((ipmi_version_major > 1) + || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { + /* This is an IPMI 1.5-only feature. */ + data[0] |= WDOG_DONT_STOP_ON_SET; + } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { + /* + * In ipmi 1.0, setting the timer stops the watchdog, we + * need to start it back up again. + */ + hbnow = 1; + } + + data[1] = 0; + WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state); + if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) { + WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val); + data[2] = pretimeout; + } else { + WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE); + data[2] = 0; /* No pretimeout. */ + } + data[3] = 0; + WDOG_SET_TIMEOUT(data[4], data[5], timeout); + + addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + addr.channel = IPMI_BMC_CHANNEL; + addr.lun = 0; + + msg.netfn = 0x06; + msg.cmd = IPMI_WDOG_SET_TIMER; + msg.data = data; + msg.data_len = sizeof(data); + rv = ipmi_request_supply_msgs(watchdog_user, + (struct ipmi_addr *) &addr, + 0, + &msg, + NULL, + smi_msg, + recv_msg, + 1); + if (rv) { + printk(KERN_WARNING PFX "set timeout error: %d\n", + rv); + } + + if (send_heartbeat_now) + *send_heartbeat_now = hbnow; + + return rv; +} + +static int ipmi_set_timeout(int do_heartbeat) +{ + int send_heartbeat_now; + int rv; + + + /* We can only send one of these at a time. 
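+ * + * (Editor's note on the counting: set_timeout_tofree is set to 2 + * because one request pins two message structures, the SMI message + * and the receive message. Each done callback decrements it, and the + * decrement that reaches zero completes set_timeout_wait, so the + * static buffers above are never reused while the BMC still owns + * them.)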
*/ + mutex_lock(&set_timeout_lock); + + atomic_set(&set_timeout_tofree, 2); + + rv = i_ipmi_set_timeout(&set_timeout_smi_msg, + &set_timeout_recv_msg, + &send_heartbeat_now); + if (rv) { + mutex_unlock(&set_timeout_lock); + goto out; + } + + wait_for_completion(&set_timeout_wait); + + mutex_unlock(&set_timeout_lock); + + if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB) + || ((send_heartbeat_now) + && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY))) + rv = ipmi_heartbeat(); + +out: + return rv; +} + +static atomic_t panic_done_count = ATOMIC_INIT(0); + +static void panic_smi_free(struct ipmi_smi_msg *msg) +{ + atomic_dec(&panic_done_count); +} +static void panic_recv_free(struct ipmi_recv_msg *msg) +{ + atomic_dec(&panic_done_count); +} + +static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = { + .done = panic_smi_free +}; +static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = { + .done = panic_recv_free +}; + +static void panic_halt_ipmi_heartbeat(void) +{ + struct kernel_ipmi_msg msg; + struct ipmi_system_interface_addr addr; + int rv; + + /* + * Don't reset the timer if we have the timer turned off, that + * re-enables the watchdog. + */ + if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) + return; + + addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + addr.channel = IPMI_BMC_CHANNEL; + addr.lun = 0; + + msg.netfn = 0x06; + msg.cmd = IPMI_WDOG_RESET_TIMER; + msg.data = NULL; + msg.data_len = 0; + atomic_add(2, &panic_done_count); + rv = ipmi_request_supply_msgs(watchdog_user, + (struct ipmi_addr *) &addr, + 0, + &msg, + NULL, + &panic_halt_heartbeat_smi_msg, + &panic_halt_heartbeat_recv_msg, + 1); + if (rv) + atomic_sub(2, &panic_done_count); +} + +static struct ipmi_smi_msg panic_halt_smi_msg = { + .done = panic_smi_free +}; +static struct ipmi_recv_msg panic_halt_recv_msg = { + .done = panic_recv_free +}; + +/* + * Special call, doesn't claim any locks. This is only to be called + * at panic or halt time, in run-to-completion mode, when the caller + * is the only CPU and the only thing that will be going is these IPMI + * calls. + */ +static void panic_halt_ipmi_set_timeout(void) +{ + int send_heartbeat_now; + int rv; + + /* Wait for the messages to be free. */ + while (atomic_read(&panic_done_count) != 0) + ipmi_poll_interface(watchdog_user); + atomic_add(2, &panic_done_count); + rv = i_ipmi_set_timeout(&panic_halt_smi_msg, + &panic_halt_recv_msg, + &send_heartbeat_now); + if (rv) { + atomic_sub(2, &panic_done_count); + printk(KERN_WARNING PFX + "Unable to extend the watchdog timeout."); + } else { + if (send_heartbeat_now) + panic_halt_ipmi_heartbeat(); + } + while (atomic_read(&panic_done_count) != 0) + ipmi_poll_interface(watchdog_user); +} + +/* + * We use a mutex to make sure that only one thing can send a + * heartbeat at one time, because we only have one copy of the data. + * The semaphore is claimed when the set_timeout is sent and freed + * when both messages are free. 
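+ * (heartbeat_lock and heartbeat_tofree below play the same roles for
+ * heartbeats that set_timeout_lock and set_timeout_tofree play above
+ * for timeout updates.)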
+ */ +static atomic_t heartbeat_tofree = ATOMIC_INIT(0); +static DEFINE_MUTEX(heartbeat_lock); +static DECLARE_COMPLETION(heartbeat_wait); +static void heartbeat_free_smi(struct ipmi_smi_msg *msg) +{ + if (atomic_dec_and_test(&heartbeat_tofree)) + complete(&heartbeat_wait); +} +static void heartbeat_free_recv(struct ipmi_recv_msg *msg) +{ + if (atomic_dec_and_test(&heartbeat_tofree)) + complete(&heartbeat_wait); +} +static struct ipmi_smi_msg heartbeat_smi_msg = { + .done = heartbeat_free_smi +}; +static struct ipmi_recv_msg heartbeat_recv_msg = { + .done = heartbeat_free_recv +}; + +static int ipmi_heartbeat(void) +{ + struct kernel_ipmi_msg msg; + int rv; + struct ipmi_system_interface_addr addr; + int timeout_retries = 0; + + if (ipmi_ignore_heartbeat) + return 0; + + if (ipmi_start_timer_on_heartbeat) { + ipmi_start_timer_on_heartbeat = 0; + ipmi_watchdog_state = action_val; + return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); + } else if (pretimeout_since_last_heartbeat) { + /* + * A pretimeout occurred, make sure we set the timeout. + * We don't want to set the action, though, we want to + * leave that alone (thus it can't be combined with the + * above operation. + */ + return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + } + + mutex_lock(&heartbeat_lock); + +restart: + atomic_set(&heartbeat_tofree, 2); + + /* + * Don't reset the timer if we have the timer turned off, that + * re-enables the watchdog. + */ + if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { + mutex_unlock(&heartbeat_lock); + return 0; + } + + addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + addr.channel = IPMI_BMC_CHANNEL; + addr.lun = 0; + + msg.netfn = 0x06; + msg.cmd = IPMI_WDOG_RESET_TIMER; + msg.data = NULL; + msg.data_len = 0; + rv = ipmi_request_supply_msgs(watchdog_user, + (struct ipmi_addr *) &addr, + 0, + &msg, + NULL, + &heartbeat_smi_msg, + &heartbeat_recv_msg, + 1); + if (rv) { + mutex_unlock(&heartbeat_lock); + printk(KERN_WARNING PFX "heartbeat failure: %d\n", + rv); + return rv; + } + + /* Wait for the heartbeat to be sent. */ + wait_for_completion(&heartbeat_wait); + + if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) { + timeout_retries++; + if (timeout_retries > 3) { + printk(KERN_ERR PFX ": Unable to restore the IPMI" + " watchdog's settings, giving up.\n"); + rv = -EIO; + goto out_unlock; + } + + /* + * The timer was not initialized, that means the BMC was + * probably reset and lost the watchdog information. Attempt + * to restore the timer's info. Note that we still hold + * the heartbeat lock, to keep a heartbeat from happening + * in this process, so must say no heartbeat to avoid a + * deadlock on this mutex. + */ + rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + if (rv) { + printk(KERN_ERR PFX ": Unable to send the command to" + " set the watchdog's settings, giving up.\n"); + goto out_unlock; + } + + /* We might need a new heartbeat, so do it now */ + goto restart; + } else if (heartbeat_recv_msg.msg.data[0] != 0) { + /* + * Got an error in the heartbeat response. It was already + * reported in ipmi_wdog_msg_handler, but we should return + * an error here. 
+ */ + rv = -EINVAL; + } + +out_unlock: + mutex_unlock(&heartbeat_lock); + + return rv; +} + +static struct watchdog_info ident = { + .options = 0, /* WDIOF_SETTIMEOUT, */ + .firmware_version = 1, + .identity = "IPMI" +}; + +static int ipmi_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int i; + int val; + + switch (cmd) { + case WDIOC_GETSUPPORT: + i = copy_to_user(argp, &ident, sizeof(ident)); + return i ? -EFAULT : 0; + + case WDIOC_SETTIMEOUT: + i = copy_from_user(&val, argp, sizeof(int)); + if (i) + return -EFAULT; + timeout = val; + return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + + case WDIOC_GETTIMEOUT: + i = copy_to_user(argp, &timeout, sizeof(timeout)); + if (i) + return -EFAULT; + return 0; + + case WDIOC_SETPRETIMEOUT: + i = copy_from_user(&val, argp, sizeof(int)); + if (i) + return -EFAULT; + pretimeout = val; + return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + + case WDIOC_GETPRETIMEOUT: + i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); + if (i) + return -EFAULT; + return 0; + + case WDIOC_KEEPALIVE: + return ipmi_heartbeat(); + + case WDIOC_SETOPTIONS: + i = copy_from_user(&val, argp, sizeof(int)); + if (i) + return -EFAULT; + if (val & WDIOS_DISABLECARD) { + ipmi_watchdog_state = WDOG_TIMEOUT_NONE; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + ipmi_start_timer_on_heartbeat = 0; + } + + if (val & WDIOS_ENABLECARD) { + ipmi_watchdog_state = action_val; + ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); + } + return 0; + + case WDIOC_GETSTATUS: + val = 0; + i = copy_to_user(argp, &val, sizeof(val)); + if (i) + return -EFAULT; + return 0; + + default: + return -ENOIOCTLCMD; + } +} + +static long ipmi_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&ipmi_watchdog_mutex); + ret = ipmi_ioctl(file, cmd, arg); + mutex_unlock(&ipmi_watchdog_mutex); + + return ret; +} + +static ssize_t ipmi_write(struct file *file, + const char __user *buf, + size_t len, + loff_t *ppos) +{ + int rv; + + if (len) { + if (!nowayout) { + size_t i; + + /* In case it was set long ago */ + expect_close = 0; + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, buf + i)) + return -EFAULT; + if (c == 'V') + expect_close = 42; + } + } + rv = ipmi_heartbeat(); + if (rv) + return rv; + } + return len; +} + +static ssize_t ipmi_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + int rv = 0; + wait_queue_t wait; + + if (count <= 0) + return 0; + + /* + * Reading returns if the pretimeout has gone off, and it only does + * it once per pretimeout. + */ + spin_lock(&ipmi_read_lock); + if (!data_to_read) { + if (file->f_flags & O_NONBLOCK) { + rv = -EAGAIN; + goto out; + } + + init_waitqueue_entry(&wait, current); + add_wait_queue(&read_q, &wait); + while (!data_to_read) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock(&ipmi_read_lock); + schedule(); + spin_lock(&ipmi_read_lock); + } + remove_wait_queue(&read_q, &wait); + + if (signal_pending(current)) { + rv = -ERESTARTSYS; + goto out; + } + } + data_to_read = 0; + + out: + spin_unlock(&ipmi_read_lock); + + if (rv == 0) { + if (copy_to_user(buf, &data_to_read, 1)) + rv = -EFAULT; + else + rv = 1; + } + + return rv; +} + +static int ipmi_open(struct inode *ino, struct file *filep) +{ + switch (iminor(ino)) { + case WATCHDOG_MINOR: + if (test_and_set_bit(0, &ipmi_wdog_open)) + return -EBUSY; + + + /* + * Don't start the timer now, let it start on the + * first heartbeat. 
+ */ + ipmi_start_timer_on_heartbeat = 1; + return nonseekable_open(ino, filep); + + default: + return (-ENODEV); + } +} + +static unsigned int ipmi_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + + poll_wait(file, &read_q, wait); + + spin_lock(&ipmi_read_lock); + if (data_to_read) + mask |= (POLLIN | POLLRDNORM); + spin_unlock(&ipmi_read_lock); + + return mask; +} + +static int ipmi_fasync(int fd, struct file *file, int on) +{ + int result; + + result = fasync_helper(fd, file, on, &fasync_q); + + return (result); +} + +static int ipmi_close(struct inode *ino, struct file *filep) +{ + if (iminor(ino) == WATCHDOG_MINOR) { + if (expect_close == 42) { + ipmi_watchdog_state = WDOG_TIMEOUT_NONE; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + } else { + printk(KERN_CRIT PFX + "Unexpected close, not stopping watchdog!\n"); + ipmi_heartbeat(); + } + clear_bit(0, &ipmi_wdog_open); + } + + expect_close = 0; + + return 0; +} + +static const struct file_operations ipmi_wdog_fops = { + .owner = THIS_MODULE, + .read = ipmi_read, + .poll = ipmi_poll, + .write = ipmi_write, + .unlocked_ioctl = ipmi_unlocked_ioctl, + .open = ipmi_open, + .release = ipmi_close, + .fasync = ipmi_fasync, + .llseek = no_llseek, +}; + +static struct miscdevice ipmi_wdog_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &ipmi_wdog_fops +}; + +static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, + void *handler_data) +{ + if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER && + msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) + printk(KERN_INFO PFX "response: The IPMI controller appears" + " to have been reset, will attempt to reinitialize" + " the watchdog timer\n"); + else if (msg->msg.data[0] != 0) + printk(KERN_ERR PFX "response: Error %x on cmd %x\n", + msg->msg.data[0], + msg->msg.cmd); + + ipmi_free_recv_msg(msg); +} + +static void ipmi_wdog_pretimeout_handler(void *handler_data) +{ + if (preaction_val != WDOG_PRETIMEOUT_NONE) { + if (preop_val == WDOG_PREOP_PANIC) { + if (atomic_inc_and_test(&preop_panic_excl)) + panic("Watchdog pre-timeout"); + } else if (preop_val == WDOG_PREOP_GIVE_DATA) { + spin_lock(&ipmi_read_lock); + data_to_read = 1; + wake_up_interruptible(&read_q); + kill_fasync(&fasync_q, SIGIO, POLL_IN); + + spin_unlock(&ipmi_read_lock); + } + } + + /* + * On some machines, the heartbeat will give an error and not + * work unless we re-enable the timer. So do so. + */ + pretimeout_since_last_heartbeat = 1; +} + +static struct ipmi_user_hndl ipmi_hndlrs = { + .ipmi_recv_hndl = ipmi_wdog_msg_handler, + .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler +}; + +static void ipmi_register_watchdog(int ipmi_intf) +{ + int rv = -EBUSY; + + if (watchdog_user) + goto out; + + if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf)) + goto out; + + watchdog_ifnum = ipmi_intf; + + rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); + if (rv < 0) { + printk(KERN_CRIT PFX "Unable to register with ipmi\n"); + goto out; + } + + ipmi_get_version(watchdog_user, + &ipmi_version_major, + &ipmi_version_minor); + + rv = misc_register(&ipmi_wdog_miscdev); + if (rv < 0) { + ipmi_destroy_user(watchdog_user); + watchdog_user = NULL; + printk(KERN_CRIT PFX "Unable to register misc device\n"); + } + +#ifdef HAVE_DIE_NMI + if (nmi_handler_registered) { + int old_pretimeout = pretimeout; + int old_timeout = timeout; + int old_preop_val = preop_val; + + /* + * Set the pretimeout to go off in a second and give + * ourselves plenty of time to stop the timer. 
+ */ + ipmi_watchdog_state = WDOG_TIMEOUT_RESET; + preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ + pretimeout = 99; + timeout = 100; + + testing_nmi = 1; + + rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); + if (rv) { + printk(KERN_WARNING PFX "Error starting timer to" + " test NMI: 0x%x. The NMI pretimeout will" + " likely not work\n", rv); + rv = 0; + goto out_restore; + } + + msleep(1500); + + if (testing_nmi != 2) { + printk(KERN_WARNING PFX "IPMI NMI didn't seem to" + " occur. The NMI pretimeout will" + " likely not work\n"); + } + out_restore: + testing_nmi = 0; + preop_val = old_preop_val; + pretimeout = old_pretimeout; + timeout = old_timeout; + } +#endif + + out: + if ((start_now) && (rv == 0)) { + /* Run from startup, so start the timer now. */ + start_now = 0; /* Disable this function after first startup. */ + ipmi_watchdog_state = action_val; + ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); + printk(KERN_INFO PFX "Starting now!\n"); + } else { + /* Stop the timer now. */ + ipmi_watchdog_state = WDOG_TIMEOUT_NONE; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + } +} + +static void ipmi_unregister_watchdog(int ipmi_intf) +{ + int rv; + + if (!watchdog_user) + goto out; + + if (watchdog_ifnum != ipmi_intf) + goto out; + + /* Make sure no one can call us any more. */ + misc_deregister(&ipmi_wdog_miscdev); + + /* + * Wait to make sure the message makes it out. The lower layer has + * pointers to our buffers, we want to make sure they are done before + * we release our memory. + */ + while (atomic_read(&set_timeout_tofree)) + schedule_timeout_uninterruptible(1); + + /* Disconnect from IPMI. */ + rv = ipmi_destroy_user(watchdog_user); + if (rv) { + printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", + rv); + } + watchdog_user = NULL; + + out: + return; +} + +#ifdef HAVE_DIE_NMI +static int +ipmi_nmi(unsigned int val, struct pt_regs *regs) +{ + /* + * If we get here, it's an NMI that's not a memory or I/O + * error. We can't truly tell if it's from IPMI or not + * without sending a message, and sending a message is almost + * impossible because of locking. + */ + + if (testing_nmi) { + testing_nmi = 2; + return NMI_HANDLED; + } + + /* If we are not expecting a timeout, ignore it. */ + if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) + return NMI_DONE; + + if (preaction_val != WDOG_PRETIMEOUT_NMI) + return NMI_DONE; + + /* + * If no one else handled the NMI, we assume it was the IPMI + * watchdog. + */ + if (preop_val == WDOG_PREOP_PANIC) { + /* On some machines, the heartbeat will give + an error and not work unless we re-enable + the timer. So do so. */ + pretimeout_since_last_heartbeat = 1; + if (atomic_inc_and_test(&preop_panic_excl)) + panic(PFX "pre-timeout"); + } + + return NMI_HANDLED; +} +#endif + +static int wdog_reboot_handler(struct notifier_block *this, + unsigned long code, + void *unused) +{ + static int reboot_event_handled; + + if ((watchdog_user) && (!reboot_event_handled)) { + /* Make sure we only do this once. */ + reboot_event_handled = 1; + + if (code == SYS_POWER_OFF || code == SYS_HALT) { + /* Disable the WDT if we are shutting down. */ + ipmi_watchdog_state = WDOG_TIMEOUT_NONE; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { + /* Set a long timer to let the reboot happens, but + reboot if it hangs, but only if the watchdog + timer was already running. 
*/ + timeout = 120; + pretimeout = 0; + ipmi_watchdog_state = WDOG_TIMEOUT_RESET; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + } + } + return NOTIFY_OK; +} + +static struct notifier_block wdog_reboot_notifier = { + .notifier_call = wdog_reboot_handler, + .next = NULL, + .priority = 0 +}; + +static int wdog_panic_handler(struct notifier_block *this, + unsigned long event, + void *unused) +{ + static int panic_event_handled; + + /* On a panic, if we have a panic timeout, make sure to extend + the watchdog timer to a reasonable value to complete the + panic, if the watchdog timer is running. Plus the + pretimeout is meaningless at panic time. */ + if (watchdog_user && !panic_event_handled && + ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { + /* Make sure we do this only once. */ + panic_event_handled = 1; + + timeout = 255; + pretimeout = 0; + panic_halt_ipmi_set_timeout(); + } + + return NOTIFY_OK; +} + +static struct notifier_block wdog_panic_notifier = { + .notifier_call = wdog_panic_handler, + .next = NULL, + .priority = 150 /* priority: INT_MAX >= x >= 0 */ +}; + + +static void ipmi_new_smi(int if_num, struct device *device) +{ + ipmi_register_watchdog(if_num); +} + +static void ipmi_smi_gone(int if_num) +{ + ipmi_unregister_watchdog(if_num); +} + +static struct ipmi_smi_watcher smi_watcher = { + .owner = THIS_MODULE, + .new_smi = ipmi_new_smi, + .smi_gone = ipmi_smi_gone +}; + +static int action_op(const char *inval, char *outval) +{ + if (outval) + strcpy(outval, action); + + if (!inval) + return 0; + + if (strcmp(inval, "reset") == 0) + action_val = WDOG_TIMEOUT_RESET; + else if (strcmp(inval, "none") == 0) + action_val = WDOG_TIMEOUT_NONE; + else if (strcmp(inval, "power_cycle") == 0) + action_val = WDOG_TIMEOUT_POWER_CYCLE; + else if (strcmp(inval, "power_off") == 0) + action_val = WDOG_TIMEOUT_POWER_DOWN; + else + return -EINVAL; + strcpy(action, inval); + return 0; +} + +static int preaction_op(const char *inval, char *outval) +{ + if (outval) + strcpy(outval, preaction); + + if (!inval) + return 0; + + if (strcmp(inval, "pre_none") == 0) + preaction_val = WDOG_PRETIMEOUT_NONE; + else if (strcmp(inval, "pre_smi") == 0) + preaction_val = WDOG_PRETIMEOUT_SMI; +#ifdef HAVE_DIE_NMI + else if (strcmp(inval, "pre_nmi") == 0) + preaction_val = WDOG_PRETIMEOUT_NMI; +#endif + else if (strcmp(inval, "pre_int") == 0) + preaction_val = WDOG_PRETIMEOUT_MSG_INT; + else + return -EINVAL; + strcpy(preaction, inval); + return 0; +} + +static int preop_op(const char *inval, char *outval) +{ + if (outval) + strcpy(outval, preop); + + if (!inval) + return 0; + + if (strcmp(inval, "preop_none") == 0) + preop_val = WDOG_PREOP_NONE; + else if (strcmp(inval, "preop_panic") == 0) + preop_val = WDOG_PREOP_PANIC; + else if (strcmp(inval, "preop_give_data") == 0) + preop_val = WDOG_PREOP_GIVE_DATA; + else + return -EINVAL; + strcpy(preop, inval); + return 0; +} + +static void check_parms(void) +{ +#ifdef HAVE_DIE_NMI + int do_nmi = 0; + int rv; + + if (preaction_val == WDOG_PRETIMEOUT_NMI) { + do_nmi = 1; + if (preop_val == WDOG_PREOP_GIVE_DATA) { + printk(KERN_WARNING PFX "Pretimeout op is to give data" + " but NMI pretimeout is enabled, setting" + " pretimeout op to none\n"); + preop_op("preop_none", NULL); + do_nmi = 0; + } + } + if (do_nmi && !nmi_handler_registered) { + rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, + "ipmi"); + if (rv) { + printk(KERN_WARNING PFX + "Can't register nmi handler\n"); + return; + } else + nmi_handler_registered = 1; + } else if (!do_nmi && nmi_handler_registered) { + 
		unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+		nmi_handler_registered = 0;
+	}
+#endif
+}
+
+static int __init ipmi_wdog_init(void)
+{
+	int rv;
+
+	if (action_op(action, NULL)) {
+		action_op("reset", NULL);
+		printk(KERN_INFO PFX "Unknown action '%s', defaulting to"
+		       " reset\n", action);
+	}
+
+	if (preaction_op(preaction, NULL)) {
+		preaction_op("pre_none", NULL);
+		printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to"
+		       " none\n", preaction);
+	}
+
+	if (preop_op(preop, NULL)) {
+		preop_op("preop_none", NULL);
+		printk(KERN_INFO PFX "Unknown preop '%s', defaulting to"
+		       " none\n", preop);
+	}
+
+	check_parms();
+
+	register_reboot_notifier(&wdog_reboot_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &wdog_panic_notifier);
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+	if (rv) {
+#ifdef HAVE_DIE_NMI
+		if (nmi_handler_registered)
+			unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &wdog_panic_notifier);
+		unregister_reboot_notifier(&wdog_reboot_notifier);
+		printk(KERN_WARNING PFX "can't register smi watcher\n");
+		return rv;
+	}
+
+	printk(KERN_INFO PFX "driver initialized\n");
+
+	return 0;
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	ipmi_unregister_watchdog(watchdog_ifnum);
+
+#ifdef HAVE_DIE_NMI
+	if (nmi_handler_registered)
+		unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &wdog_panic_notifier);
+	unregister_reboot_notifier(&wdog_reboot_notifier);
+}
+module_exit(ipmi_wdog_exit);
+module_init(ipmi_wdog_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard ");
+MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/kernel/drivers/char/lp.c b/kernel/drivers/char/lp.c
new file mode 100644
index 000000000..c4094c4e2
--- /dev/null
+++ b/kernel/drivers/char/lp.c
@@ -0,0 +1,1063 @@
+/*
+ * Generic parallel printer driver
+ *
+ * Copyright (C) 1992 by Jim Weigand and Linus Torvalds
+ * Copyright (C) 1992,1993 by Michael K. Johnson
+ * - Thanks much to Gunter Windau for pointing out to me where the error
+ *   checking ought to be.
+ * Copyright (C) 1993 by Nigel Gamble (added interrupt code)
+ * Copyright (C) 1994 by Alan Cox (Modularised it)
+ * LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, metcalf@lcs.mit.edu
+ * Statistics and support for slow printers by Rob Janssen, rob@knoware.nl
+ * "lp=" command line parameters added by Grant Guenther, grant@torque.net
+ * lp_read (Status readback) support added by Carsten Gross,
+ *				carsten@sol.wohnheim.uni-ulm.de
+ * Support for parport by Philip Blundell
+ * Parport sharing hacking by Andrea Arcangeli
+ * Fixed kernel_(to/from)_user memory copy to check for errors
+ *				by Riccardo Facchetti
+ * 22-JAN-1998  Added support for devfs  Richard Gooch
+ * Redesigned interrupt handling to handle printers with a buggy handshake
+ *				by Andrea Arcangeli, 11 May 1998
+ * Full efficient handling of printers with a buggy irq handshake (now I have
+ * understood the meaning of the strange handshake). This is done by sending
+ * new characters if the interrupt has just happened, even if the printer
+ * says it is still BUSY. This is needed at least with Epson Stylus Color.
+ * To enable the new TRUST_IRQ mode read the `LP OPTIMIZATION' section
+ * below...
+ * Fixed the irq on the rising edge of the strobe case.
+ * Obsoleted the CAREFUL flag since a printer that doesn't work with
+ * CAREFUL will block a bit after in lp_check_status().
+ *				Andrea Arcangeli, 15 Oct 1998
+ * Obsoleted and removed all the lowlevel stuff implemented in the last
+ * month to use the IEEE1284 functions (that handle the _new_ compatibility
+ * mode fine).
+ */
+
+/* This driver should, in theory, work with any parallel port that has an
+ * appropriate low-level driver; all I/O is done through the parport
+ * abstraction layer.
+ *
+ * If this driver is built into the kernel, you can configure it using the
+ * kernel command-line. For example:
+ *
+ *	lp=parport1,none,parport2	(bind lp0 to parport1, disable lp1 and
+ *					 bind lp2 to parport2)
+ *
+ *	lp=auto				(assign lp devices to all ports that
+ *					 have printers attached, as determined
+ *					 by the IEEE-1284 autoprobe)
+ *
+ *	lp=reset			(reset the printer during
+ *					 initialisation)
+ *
+ *	lp=off				(disable the printer driver entirely)
+ *
+ * If the driver is loaded as a module, similar functionality is available
+ * using module parameters. The equivalent of the above commands would be:
+ *
+ *	# insmod lp.o parport=1,none,2
+ *
+ *	# insmod lp.o parport=auto
+ *
+ *	# insmod lp.o reset=1
+ */
+
+/* COMPATIBILITY WITH OLD KERNELS
+ *
+ * Under Linux 2.0 and previous versions, lp devices were bound to ports at
+ * particular I/O addresses, as follows:
+ *
+ *	lp0		0x3bc
+ *	lp1		0x378
+ *	lp2		0x278
+ *
+ * The new driver, by default, binds lp devices to parport devices as it
+ * finds them. This means that if you only have one port, it will be bound
+ * to lp0 regardless of its I/O address. If you need the old behaviour, you
+ * can force it using the parameters described above.
+ */
+
+/*
+ * The new interrupt handling code takes care of the buggy handshake
+ * of some HP and Epson printers:
+ *        ___
+ * ACK    _______________    ___________
+ *                       |__|
+ *        ____
+ * BUSY   _________            _______
+ *                 |____________|
+ *
+ * I discovered this using the printer scanner that you can find at:
+ *
+ *	ftp://e-mind.com/pub/linux/pscan/
+ *
+ * 11 May 98, Andrea Arcangeli
+ *
+ * My printer scanner, run on an Epson Stylus Color, shows that such a
+ * printer generates the irq on the _rising_ edge of the STROBE. Now lp
+ * handles this case fine too.
+ *
+ * 15 Oct 1998, Andrea Arcangeli
+ *
+ * The so called `buggy' handshake is really the well documented
+ * compatibility mode IEEE1284 handshake. They changed the well known
+ * Centronics handshake, acking in the middle of busy, expecting not to
+ * break drivers or legacy applications, but they broke linux lp until I
+ * fixed it by reverse engineering the protocol by hand some months
+ * ago...
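+ * (In compatibility mode, IEEE 1284 peripherals may ack while BUSY is
+ * still asserted; the old Centronics-style code misread that as a
+ * printer bug.)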
+ *
+ * 14 Dec 1998, Andrea Arcangeli
+ *
+ * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+
+#include <linux/parport.h>
+#undef LP_STATS
+#include <linux/lp.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* if you have more than 8 printers, remember to increase LP_NO */
+#define LP_NO 8
+
+static DEFINE_MUTEX(lp_mutex);
+static struct lp_struct lp_table[LP_NO];
+
+static unsigned int lp_count = 0;
+static struct class *lp_class;
+
+#ifdef CONFIG_LP_CONSOLE
+static struct parport *console_registered;
+#endif /* CONFIG_LP_CONSOLE */
+
+#undef LP_DEBUG
+
+/* Bits used to manage claiming the parport device */
+#define LP_PREEMPT_REQUEST 1
+#define LP_PARPORT_CLAIMED 2
+
+/* --- low-level port access ----------------------------------- */
+
+#define r_dtr(x)	(parport_read_data(lp_table[(x)].dev->port))
+#define r_str(x)	(parport_read_status(lp_table[(x)].dev->port))
+#define w_ctr(x,y)	do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0)
+#define w_dtr(x,y)	do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0)
+
+/* Claim the parport or block trying unless we've already claimed it */
+static void lp_claim_parport_or_block(struct lp_struct *this_lp)
+{
+	if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+		parport_claim_or_block (this_lp->dev);
+	}
+}
+
+/* Release the parport if we've previously claimed it */
+static void lp_release_parport(struct lp_struct *this_lp)
+{
+	if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+		parport_release (this_lp->dev);
+	}
+}
+
+
+
+static int lp_preempt(void *handle)
+{
+	struct lp_struct *this_lp = (struct lp_struct *)handle;
+	set_bit(LP_PREEMPT_REQUEST, &this_lp->bits);
+	return (1);
+}
+
+
+/*
+ * Try to negotiate to a new mode; if unsuccessful negotiate to
+ * compatibility mode. Return the mode we ended up in.
+ */
+static int lp_negotiate(struct parport * port, int mode)
+{
+	if (parport_negotiate (port, mode) != 0) {
+		mode = IEEE1284_MODE_COMPAT;
+		parport_negotiate (port, mode);
+	}
+
+	return (mode);
+}
+
+static int lp_reset(int minor)
+{
+	int retval;
+	lp_claim_parport_or_block (&lp_table[minor]);
+	w_ctr(minor, LP_PSELECP);
+	udelay (LP_DELAY);
+	w_ctr(minor, LP_PSELECP | LP_PINITP);
+	retval = r_str(minor);
+	lp_release_parport (&lp_table[minor]);
+	return retval;
+}
+
+static void lp_error (int minor)
+{
+	DEFINE_WAIT(wait);
+	int polling;
+
+	if (LP_F(minor) & LP_ABORT)
+		return;
+
+	polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
+	if (polling) lp_release_parport (&lp_table[minor]);
+	prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+	schedule_timeout(LP_TIMEOUT_POLLED);
+	finish_wait(&lp_table[minor].waitq, &wait);
+	if (polling) lp_claim_parport_or_block (&lp_table[minor]);
+	else parport_yield_blocking (lp_table[minor].dev);
+}
+
+static int lp_check_status(int minor)
+{
+	int error = 0;
+	unsigned int last = lp_table[minor].last_error;
+	unsigned char status = r_str(minor);
+	if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL))
+		/* No error.
*/ + last = 0; + else if ((status & LP_POUTPA)) { + if (last != LP_POUTPA) { + last = LP_POUTPA; + printk(KERN_INFO "lp%d out of paper\n", minor); + } + error = -ENOSPC; + } else if (!(status & LP_PSELECD)) { + if (last != LP_PSELECD) { + last = LP_PSELECD; + printk(KERN_INFO "lp%d off-line\n", minor); + } + error = -EIO; + } else if (!(status & LP_PERRORP)) { + if (last != LP_PERRORP) { + last = LP_PERRORP; + printk(KERN_INFO "lp%d on fire\n", minor); + } + error = -EIO; + } else { + last = 0; /* Come here if LP_CAREFUL is set and no + errors are reported. */ + } + + lp_table[minor].last_error = last; + + if (last != 0) + lp_error(minor); + + return error; +} + +static int lp_wait_ready(int minor, int nonblock) +{ + int error = 0; + + /* If we're not in compatibility mode, we're ready now! */ + if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) { + return (0); + } + + do { + error = lp_check_status (minor); + if (error && (nonblock || (LP_F(minor) & LP_ABORT))) + break; + if (signal_pending (current)) { + error = -EINTR; + break; + } + } while (error); + return error; +} + +static ssize_t lp_write(struct file * file, const char __user * buf, + size_t count, loff_t *ppos) +{ + unsigned int minor = iminor(file_inode(file)); + struct parport *port = lp_table[minor].dev->port; + char *kbuf = lp_table[minor].lp_buffer; + ssize_t retv = 0; + ssize_t written; + size_t copy_size = count; + int nonblock = ((file->f_flags & O_NONBLOCK) || + (LP_F(minor) & LP_ABORT)); + +#ifdef LP_STATS + if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor))) + lp_table[minor].runchars = 0; + + lp_table[minor].lastcall = jiffies; +#endif + + /* Need to copy the data from user-space. */ + if (copy_size > LP_BUFFER_SIZE) + copy_size = LP_BUFFER_SIZE; + + if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) + return -EINTR; + + if (copy_from_user (kbuf, buf, copy_size)) { + retv = -EFAULT; + goto out_unlock; + } + + /* Claim Parport or sleep until it becomes available + */ + lp_claim_parport_or_block (&lp_table[minor]); + /* Go to the proper mode. */ + lp_table[minor].current_mode = lp_negotiate (port, + lp_table[minor].best_mode); + + parport_set_timeout (lp_table[minor].dev, + (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK + : lp_table[minor].timeout)); + + if ((retv = lp_wait_ready (minor, nonblock)) == 0) + do { + /* Write the data. */ + written = parport_write (port, kbuf, copy_size); + if (written > 0) { + copy_size -= written; + count -= written; + buf += written; + retv += written; + } + + if (signal_pending (current)) { + if (retv == 0) + retv = -EINTR; + + break; + } + + if (copy_size > 0) { + /* incomplete write -> check error ! 
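+			   The peripheral stalled mid-transfer: fall back
+			   to compatibility mode, re-check the printer
+			   status, and either give up or renegotiate and
+			   retry.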
*/ + int error; + + parport_negotiate (lp_table[minor].dev->port, + IEEE1284_MODE_COMPAT); + lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; + + error = lp_wait_ready (minor, nonblock); + + if (error) { + if (retv == 0) + retv = error; + break; + } else if (nonblock) { + if (retv == 0) + retv = -EAGAIN; + break; + } + + parport_yield_blocking (lp_table[minor].dev); + lp_table[minor].current_mode + = lp_negotiate (port, + lp_table[minor].best_mode); + + } else if (need_resched()) + schedule (); + + if (count) { + copy_size = count; + if (copy_size > LP_BUFFER_SIZE) + copy_size = LP_BUFFER_SIZE; + + if (copy_from_user(kbuf, buf, copy_size)) { + if (retv == 0) + retv = -EFAULT; + break; + } + } + } while (count > 0); + + if (test_and_clear_bit(LP_PREEMPT_REQUEST, + &lp_table[minor].bits)) { + printk(KERN_INFO "lp%d releasing parport\n", minor); + parport_negotiate (lp_table[minor].dev->port, + IEEE1284_MODE_COMPAT); + lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; + lp_release_parport (&lp_table[minor]); + } +out_unlock: + mutex_unlock(&lp_table[minor].port_mutex); + + return retv; +} + +#ifdef CONFIG_PARPORT_1284 + +/* Status readback conforming to ieee1284 */ +static ssize_t lp_read(struct file * file, char __user * buf, + size_t count, loff_t *ppos) +{ + DEFINE_WAIT(wait); + unsigned int minor=iminor(file_inode(file)); + struct parport *port = lp_table[minor].dev->port; + ssize_t retval = 0; + char *kbuf = lp_table[minor].lp_buffer; + int nonblock = ((file->f_flags & O_NONBLOCK) || + (LP_F(minor) & LP_ABORT)); + + if (count > LP_BUFFER_SIZE) + count = LP_BUFFER_SIZE; + + if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) + return -EINTR; + + lp_claim_parport_or_block (&lp_table[minor]); + + parport_set_timeout (lp_table[minor].dev, + (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK + : lp_table[minor].timeout)); + + parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + if (parport_negotiate (lp_table[minor].dev->port, + IEEE1284_MODE_NIBBLE)) { + retval = -EIO; + goto out; + } + + while (retval == 0) { + retval = parport_read (port, kbuf, count); + + if (retval > 0) + break; + + if (nonblock) { + retval = -EAGAIN; + break; + } + + /* Wait for data. */ + + if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) { + parport_negotiate (lp_table[minor].dev->port, + IEEE1284_MODE_COMPAT); + lp_error (minor); + if (parport_negotiate (lp_table[minor].dev->port, + IEEE1284_MODE_NIBBLE)) { + retval = -EIO; + goto out; + } + } else { + prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); + schedule_timeout(LP_TIMEOUT_POLLED); + finish_wait(&lp_table[minor].waitq, &wait); + } + + if (signal_pending (current)) { + retval = -ERESTARTSYS; + break; + } + + cond_resched (); + } + parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + out: + lp_release_parport (&lp_table[minor]); + + if (retval > 0 && copy_to_user (buf, kbuf, retval)) + retval = -EFAULT; + + mutex_unlock(&lp_table[minor].port_mutex); + + return retval; +} + +#endif /* IEEE 1284 support */ + +static int lp_open(struct inode * inode, struct file * file) +{ + unsigned int minor = iminor(inode); + int ret = 0; + + mutex_lock(&lp_mutex); + if (minor >= LP_NO) { + ret = -ENXIO; + goto out; + } + if ((LP_F(minor) & LP_EXIST) == 0) { + ret = -ENXIO; + goto out; + } + if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) { + ret = -EBUSY; + goto out; + } + /* If ABORTOPEN is set and the printer is offline or out of paper, + we may still want to open it to perform ioctl()s. 
Therefore we + have commandeered O_NONBLOCK, even though it is being used in + a non-standard manner. This is strictly a Linux hack, and + should most likely only ever be used by the tunelp application. */ + if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) { + int status; + lp_claim_parport_or_block (&lp_table[minor]); + status = r_str(minor); + lp_release_parport (&lp_table[minor]); + if (status & LP_POUTPA) { + printk(KERN_INFO "lp%d out of paper\n", minor); + LP_F(minor) &= ~LP_BUSY; + ret = -ENOSPC; + goto out; + } else if (!(status & LP_PSELECD)) { + printk(KERN_INFO "lp%d off-line\n", minor); + LP_F(minor) &= ~LP_BUSY; + ret = -EIO; + goto out; + } else if (!(status & LP_PERRORP)) { + printk(KERN_ERR "lp%d printer error\n", minor); + LP_F(minor) &= ~LP_BUSY; + ret = -EIO; + goto out; + } + } + lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL); + if (!lp_table[minor].lp_buffer) { + LP_F(minor) &= ~LP_BUSY; + ret = -ENOMEM; + goto out; + } + /* Determine if the peripheral supports ECP mode */ + lp_claim_parport_or_block (&lp_table[minor]); + if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) && + !parport_negotiate (lp_table[minor].dev->port, + IEEE1284_MODE_ECP)) { + printk (KERN_INFO "lp%d: ECP mode\n", minor); + lp_table[minor].best_mode = IEEE1284_MODE_ECP; + } else { + lp_table[minor].best_mode = IEEE1284_MODE_COMPAT; + } + /* Leave peripheral in compatibility mode */ + parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + lp_release_parport (&lp_table[minor]); + lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; +out: + mutex_unlock(&lp_mutex); + return ret; +} + +static int lp_release(struct inode * inode, struct file * file) +{ + unsigned int minor = iminor(inode); + + lp_claim_parport_or_block (&lp_table[minor]); + parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; + lp_release_parport (&lp_table[minor]); + kfree(lp_table[minor].lp_buffer); + lp_table[minor].lp_buffer = NULL; + LP_F(minor) &= ~LP_BUSY; + return 0; +} + +static int lp_do_ioctl(unsigned int minor, unsigned int cmd, + unsigned long arg, void __user *argp) +{ + int status; + int retval = 0; + +#ifdef LP_DEBUG + printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg); +#endif + if (minor >= LP_NO) + return -ENODEV; + if ((LP_F(minor) & LP_EXIST) == 0) + return -ENODEV; + switch ( cmd ) { + case LPTIME: + if (arg > UINT_MAX / HZ) + return -EINVAL; + LP_TIME(minor) = arg * HZ/100; + break; + case LPCHAR: + LP_CHAR(minor) = arg; + break; + case LPABORT: + if (arg) + LP_F(minor) |= LP_ABORT; + else + LP_F(minor) &= ~LP_ABORT; + break; + case LPABORTOPEN: + if (arg) + LP_F(minor) |= LP_ABORTOPEN; + else + LP_F(minor) &= ~LP_ABORTOPEN; + break; + case LPCAREFUL: + if (arg) + LP_F(minor) |= LP_CAREFUL; + else + LP_F(minor) &= ~LP_CAREFUL; + break; + case LPWAIT: + LP_WAIT(minor) = arg; + break; + case LPSETIRQ: + return -EINVAL; + break; + case LPGETIRQ: + if (copy_to_user(argp, &LP_IRQ(minor), + sizeof(int))) + return -EFAULT; + break; + case LPGETSTATUS: + if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) + return -EINTR; + lp_claim_parport_or_block (&lp_table[minor]); + status = r_str(minor); + lp_release_parport (&lp_table[minor]); + mutex_unlock(&lp_table[minor].port_mutex); + + if (copy_to_user(argp, &status, sizeof(int))) + return -EFAULT; + break; + case LPRESET: + lp_reset(minor); + break; +#ifdef LP_STATS + case LPGETSTATS: + if (copy_to_user(argp, 
&LP_STAT(minor), + sizeof(struct lp_stats))) + return -EFAULT; + if (capable(CAP_SYS_ADMIN)) + memset(&LP_STAT(minor), 0, + sizeof(struct lp_stats)); + break; +#endif + case LPGETFLAGS: + status = LP_F(minor); + if (copy_to_user(argp, &status, sizeof(int))) + return -EFAULT; + break; + + default: + retval = -EINVAL; + } + return retval; +} + +static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout) +{ + long to_jiffies; + + /* Convert to jiffies, place in lp_table */ + if ((par_timeout->tv_sec < 0) || + (par_timeout->tv_usec < 0)) { + return -EINVAL; + } + to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ); + to_jiffies += par_timeout->tv_sec * (long) HZ; + if (to_jiffies <= 0) { + return -EINVAL; + } + lp_table[minor].timeout = to_jiffies; + return 0; +} + +static long lp_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int minor; + struct timeval par_timeout; + int ret; + + minor = iminor(file_inode(file)); + mutex_lock(&lp_mutex); + switch (cmd) { + case LPSETTIMEOUT: + if (copy_from_user(&par_timeout, (void __user *)arg, + sizeof (struct timeval))) { + ret = -EFAULT; + break; + } + ret = lp_set_timeout(minor, &par_timeout); + break; + default: + ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg); + break; + } + mutex_unlock(&lp_mutex); + + return ret; +} + +#ifdef CONFIG_COMPAT +static long lp_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int minor; + struct timeval par_timeout; + int ret; + + minor = iminor(file_inode(file)); + mutex_lock(&lp_mutex); + switch (cmd) { + case LPSETTIMEOUT: + if (compat_get_timeval(&par_timeout, compat_ptr(arg))) { + ret = -EFAULT; + break; + } + ret = lp_set_timeout(minor, &par_timeout); + break; +#ifdef LP_STATS + case LPGETSTATS: + /* FIXME: add an implementation if you set LP_STATS */ + ret = -EINVAL; + break; +#endif + default: + ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg)); + break; + } + mutex_unlock(&lp_mutex); + + return ret; +} +#endif + +static const struct file_operations lp_fops = { + .owner = THIS_MODULE, + .write = lp_write, + .unlocked_ioctl = lp_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = lp_compat_ioctl, +#endif + .open = lp_open, + .release = lp_release, +#ifdef CONFIG_PARPORT_1284 + .read = lp_read, +#endif + .llseek = noop_llseek, +}; + +/* --- support for console on the line printer ----------------- */ + +#ifdef CONFIG_LP_CONSOLE + +#define CONSOLE_LP 0 + +/* If the printer is out of paper, we can either lose the messages or + * stall until the printer is happy again. Define CONSOLE_LP_STRICT + * non-zero to get the latter behaviour. */ +#define CONSOLE_LP_STRICT 1 + +/* The console must be locked when we get here. */ + +static void lp_console_write (struct console *co, const char *s, + unsigned count) +{ + struct pardevice *dev = lp_table[CONSOLE_LP].dev; + struct parport *port = dev->port; + ssize_t written; + + if (parport_claim (dev)) + /* Nothing we can do. */ + return; + + parport_set_timeout (dev, 0); + + /* Go to compatibility mode. */ + parport_negotiate (port, IEEE1284_MODE_COMPAT); + + do { + /* Write the data, converting LF->CRLF as we go. 
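+		   Each pass writes the text up to the next '\n', then
+		   substitutes "\r\n" for the newline itself.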
*/ + ssize_t canwrite = count; + char *lf = memchr (s, '\n', count); + if (lf) + canwrite = lf - s; + + if (canwrite > 0) { + written = parport_write (port, s, canwrite); + + if (written <= 0) + continue; + + s += written; + count -= written; + canwrite -= written; + } + + if (lf && canwrite <= 0) { + const char *crlf = "\r\n"; + int i = 2; + + /* Dodge the original '\n', and put '\r\n' instead. */ + s++; + count--; + do { + written = parport_write (port, crlf, i); + if (written > 0) + i -= written, crlf += written; + } while (i > 0 && (CONSOLE_LP_STRICT || written > 0)); + } + } while (count > 0 && (CONSOLE_LP_STRICT || written > 0)); + + parport_release (dev); +} + +static struct console lpcons = { + .name = "lp", + .write = lp_console_write, + .flags = CON_PRINTBUFFER, +}; + +#endif /* console on line printer */ + +/* --- initialisation code ------------------------------------- */ + +static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC }; +static char *parport[LP_NO]; +static bool reset; + +module_param_array(parport, charp, NULL, 0); +module_param(reset, bool, 0); + +#ifndef MODULE +static int __init lp_setup (char *str) +{ + static int parport_ptr; + int x; + + if (get_option(&str, &x)) { + if (x == 0) { + /* disable driver on "lp=" or "lp=0" */ + parport_nr[0] = LP_PARPORT_OFF; + } else { + printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x); + return 0; + } + } else if (!strncmp(str, "parport", 7)) { + int n = simple_strtoul(str+7, NULL, 10); + if (parport_ptr < LP_NO) + parport_nr[parport_ptr++] = n; + else + printk(KERN_INFO "lp: too many ports, %s ignored.\n", + str); + } else if (!strcmp(str, "auto")) { + parport_nr[0] = LP_PARPORT_AUTO; + } else if (!strcmp(str, "none")) { + parport_nr[parport_ptr++] = LP_PARPORT_NONE; + } else if (!strcmp(str, "reset")) { + reset = 1; + } + return 1; +} +#endif + +static int lp_register(int nr, struct parport *port) +{ + lp_table[nr].dev = parport_register_device(port, "lp", + lp_preempt, NULL, NULL, 0, + (void *) &lp_table[nr]); + if (lp_table[nr].dev == NULL) + return 1; + lp_table[nr].flags |= LP_EXIST; + + if (reset) + lp_reset(nr); + + device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL, + "lp%d", nr); + + printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name, + (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven"); + +#ifdef CONFIG_LP_CONSOLE + if (!nr) { + if (port->modes & PARPORT_MODE_SAFEININT) { + register_console(&lpcons); + console_registered = port; + printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP); + } else + printk (KERN_ERR "lp%d: cannot run console on %s\n", + CONSOLE_LP, port->name); + } +#endif + + return 0; +} + +static void lp_attach (struct parport *port) +{ + unsigned int i; + + switch (parport_nr[0]) { + case LP_PARPORT_UNSPEC: + case LP_PARPORT_AUTO: + if (parport_nr[0] == LP_PARPORT_AUTO && + port->probe_info[0].class != PARPORT_CLASS_PRINTER) + return; + if (lp_count == LP_NO) { + printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO); + return; + } + if (!lp_register(lp_count, port)) + lp_count++; + break; + + default: + for (i = 0; i < LP_NO; i++) { + if (port->number == parport_nr[i]) { + if (!lp_register(i, port)) + lp_count++; + break; + } + } + break; + } +} + +static void lp_detach (struct parport *port) +{ + /* Write this some day. 
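+	   (Tearing down the lp device itself on port removal is still a
+	   TODO; only the console binding is undone below.)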
*/ +#ifdef CONFIG_LP_CONSOLE + if (console_registered == port) { + unregister_console(&lpcons); + console_registered = NULL; + } +#endif /* CONFIG_LP_CONSOLE */ +} + +static struct parport_driver lp_driver = { + .name = "lp", + .attach = lp_attach, + .detach = lp_detach, +}; + +static int __init lp_init (void) +{ + int i, err = 0; + + if (parport_nr[0] == LP_PARPORT_OFF) + return 0; + + for (i = 0; i < LP_NO; i++) { + lp_table[i].dev = NULL; + lp_table[i].flags = 0; + lp_table[i].chars = LP_INIT_CHAR; + lp_table[i].time = LP_INIT_TIME; + lp_table[i].wait = LP_INIT_WAIT; + lp_table[i].lp_buffer = NULL; +#ifdef LP_STATS + lp_table[i].lastcall = 0; + lp_table[i].runchars = 0; + memset (&lp_table[i].stats, 0, sizeof (struct lp_stats)); +#endif + lp_table[i].last_error = 0; + init_waitqueue_head (&lp_table[i].waitq); + init_waitqueue_head (&lp_table[i].dataq); + mutex_init(&lp_table[i].port_mutex); + lp_table[i].timeout = 10 * HZ; + } + + if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) { + printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR); + return -EIO; + } + + lp_class = class_create(THIS_MODULE, "printer"); + if (IS_ERR(lp_class)) { + err = PTR_ERR(lp_class); + goto out_reg; + } + + if (parport_register_driver (&lp_driver)) { + printk (KERN_ERR "lp: unable to register with parport\n"); + err = -EIO; + goto out_class; + } + + if (!lp_count) { + printk (KERN_INFO "lp: driver loaded but no devices found\n"); +#ifndef CONFIG_PARPORT_1284 + if (parport_nr[0] == LP_PARPORT_AUTO) + printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n"); +#endif + } + + return 0; + +out_class: + class_destroy(lp_class); +out_reg: + unregister_chrdev(LP_MAJOR, "lp"); + return err; +} + +static int __init lp_init_module (void) +{ + if (parport[0]) { + /* The user gave some parameters. Let's see what they were. */ + if (!strncmp(parport[0], "auto", 4)) + parport_nr[0] = LP_PARPORT_AUTO; + else { + int n; + for (n = 0; n < LP_NO && parport[n]; n++) { + if (!strncmp(parport[n], "none", 4)) + parport_nr[n] = LP_PARPORT_NONE; + else { + char *ep; + unsigned long r = simple_strtoul(parport[n], &ep, 0); + if (ep != parport[n]) + parport_nr[n] = r; + else { + printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]); + return -ENODEV; + } + } + } + } + } + + return lp_init(); +} + +static void lp_cleanup_module (void) +{ + unsigned int offset; + + parport_unregister_driver (&lp_driver); + +#ifdef CONFIG_LP_CONSOLE + unregister_console (&lpcons); +#endif + + unregister_chrdev(LP_MAJOR, "lp"); + for (offset = 0; offset < LP_NO; offset++) { + if (lp_table[offset].dev == NULL) + continue; + parport_unregister_device(lp_table[offset].dev); + device_destroy(lp_class, MKDEV(LP_MAJOR, offset)); + } + class_destroy(lp_class); +} + +__setup("lp=", lp_setup); +module_init(lp_init_module); +module_exit(lp_cleanup_module); + +MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/mbcs.c b/kernel/drivers/char/mbcs.c new file mode 100644 index 000000000..e5d3e3f7a --- /dev/null +++ b/kernel/drivers/char/mbcs.c @@ -0,0 +1,852 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * MOATB Core Services driver. 
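+ * Provides user-space access to the corelet's SRAM (via the DMA read
+ * and write engines) and to its algorithm engine, over the tiocx bus
+ * interface.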
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mbcs.h" + +#define MBCS_DEBUG 0 +#if MBCS_DEBUG +#define DBG(fmt...) printk(KERN_ALERT fmt) +#else +#define DBG(fmt...) +#endif +static DEFINE_MUTEX(mbcs_mutex); +static int mbcs_major; + +static LIST_HEAD(soft_list); + +/* + * file operations + */ +static const struct file_operations mbcs_ops = { + .open = mbcs_open, + .llseek = mbcs_sram_llseek, + .read = mbcs_sram_read, + .write = mbcs_sram_write, + .mmap = mbcs_gscr_mmap, +}; + +struct mbcs_callback_arg { + int minor; + struct cx_dev *cx_dev; +}; + +static inline void mbcs_getdma_init(struct getdma *gdma) +{ + memset(gdma, 0, sizeof(struct getdma)); + gdma->DoneIntEnable = 1; +} + +static inline void mbcs_putdma_init(struct putdma *pdma) +{ + memset(pdma, 0, sizeof(struct putdma)); + pdma->DoneIntEnable = 1; +} + +static inline void mbcs_algo_init(struct algoblock *algo_soft) +{ + memset(algo_soft, 0, sizeof(struct algoblock)); +} + +static inline void mbcs_getdma_set(void *mmr, + uint64_t hostAddr, + uint64_t localAddr, + uint64_t localRamSel, + uint64_t numPkts, + uint64_t amoEnable, + uint64_t intrEnable, + uint64_t peerIO, + uint64_t amoHostDest, + uint64_t amoModType, uint64_t intrHostDest, + uint64_t intrVector) +{ + union dma_control rdma_control; + union dma_amo_dest amo_dest; + union intr_dest intr_dest; + union dma_localaddr local_addr; + union dma_hostaddr host_addr; + + rdma_control.dma_control_reg = 0; + amo_dest.dma_amo_dest_reg = 0; + intr_dest.intr_dest_reg = 0; + local_addr.dma_localaddr_reg = 0; + host_addr.dma_hostaddr_reg = 0; + + host_addr.dma_sys_addr = hostAddr; + MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg); + + local_addr.dma_ram_addr = localAddr; + local_addr.dma_ram_sel = localRamSel; + MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg); + + rdma_control.dma_op_length = numPkts; + rdma_control.done_amo_en = amoEnable; + rdma_control.done_int_en = intrEnable; + rdma_control.pio_mem_n = peerIO; + MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg); + + amo_dest.dma_amo_sys_addr = amoHostDest; + amo_dest.dma_amo_mod_type = amoModType; + MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg); + + intr_dest.address = intrHostDest; + intr_dest.int_vector = intrVector; + MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg); + +} + +static inline void mbcs_putdma_set(void *mmr, + uint64_t hostAddr, + uint64_t localAddr, + uint64_t localRamSel, + uint64_t numPkts, + uint64_t amoEnable, + uint64_t intrEnable, + uint64_t peerIO, + uint64_t amoHostDest, + uint64_t amoModType, + uint64_t intrHostDest, uint64_t intrVector) +{ + union dma_control wdma_control; + union dma_amo_dest amo_dest; + union intr_dest intr_dest; + union dma_localaddr local_addr; + union dma_hostaddr host_addr; + + wdma_control.dma_control_reg = 0; + amo_dest.dma_amo_dest_reg = 0; + intr_dest.intr_dest_reg = 0; + local_addr.dma_localaddr_reg = 0; + host_addr.dma_hostaddr_reg = 0; + + host_addr.dma_sys_addr = hostAddr; + MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg); + + local_addr.dma_ram_addr = localAddr; + local_addr.dma_ram_sel = localRamSel; + MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg); + + wdma_control.dma_op_length = numPkts; + wdma_control.done_amo_en = amoEnable; + 
wdma_control.done_int_en = intrEnable; + wdma_control.pio_mem_n = peerIO; + MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg); + + amo_dest.dma_amo_sys_addr = amoHostDest; + amo_dest.dma_amo_mod_type = amoModType; + MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg); + + intr_dest.address = intrHostDest; + intr_dest.int_vector = intrVector; + MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg); + +} + +static inline void mbcs_algo_set(void *mmr, + uint64_t amoHostDest, + uint64_t amoModType, + uint64_t intrHostDest, + uint64_t intrVector, uint64_t algoStepCount) +{ + union dma_amo_dest amo_dest; + union intr_dest intr_dest; + union algo_step step; + + step.algo_step_reg = 0; + intr_dest.intr_dest_reg = 0; + amo_dest.dma_amo_dest_reg = 0; + + amo_dest.dma_amo_sys_addr = amoHostDest; + amo_dest.dma_amo_mod_type = amoModType; + MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg); + + intr_dest.address = intrHostDest; + intr_dest.int_vector = intrVector; + MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg); + + step.alg_step_cnt = algoStepCount; + MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg); +} + +static inline int mbcs_getdma_start(struct mbcs_soft *soft) +{ + void *mmr_base; + struct getdma *gdma; + uint64_t numPkts; + union cm_control cm_control; + + mmr_base = soft->mmr_base; + gdma = &soft->getdma; + + /* check that host address got setup */ + if (!gdma->hostAddr) + return -1; + + numPkts = + (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE; + + /* program engine */ + mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr), + gdma->localAddr, + (gdma->localAddr < MB2) ? 0 : + (gdma->localAddr < MB4) ? 1 : + (gdma->localAddr < MB6) ? 2 : 3, + numPkts, + gdma->DoneAmoEnable, + gdma->DoneIntEnable, + gdma->peerIO, + gdma->amoHostDest, + gdma->amoModType, + gdma->intrHostDest, gdma->intrVector); + + /* start engine */ + cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.rd_dma_go = 1; + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg); + + return 0; + +} + +static inline int mbcs_putdma_start(struct mbcs_soft *soft) +{ + void *mmr_base; + struct putdma *pdma; + uint64_t numPkts; + union cm_control cm_control; + + mmr_base = soft->mmr_base; + pdma = &soft->putdma; + + /* check that host address got setup */ + if (!pdma->hostAddr) + return -1; + + numPkts = + (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE; + + /* program engine */ + mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr), + pdma->localAddr, + (pdma->localAddr < MB2) ? 0 : + (pdma->localAddr < MB4) ? 1 : + (pdma->localAddr < MB6) ? 
2 : 3, + numPkts, + pdma->DoneAmoEnable, + pdma->DoneIntEnable, + pdma->peerIO, + pdma->amoHostDest, + pdma->amoModType, + pdma->intrHostDest, pdma->intrVector); + + /* start engine */ + cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.wr_dma_go = 1; + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg); + + return 0; + +} + +static inline int mbcs_algo_start(struct mbcs_soft *soft) +{ + struct algoblock *algo_soft = &soft->algo; + void *mmr_base = soft->mmr_base; + union cm_control cm_control; + + if (mutex_lock_interruptible(&soft->algolock)) + return -ERESTARTSYS; + + atomic_set(&soft->algo_done, 0); + + mbcs_algo_set(mmr_base, + algo_soft->amoHostDest, + algo_soft->amoModType, + algo_soft->intrHostDest, + algo_soft->intrVector, algo_soft->algoStepCount); + + /* start algorithm */ + cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.alg_done_int_en = 1; + cm_control.alg_go = 1; + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg); + + mutex_unlock(&soft->algolock); + + return 0; +} + +static inline ssize_t +do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr, + size_t len, loff_t * off) +{ + int rv = 0; + + if (mutex_lock_interruptible(&soft->dmawritelock)) + return -ERESTARTSYS; + + atomic_set(&soft->dmawrite_done, 0); + + soft->putdma.hostAddr = hostAddr; + soft->putdma.localAddr = *off; + soft->putdma.bytes = len; + + if (mbcs_putdma_start(soft) < 0) { + DBG(KERN_ALERT "do_mbcs_sram_dmawrite: " + "mbcs_putdma_start failed\n"); + rv = -EAGAIN; + goto dmawrite_exit; + } + + if (wait_event_interruptible(soft->dmawrite_queue, + atomic_read(&soft->dmawrite_done))) { + rv = -ERESTARTSYS; + goto dmawrite_exit; + } + + rv = len; + *off += len; + +dmawrite_exit: + mutex_unlock(&soft->dmawritelock); + + return rv; +} + +static inline ssize_t +do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr, + size_t len, loff_t * off) +{ + int rv = 0; + + if (mutex_lock_interruptible(&soft->dmareadlock)) + return -ERESTARTSYS; + + atomic_set(&soft->dmawrite_done, 0); + + soft->getdma.hostAddr = hostAddr; + soft->getdma.localAddr = *off; + soft->getdma.bytes = len; + + if (mbcs_getdma_start(soft) < 0) { + DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n"); + rv = -EAGAIN; + goto dmaread_exit; + } + + if (wait_event_interruptible(soft->dmaread_queue, + atomic_read(&soft->dmaread_done))) { + rv = -ERESTARTSYS; + goto dmaread_exit; + } + + rv = len; + *off += len; + +dmaread_exit: + mutex_unlock(&soft->dmareadlock); + + return rv; +} + +static int mbcs_open(struct inode *ip, struct file *fp) +{ + struct mbcs_soft *soft; + int minor; + + mutex_lock(&mbcs_mutex); + minor = iminor(ip); + + /* Nothing protects access to this list... 
*/ + list_for_each_entry(soft, &soft_list, list) { + if (soft->nasid == minor) { + fp->private_data = soft->cxdev; + mutex_unlock(&mbcs_mutex); + return 0; + } + } + + mutex_unlock(&mbcs_mutex); + return -ENODEV; +} + +static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off) +{ + struct cx_dev *cx_dev = fp->private_data; + struct mbcs_soft *soft = cx_dev->soft; + uint64_t hostAddr; + int rv = 0; + + hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len)); + if (hostAddr == 0) + return -ENOMEM; + + rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off); + if (rv < 0) + goto exit; + + if (copy_to_user(buf, (void *)hostAddr, len)) + rv = -EFAULT; + + exit: + free_pages(hostAddr, get_order(len)); + + return rv; +} + +static ssize_t +mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off) +{ + struct cx_dev *cx_dev = fp->private_data; + struct mbcs_soft *soft = cx_dev->soft; + uint64_t hostAddr; + int rv = 0; + + hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len)); + if (hostAddr == 0) + return -ENOMEM; + + if (copy_from_user((void *)hostAddr, buf, len)) { + rv = -EFAULT; + goto exit; + } + + rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off); + + exit: + free_pages(hostAddr, get_order(len)); + + return rv; +} + +static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence) +{ + loff_t newpos; + + switch (whence) { + case SEEK_SET: + newpos = off; + break; + + case SEEK_CUR: + newpos = filp->f_pos + off; + break; + + case SEEK_END: + newpos = MBCS_SRAM_SIZE + off; + break; + + default: /* can't happen */ + return -EINVAL; + } + + if (newpos < 0) + return -EINVAL; + + filp->f_pos = newpos; + + return newpos; +} + +static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset) +{ + uint64_t mmr_base; + + mmr_base = (uint64_t) (soft->mmr_base + offset); + + return mmr_base; +} + +static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft) +{ + soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START); +} + +static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft) +{ + soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START); +} + +static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma) +{ + struct cx_dev *cx_dev = fp->private_data; + struct mbcs_soft *soft = cx_dev->soft; + + if (vma->vm_pgoff != 0) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* Remap-pfn-range will mark the range VM_IO */ + if (remap_pfn_range(vma, + vma->vm_start, + __pa(soft->gscr_addr) >> PAGE_SHIFT, + PAGE_SIZE, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/** + * mbcs_completion_intr_handler - Primary completion handler. 
+ * @irq: irq + * @arg: soft struct for device + * + */ +static irqreturn_t +mbcs_completion_intr_handler(int irq, void *arg) +{ + struct mbcs_soft *soft = (struct mbcs_soft *)arg; + void *mmr_base; + union cm_status cm_status; + union cm_control cm_control; + + mmr_base = soft->mmr_base; + cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS); + + if (cm_status.rd_dma_done) { + /* stop dma-read engine, clear status */ + cm_control.cm_control_reg = + MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.rd_dma_clr = 1; + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, + cm_control.cm_control_reg); + atomic_set(&soft->dmaread_done, 1); + wake_up(&soft->dmaread_queue); + } + if (cm_status.wr_dma_done) { + /* stop dma-write engine, clear status */ + cm_control.cm_control_reg = + MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.wr_dma_clr = 1; + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, + cm_control.cm_control_reg); + atomic_set(&soft->dmawrite_done, 1); + wake_up(&soft->dmawrite_queue); + } + if (cm_status.alg_done) { + /* clear status */ + cm_control.cm_control_reg = + MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.alg_done_clr = 1; + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, + cm_control.cm_control_reg); + atomic_set(&soft->algo_done, 1); + wake_up(&soft->algo_queue); + } + + return IRQ_HANDLED; +} + +/** + * mbcs_intr_alloc - Allocate interrupts. + * @dev: device pointer + * + */ +static int mbcs_intr_alloc(struct cx_dev *dev) +{ + struct sn_irq_info *sn_irq; + struct mbcs_soft *soft; + struct getdma *getdma; + struct putdma *putdma; + struct algoblock *algo; + + soft = dev->soft; + getdma = &soft->getdma; + putdma = &soft->putdma; + algo = &soft->algo; + + soft->get_sn_irq = NULL; + soft->put_sn_irq = NULL; + soft->algo_sn_irq = NULL; + + sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1); + if (sn_irq == NULL) + return -EAGAIN; + soft->get_sn_irq = sn_irq; + getdma->intrHostDest = sn_irq->irq_xtalkaddr; + getdma->intrVector = sn_irq->irq_irq; + if (request_irq(sn_irq->irq_irq, + (void *)mbcs_completion_intr_handler, IRQF_SHARED, + "MBCS get intr", (void *)soft)) { + tiocx_irq_free(soft->get_sn_irq); + return -EAGAIN; + } + + sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1); + if (sn_irq == NULL) { + free_irq(soft->get_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->get_sn_irq); + return -EAGAIN; + } + soft->put_sn_irq = sn_irq; + putdma->intrHostDest = sn_irq->irq_xtalkaddr; + putdma->intrVector = sn_irq->irq_irq; + if (request_irq(sn_irq->irq_irq, + (void *)mbcs_completion_intr_handler, IRQF_SHARED, + "MBCS put intr", (void *)soft)) { + tiocx_irq_free(soft->put_sn_irq); + free_irq(soft->get_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->get_sn_irq); + return -EAGAIN; + } + + sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1); + if (sn_irq == NULL) { + free_irq(soft->put_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->put_sn_irq); + free_irq(soft->get_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->get_sn_irq); + return -EAGAIN; + } + soft->algo_sn_irq = sn_irq; + algo->intrHostDest = sn_irq->irq_xtalkaddr; + algo->intrVector = sn_irq->irq_irq; + if (request_irq(sn_irq->irq_irq, + (void *)mbcs_completion_intr_handler, IRQF_SHARED, + "MBCS algo intr", (void *)soft)) { + tiocx_irq_free(soft->algo_sn_irq); + free_irq(soft->put_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->put_sn_irq); + free_irq(soft->get_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->get_sn_irq); + return -EAGAIN; + } + + return 0; +} + +/** + * 
mbcs_intr_dealloc - Remove interrupts. + * @dev: device pointer + * + */ +static void mbcs_intr_dealloc(struct cx_dev *dev) +{ + struct mbcs_soft *soft; + + soft = dev->soft; + + free_irq(soft->get_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->get_sn_irq); + free_irq(soft->put_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->put_sn_irq); + free_irq(soft->algo_sn_irq->irq_irq, soft); + tiocx_irq_free(soft->algo_sn_irq); +} + +static inline int mbcs_hw_init(struct mbcs_soft *soft) +{ + void *mmr_base = soft->mmr_base; + union cm_control cm_control; + union cm_req_timeout cm_req_timeout; + uint64_t err_stat; + + cm_req_timeout.cm_req_timeout_reg = + MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT); + + cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK; + MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT, + cm_req_timeout.cm_req_timeout_reg); + + mbcs_gscr_pioaddr_set(soft); + mbcs_debug_pioaddr_set(soft); + + /* clear errors */ + err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT); + MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat); + MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1); + + /* enable interrupts */ + /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */ + MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL); + + /* arm status regs and clear engines */ + cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL); + cm_control.rearm_stat_regs = 1; + cm_control.alg_clr = 1; + cm_control.wr_dma_clr = 1; + cm_control.rd_dma_clr = 1; + + MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg); + + return 0; +} + +static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct cx_dev *cx_dev = to_cx_dev(dev); + struct mbcs_soft *soft = cx_dev->soft; + uint64_t debug0; + + /* + * By convention, the first debug register contains the + * algorithm number and revision. 
+ */ + debug0 = *(uint64_t *) soft->debug_addr; + + return sprintf(buf, "0x%x 0x%x\n", + upper_32_bits(debug0), lower_32_bits(debug0)); +} + +static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + int n; + struct cx_dev *cx_dev = to_cx_dev(dev); + struct mbcs_soft *soft = cx_dev->soft; + + if (count <= 0) + return 0; + + n = simple_strtoul(buf, NULL, 0); + + if (n == 1) { + mbcs_algo_start(soft); + if (wait_event_interruptible(soft->algo_queue, + atomic_read(&soft->algo_done))) + return -ERESTARTSYS; + } + + return count; +} + +DEVICE_ATTR(algo, 0644, show_algo, store_algo); + +/** + * mbcs_probe - Initialize for device + * @dev: device pointer + * @device_id: id table pointer + * + */ +static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id) +{ + struct mbcs_soft *soft; + + dev->soft = NULL; + + soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL); + if (soft == NULL) + return -ENOMEM; + + soft->nasid = dev->cx_id.nasid; + list_add(&soft->list, &soft_list); + soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid); + dev->soft = soft; + soft->cxdev = dev; + + init_waitqueue_head(&soft->dmawrite_queue); + init_waitqueue_head(&soft->dmaread_queue); + init_waitqueue_head(&soft->algo_queue); + + mutex_init(&soft->dmawritelock); + mutex_init(&soft->dmareadlock); + mutex_init(&soft->algolock); + + mbcs_getdma_init(&soft->getdma); + mbcs_putdma_init(&soft->putdma); + mbcs_algo_init(&soft->algo); + + mbcs_hw_init(soft); + + /* Allocate interrupts */ + mbcs_intr_alloc(dev); + + device_create_file(&dev->dev, &dev_attr_algo); + + return 0; +} + +static int mbcs_remove(struct cx_dev *dev) +{ + if (dev->soft) { + mbcs_intr_dealloc(dev); + kfree(dev->soft); + } + + device_remove_file(&dev->dev, &dev_attr_algo); + + return 0; +} + +static const struct cx_device_id mbcs_id_table[] = { + { + .part_num = MBCS_PART_NUM, + .mfg_num = MBCS_MFG_NUM, + }, + { + .part_num = MBCS_PART_NUM_ALG0, + .mfg_num = MBCS_MFG_NUM, + }, + {0, 0} +}; + +MODULE_DEVICE_TABLE(cx, mbcs_id_table); + +static struct cx_drv mbcs_driver = { + .name = DEVICE_NAME, + .id_table = mbcs_id_table, + .probe = mbcs_probe, + .remove = mbcs_remove, +}; + +static void __exit mbcs_exit(void) +{ + unregister_chrdev(mbcs_major, DEVICE_NAME); + cx_driver_unregister(&mbcs_driver); +} + +static int __init mbcs_init(void) +{ + int rv; + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + // Put driver into chrdevs[]. Get major number. + rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops); + if (rv < 0) { + DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv); + return rv; + } + mbcs_major = rv; + + return cx_driver_register(&mbcs_driver); +} + +module_init(mbcs_init); +module_exit(mbcs_exit); + +MODULE_AUTHOR("Bruce Losure "); +MODULE_DESCRIPTION("Driver for MOATB Core Services"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/mbcs.h b/kernel/drivers/char/mbcs.h new file mode 100644 index 000000000..1a36884c4 --- /dev/null +++ b/kernel/drivers/char/mbcs.h @@ -0,0 +1,553 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#ifndef __MBCS_H__ +#define __MBCS_H__ + +/* + * General macros + */ +#define MB (1024*1024) +#define MB2 (2*MB) +#define MB4 (4*MB) +#define MB6 (6*MB) + +/* + * Offsets and masks + */ +#define MBCS_CM_ID 0x0000 /* Identification */ +#define MBCS_CM_STATUS 0x0008 /* Status */ +#define MBCS_CM_ERROR_DETAIL1 0x0010 /* Error Detail1 */ +#define MBCS_CM_ERROR_DETAIL2 0x0018 /* Error Detail2 */ +#define MBCS_CM_CONTROL 0x0020 /* Control */ +#define MBCS_CM_REQ_TOUT 0x0028 /* Request Time-out */ +#define MBCS_CM_ERR_INT_DEST 0x0038 /* Error Interrupt Destination */ +#define MBCS_CM_TARG_FL 0x0050 /* Target Flush */ +#define MBCS_CM_ERR_STAT 0x0060 /* Error Status */ +#define MBCS_CM_CLR_ERR_STAT 0x0068 /* Clear Error Status */ +#define MBCS_CM_ERR_INT_EN 0x0070 /* Error Interrupt Enable */ +#define MBCS_RD_DMA_SYS_ADDR 0x0100 /* Read DMA System Address */ +#define MBCS_RD_DMA_LOC_ADDR 0x0108 /* Read DMA Local Address */ +#define MBCS_RD_DMA_CTRL 0x0110 /* Read DMA Control */ +#define MBCS_RD_DMA_AMO_DEST 0x0118 /* Read DMA AMO Destination */ +#define MBCS_RD_DMA_INT_DEST 0x0120 /* Read DMA Interrupt Destination */ +#define MBCS_RD_DMA_AUX_STAT 0x0130 /* Read DMA Auxiliary Status */ +#define MBCS_WR_DMA_SYS_ADDR 0x0200 /* Write DMA System Address */ +#define MBCS_WR_DMA_LOC_ADDR 0x0208 /* Write DMA Local Address */ +#define MBCS_WR_DMA_CTRL 0x0210 /* Write DMA Control */ +#define MBCS_WR_DMA_AMO_DEST 0x0218 /* Write DMA AMO Destination */ +#define MBCS_WR_DMA_INT_DEST 0x0220 /* Write DMA Interrupt Destination */ +#define MBCS_WR_DMA_AUX_STAT 0x0230 /* Write DMA Auxiliary Status */ +#define MBCS_ALG_AMO_DEST 0x0300 /* Algorithm AMO Destination */ +#define MBCS_ALG_INT_DEST 0x0308 /* Algorithm Interrupt Destination */ +#define MBCS_ALG_OFFSETS 0x0310 +#define MBCS_ALG_STEP 0x0318 /* Algorithm Step */ + +#define MBCS_GSCR_START 0x0000000 +#define MBCS_DEBUG_START 0x0100000 +#define MBCS_RAM0_START 0x0200000 +#define MBCS_RAM1_START 0x0400000 +#define MBCS_RAM2_START 0x0600000 + +#define MBCS_CM_CONTROL_REQ_TOUT_MASK 0x0000000000ffffffUL +//#define PIO_BASE_ADDR_BASE_OFFSET_MASK 0x00fffffffff00000UL + +#define MBCS_SRAM_SIZE (1024*1024) +#define MBCS_CACHELINE_SIZE 128 + +/* + * MMR get's and put's + */ +#define MBCS_MMR_ADDR(mmr_base, offset)((uint64_t *)(mmr_base + offset)) +#define MBCS_MMR_SET(mmr_base, offset, value) { \ + uint64_t *mbcs_mmr_set_u64p, readback; \ + mbcs_mmr_set_u64p = (uint64_t *)(mmr_base + offset); \ + *mbcs_mmr_set_u64p = value; \ + readback = *mbcs_mmr_set_u64p; \ +} +#define MBCS_MMR_GET(mmr_base, offset) *(uint64_t *)(mmr_base + offset) +#define MBCS_MMR_ZERO(mmr_base, offset) MBCS_MMR_SET(mmr_base, offset, 0) + +/* + * MBCS mmr structures + */ +union cm_id { + uint64_t cm_id_reg; + struct { + uint64_t always_one:1, // 0 + mfg_id:11, // 11:1 + part_num:16, // 27:12 + bitstream_rev:8, // 35:28 + :28; // 63:36 + }; +}; + +union cm_status { + uint64_t cm_status_reg; + struct { + uint64_t pending_reads:8, // 7:0 + pending_writes:8, // 15:8 + ice_rsp_credits:8, // 23:16 + ice_req_credits:8, // 31:24 + cm_req_credits:8, // 39:32 + :1, // 40 + rd_dma_in_progress:1, // 41 + rd_dma_done:1, // 42 + :1, // 43 + wr_dma_in_progress:1, // 44 + wr_dma_done:1, // 45 + alg_waiting:1, // 46 + alg_pipe_running:1, // 47 + alg_done:1, // 48 + :3, // 51:49 + pending_int_reqs:8, // 59:52 + :3, // 62:60 + alg_half_speed_sel:1; // 63 + }; +}; + +union cm_error_detail1 { + uint64_t cm_error_detail1_reg; + struct { + uint64_t packet_type:4, // 3:0 + source_id:2, // 5:4 + data_size:2, // 7:6 
+ tnum:8, // 15:8 + byte_enable:8, // 23:16 + gfx_cred:8, // 31:24 + read_type:2, // 33:32 + pio_or_memory:1, // 34 + head_cw_error:1, // 35 + :12, // 47:36 + head_error_bit:1, // 48 + data_error_bit:1, // 49 + :13, // 62:50 + valid:1; // 63 + }; +}; + +union cm_error_detail2 { + uint64_t cm_error_detail2_reg; + struct { + uint64_t address:56, // 55:0 + :8; // 63:56 + }; +}; + +union cm_control { + uint64_t cm_control_reg; + struct { + uint64_t cm_id:2, // 1:0 + :2, // 3:2 + max_trans:5, // 8:4 + :3, // 11:9 + address_mode:1, // 12 + :7, // 19:13 + credit_limit:8, // 27:20 + :5, // 32:28 + rearm_stat_regs:1, // 33 + prescalar_byp:1, // 34 + force_gap_war:1, // 35 + rd_dma_go:1, // 36 + wr_dma_go:1, // 37 + alg_go:1, // 38 + rd_dma_clr:1, // 39 + wr_dma_clr:1, // 40 + alg_clr:1, // 41 + :2, // 43:42 + alg_wait_step:1, // 44 + alg_done_amo_en:1, // 45 + alg_done_int_en:1, // 46 + :1, // 47 + alg_sram0_locked:1, // 48 + alg_sram1_locked:1, // 49 + alg_sram2_locked:1, // 50 + alg_done_clr:1, // 51 + :12; // 63:52 + }; +}; + +union cm_req_timeout { + uint64_t cm_req_timeout_reg; + struct { + uint64_t time_out:24, // 23:0 + :40; // 63:24 + }; +}; + +union intr_dest { + uint64_t intr_dest_reg; + struct { + uint64_t address:56, // 55:0 + int_vector:8; // 63:56 + }; +}; + +union cm_error_status { + uint64_t cm_error_status_reg; + struct { + uint64_t ecc_sbe:1, // 0 + ecc_mbe:1, // 1 + unsupported_req:1, // 2 + unexpected_rsp:1, // 3 + bad_length:1, // 4 + bad_datavalid:1, // 5 + buffer_overflow:1, // 6 + request_timeout:1, // 7 + :8, // 15:8 + head_inv_data_size:1, // 16 + rsp_pactype_inv:1, // 17 + head_sb_err:1, // 18 + missing_head:1, // 19 + head_inv_rd_type:1, // 20 + head_cmd_err_bit:1, // 21 + req_addr_align_inv:1, // 22 + pio_req_addr_inv:1, // 23 + req_range_dsize_inv:1, // 24 + early_term:1, // 25 + early_tail:1, // 26 + missing_tail:1, // 27 + data_flit_sb_err:1, // 28 + cm2hcm_req_cred_of:1, // 29 + cm2hcm_rsp_cred_of:1, // 30 + rx_bad_didn:1, // 31 + rd_dma_err_rsp:1, // 32 + rd_dma_tnum_tout:1, // 33 + rd_dma_multi_tnum_tou:1, // 34 + wr_dma_err_rsp:1, // 35 + wr_dma_tnum_tout:1, // 36 + wr_dma_multi_tnum_tou:1, // 37 + alg_data_overflow:1, // 38 + alg_data_underflow:1, // 39 + ram0_access_conflict:1, // 40 + ram1_access_conflict:1, // 41 + ram2_access_conflict:1, // 42 + ram0_perr:1, // 43 + ram1_perr:1, // 44 + ram2_perr:1, // 45 + int_gen_rsp_err:1, // 46 + int_gen_tnum_tout:1, // 47 + rd_dma_prog_err:1, // 48 + wr_dma_prog_err:1, // 49 + :14; // 63:50 + }; +}; + +union cm_clr_error_status { + uint64_t cm_clr_error_status_reg; + struct { + uint64_t clr_ecc_sbe:1, // 0 + clr_ecc_mbe:1, // 1 + clr_unsupported_req:1, // 2 + clr_unexpected_rsp:1, // 3 + clr_bad_length:1, // 4 + clr_bad_datavalid:1, // 5 + clr_buffer_overflow:1, // 6 + clr_request_timeout:1, // 7 + :8, // 15:8 + clr_head_inv_data_siz:1, // 16 + clr_rsp_pactype_inv:1, // 17 + clr_head_sb_err:1, // 18 + clr_missing_head:1, // 19 + clr_head_inv_rd_type:1, // 20 + clr_head_cmd_err_bit:1, // 21 + clr_req_addr_align_in:1, // 22 + clr_pio_req_addr_inv:1, // 23 + clr_req_range_dsize_i:1, // 24 + clr_early_term:1, // 25 + clr_early_tail:1, // 26 + clr_missing_tail:1, // 27 + clr_data_flit_sb_err:1, // 28 + clr_cm2hcm_req_cred_o:1, // 29 + clr_cm2hcm_rsp_cred_o:1, // 30 + clr_rx_bad_didn:1, // 31 + clr_rd_dma_err_rsp:1, // 32 + clr_rd_dma_tnum_tout:1, // 33 + clr_rd_dma_multi_tnum:1, // 34 + clr_wr_dma_err_rsp:1, // 35 + clr_wr_dma_tnum_tout:1, // 36 + clr_wr_dma_multi_tnum:1, // 37 + clr_alg_data_overflow:1, // 38 + 
clr_alg_data_underflo:1, // 39 + clr_ram0_access_confl:1, // 40 + clr_ram1_access_confl:1, // 41 + clr_ram2_access_confl:1, // 42 + clr_ram0_perr:1, // 43 + clr_ram1_perr:1, // 44 + clr_ram2_perr:1, // 45 + clr_int_gen_rsp_err:1, // 46 + clr_int_gen_tnum_tout:1, // 47 + clr_rd_dma_prog_err:1, // 48 + clr_wr_dma_prog_err:1, // 49 + :14; // 63:50 + }; +}; + +union cm_error_intr_enable { + uint64_t cm_error_intr_enable_reg; + struct { + uint64_t int_en_ecc_sbe:1, // 0 + int_en_ecc_mbe:1, // 1 + int_en_unsupported_re:1, // 2 + int_en_unexpected_rsp:1, // 3 + int_en_bad_length:1, // 4 + int_en_bad_datavalid:1, // 5 + int_en_buffer_overflo:1, // 6 + int_en_request_timeou:1, // 7 + :8, // 15:8 + int_en_head_inv_data_:1, // 16 + int_en_rsp_pactype_in:1, // 17 + int_en_head_sb_err:1, // 18 + int_en_missing_head:1, // 19 + int_en_head_inv_rd_ty:1, // 20 + int_en_head_cmd_err_b:1, // 21 + int_en_req_addr_align:1, // 22 + int_en_pio_req_addr_i:1, // 23 + int_en_req_range_dsiz:1, // 24 + int_en_early_term:1, // 25 + int_en_early_tail:1, // 26 + int_en_missing_tail:1, // 27 + int_en_data_flit_sb_e:1, // 28 + int_en_cm2hcm_req_cre:1, // 29 + int_en_cm2hcm_rsp_cre:1, // 30 + int_en_rx_bad_didn:1, // 31 + int_en_rd_dma_err_rsp:1, // 32 + int_en_rd_dma_tnum_to:1, // 33 + int_en_rd_dma_multi_t:1, // 34 + int_en_wr_dma_err_rsp:1, // 35 + int_en_wr_dma_tnum_to:1, // 36 + int_en_wr_dma_multi_t:1, // 37 + int_en_alg_data_overf:1, // 38 + int_en_alg_data_under:1, // 39 + int_en_ram0_access_co:1, // 40 + int_en_ram1_access_co:1, // 41 + int_en_ram2_access_co:1, // 42 + int_en_ram0_perr:1, // 43 + int_en_ram1_perr:1, // 44 + int_en_ram2_perr:1, // 45 + int_en_int_gen_rsp_er:1, // 46 + int_en_int_gen_tnum_t:1, // 47 + int_en_rd_dma_prog_er:1, // 48 + int_en_wr_dma_prog_er:1, // 49 + :14; // 63:50 + }; +}; + +struct cm_mmr { + union cm_id id; + union cm_status status; + union cm_error_detail1 err_detail1; + union cm_error_detail2 err_detail2; + union cm_control control; + union cm_req_timeout req_timeout; + uint64_t reserved1[1]; + union intr_dest int_dest; + uint64_t reserved2[2]; + uint64_t targ_flush; + uint64_t reserved3[1]; + union cm_error_status err_status; + union cm_clr_error_status clr_err_status; + union cm_error_intr_enable int_enable; +}; + +union dma_hostaddr { + uint64_t dma_hostaddr_reg; + struct { + uint64_t dma_sys_addr:56, // 55:0 + :8; // 63:56 + }; +}; + +union dma_localaddr { + uint64_t dma_localaddr_reg; + struct { + uint64_t dma_ram_addr:21, // 20:0 + dma_ram_sel:2, // 22:21 + :41; // 63:23 + }; +}; + +union dma_control { + uint64_t dma_control_reg; + struct { + uint64_t dma_op_length:16, // 15:0 + :18, // 33:16 + done_amo_en:1, // 34 + done_int_en:1, // 35 + :1, // 36 + pio_mem_n:1, // 37 + :26; // 63:38 + }; +}; + +union dma_amo_dest { + uint64_t dma_amo_dest_reg; + struct { + uint64_t dma_amo_sys_addr:56, // 55:0 + dma_amo_mod_type:3, // 58:56 + :5; // 63:59 + }; +}; + +union rdma_aux_status { + uint64_t rdma_aux_status_reg; + struct { + uint64_t op_num_pacs_left:17, // 16:0 + :5, // 21:17 + lrsp_buff_empty:1, // 22 + :17, // 39:23 + pending_reqs_left:6, // 45:40 + :18; // 63:46 + }; +}; + +struct rdma_mmr { + union dma_hostaddr host_addr; + union dma_localaddr local_addr; + union dma_control control; + union dma_amo_dest amo_dest; + union intr_dest intr_dest; + union rdma_aux_status aux_status; +}; + +union wdma_aux_status { + uint64_t wdma_aux_status_reg; + struct { + uint64_t op_num_pacs_left:17, // 16:0 + :4, // 20:17 + lreq_buff_empty:1, // 21 + :18, // 39:22 + pending_reqs_left:6, // 
45:40 + :18; // 63:46 + }; +}; + +struct wdma_mmr { + union dma_hostaddr host_addr; + union dma_localaddr local_addr; + union dma_control control; + union dma_amo_dest amo_dest; + union intr_dest intr_dest; + union wdma_aux_status aux_status; +}; + +union algo_step { + uint64_t algo_step_reg; + struct { + uint64_t alg_step_cnt:16, // 15:0 + :48; // 63:16 + }; +}; + +struct algo_mmr { + union dma_amo_dest amo_dest; + union intr_dest intr_dest; + union { + uint64_t algo_offset_reg; + struct { + uint64_t sram0_offset:7, // 6:0 + reserved0:1, // 7 + sram1_offset:7, // 14:8 + reserved1:1, // 15 + sram2_offset:7, // 22:16 + reserved2:14; // 63:23 + }; + } sram_offset; + union algo_step step; +}; + +struct mbcs_mmr { + struct cm_mmr cm; + uint64_t reserved1[17]; + struct rdma_mmr rdDma; + uint64_t reserved2[25]; + struct wdma_mmr wrDma; + uint64_t reserved3[25]; + struct algo_mmr algo; + uint64_t reserved4[156]; +}; + +/* + * defines + */ +#define DEVICE_NAME "mbcs" +#define MBCS_PART_NUM 0xfff0 +#define MBCS_PART_NUM_ALG0 0xf001 +#define MBCS_MFG_NUM 0x1 + +struct algoblock { + uint64_t amoHostDest; + uint64_t amoModType; + uint64_t intrHostDest; + uint64_t intrVector; + uint64_t algoStepCount; +}; + +struct getdma { + uint64_t hostAddr; + uint64_t localAddr; + uint64_t bytes; + uint64_t DoneAmoEnable; + uint64_t DoneIntEnable; + uint64_t peerIO; + uint64_t amoHostDest; + uint64_t amoModType; + uint64_t intrHostDest; + uint64_t intrVector; +}; + +struct putdma { + uint64_t hostAddr; + uint64_t localAddr; + uint64_t bytes; + uint64_t DoneAmoEnable; + uint64_t DoneIntEnable; + uint64_t peerIO; + uint64_t amoHostDest; + uint64_t amoModType; + uint64_t intrHostDest; + uint64_t intrVector; +}; + +struct mbcs_soft { + struct list_head list; + struct cx_dev *cxdev; + int major; + int nasid; + void *mmr_base; + wait_queue_head_t dmawrite_queue; + wait_queue_head_t dmaread_queue; + wait_queue_head_t algo_queue; + struct sn_irq_info *get_sn_irq; + struct sn_irq_info *put_sn_irq; + struct sn_irq_info *algo_sn_irq; + struct getdma getdma; + struct putdma putdma; + struct algoblock algo; + uint64_t gscr_addr; // pio addr + uint64_t ram0_addr; // pio addr + uint64_t ram1_addr; // pio addr + uint64_t ram2_addr; // pio addr + uint64_t debug_addr; // pio addr + atomic_t dmawrite_done; + atomic_t dmaread_done; + atomic_t algo_done; + struct mutex dmawritelock; + struct mutex dmareadlock; + struct mutex algolock; +}; + +static int mbcs_open(struct inode *ip, struct file *fp); +static ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len, + loff_t * off); +static ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len, + loff_t * off); +static loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence); +static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma); + +#endif // __MBCS_H__ diff --git a/kernel/drivers/char/mem.c b/kernel/drivers/char/mem.c new file mode 100644 index 000000000..6b1721f97 --- /dev/null +++ b/kernel/drivers/char/mem.c @@ -0,0 +1,872 @@ +/* + * linux/drivers/char/mem.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * Added devfs support. + * Jan-11-1998, C. 
Scott Ananian + Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_IA64 +# include +#endif + +#define DEVPORT_MINOR 4 + +static inline unsigned long size_inside_page(unsigned long start, + unsigned long size) +{ + unsigned long sz; + + sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); + + return min(sz, size); +} + +#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE +static inline int valid_phys_addr_range(phys_addr_t addr, size_t count) +{ + return addr + count <= __pa(high_memory); +} + +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + return 1; +} +#endif + +#ifdef CONFIG_STRICT_DEVMEM +static inline int range_is_allowed(unsigned long pfn, unsigned long size) +{ + u64 from = ((u64)pfn) << PAGE_SHIFT; + u64 to = from + size; + u64 cursor = from; + + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { + printk(KERN_INFO + "Program %s tried to access /dev/mem between %Lx->%Lx.\n", + current->comm, from, to); + return 0; + } + cursor += PAGE_SIZE; + pfn++; + } + return 1; +} +#else +static inline int range_is_allowed(unsigned long pfn, unsigned long size) +{ + return 1; +} +#endif + +#ifndef unxlate_dev_mem_ptr +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) +{ +} +#endif + +/* + * This function reads the *physical* memory. The f_pos points directly to the + * memory location. + */ +static ssize_t read_mem(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + phys_addr_t p = *ppos; + ssize_t read, sz; + void *ptr; + + if (p != *ppos) + return 0; + + if (!valid_phys_addr_range(p, count)) + return -EFAULT; + read = 0; +#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED + /* we don't have page 0 mapped on sparc and m68k.. */ + if (p < PAGE_SIZE) { + sz = size_inside_page(p, count); + if (sz > 0) { + if (clear_user(buf, sz)) + return -EFAULT; + buf += sz; + p += sz; + count -= sz; + read += sz; + } + } +#endif + + while (count > 0) { + unsigned long remaining; + + sz = size_inside_page(p, count); + + if (!range_is_allowed(p >> PAGE_SHIFT, count)) + return -EPERM; + + /* + * On ia64 if a page has been mapped somewhere as uncached, then + * it must also be accessed uncached by the kernel or data + * corruption may occur. + */ + ptr = xlate_dev_mem_ptr(p); + if (!ptr) + return -EFAULT; + + remaining = copy_to_user(buf, ptr, sz); + unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; + + buf += sz; + p += sz; + count -= sz; + read += sz; + } + + *ppos += read; + return read; +} + +static ssize_t write_mem(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + phys_addr_t p = *ppos; + ssize_t written, sz; + unsigned long copied; + void *ptr; + + if (p != *ppos) + return -EFBIG; + + if (!valid_phys_addr_range(p, count)) + return -EFAULT; + + written = 0; + +#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED + /* we don't have page 0 mapped on sparc and m68k.. */ + if (p < PAGE_SIZE) { + sz = size_inside_page(p, count); + /* Hmm. Do something?
*/ + buf += sz; + p += sz; + count -= sz; + written += sz; + } +#endif + + while (count > 0) { + sz = size_inside_page(p, count); + + if (!range_is_allowed(p >> PAGE_SHIFT, sz)) + return -EPERM; + + /* + * On ia64 if a page has been mapped somewhere as uncached, then + * it must also be accessed uncached by the kernel or data + * corruption may occur. + */ + ptr = xlate_dev_mem_ptr(p); + if (!ptr) { + if (written) + break; + return -EFAULT; + } + + copied = copy_from_user(ptr, buf, sz); + unxlate_dev_mem_ptr(p, ptr); + if (copied) { + written += sz - copied; + if (written) + break; + return -EFAULT; + } + + buf += sz; + p += sz; + count -= sz; + written += sz; + } + + *ppos += written; + return written; +} + +int __weak phys_mem_access_prot_allowed(struct file *file, + unsigned long pfn, unsigned long size, pgprot_t *vma_prot) +{ + return 1; +} + +#ifndef __HAVE_PHYS_MEM_ACCESS_PROT + +/* + * Architectures vary in how they handle caching for addresses + * outside of main memory. + * + */ +#ifdef pgprot_noncached +static int uncached_access(struct file *file, phys_addr_t addr) +{ +#if defined(CONFIG_IA64) + /* + * On ia64, we ignore O_DSYNC because we cannot tolerate memory + * attribute aliases. + */ + return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); +#elif defined(CONFIG_MIPS) + { + extern int __uncached_access(struct file *file, + unsigned long addr); + + return __uncached_access(file, addr); + } +#else + /* + * Accessing memory above the top the kernel knows about or through a + * file pointer + * that was marked O_DSYNC will be done non-cached. + */ + if (file->f_flags & O_DSYNC) + return 1; + return addr >= __pa(high_memory); +#endif +} +#endif + +static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ +#ifdef pgprot_noncached + phys_addr_t offset = pfn << PAGE_SHIFT; + + if (uncached_access(file, offset)) + return pgprot_noncached(vma_prot); +#endif + return vma_prot; +} +#endif + +#ifndef CONFIG_MMU +static unsigned long get_unmapped_area_mem(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + if (!valid_mmap_phys_addr_range(pgoff, len)) + return (unsigned long) -EINVAL; + return pgoff << PAGE_SHIFT; +} + +/* permit direct mmap, for read, write or exec */ +static unsigned memory_mmap_capabilities(struct file *file) +{ + return NOMMU_MAP_DIRECT | + NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; +} + +static unsigned zero_mmap_capabilities(struct file *file) +{ + return NOMMU_MAP_COPY; +} + +/* can't do an in-place private mapping if there's no MMU */ +static inline int private_mapping_ok(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_MAYSHARE; +} +#else + +static inline int private_mapping_ok(struct vm_area_struct *vma) +{ + return 1; +} +#endif + +static const struct vm_operations_struct mmap_mem_ops = { +#ifdef CONFIG_HAVE_IOREMAP_PROT + .access = generic_access_phys +#endif +}; + +static int mmap_mem(struct file *file, struct vm_area_struct *vma) +{ + size_t size = vma->vm_end - vma->vm_start; + + if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) + return -EINVAL; + + if (!private_mapping_ok(vma)) + return -ENOSYS; + + if (!range_is_allowed(vma->vm_pgoff, size)) + return -EPERM; + + if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, + &vma->vm_page_prot)) + return -EINVAL; + + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, + size, + vma->vm_page_prot); + + vma->vm_ops = &mmap_mem_ops; + + /* Remap-pfn-range will 
mark the range VM_IO */ + if (remap_pfn_range(vma, + vma->vm_start, + vma->vm_pgoff, + size, + vma->vm_page_prot)) { + return -EAGAIN; + } + return 0; +} + +static int mmap_kmem(struct file *file, struct vm_area_struct *vma) +{ + unsigned long pfn; + + /* Turn a kernel-virtual address into a physical page frame */ + pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; + + /* + * RED-PEN: on some architectures there is more mapped memory than + * available in mem_map which pfn_valid checks for. Perhaps should add a + * new macro here. + * + * RED-PEN: vmalloc is not supported right now. + */ + if (!pfn_valid(pfn)) + return -EIO; + + vma->vm_pgoff = pfn; + return mmap_mem(file, vma); +} + +/* + * This function reads the *virtual* memory as seen by the kernel. + */ +static ssize_t read_kmem(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long p = *ppos; + ssize_t low_count, read, sz; + char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ + int err = 0; + + read = 0; + if (p < (unsigned long) high_memory) { + low_count = count; + if (count > (unsigned long)high_memory - p) + low_count = (unsigned long)high_memory - p; + +#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED + /* we don't have page 0 mapped on sparc and m68k.. */ + if (p < PAGE_SIZE && low_count > 0) { + sz = size_inside_page(p, low_count); + if (clear_user(buf, sz)) + return -EFAULT; + buf += sz; + p += sz; + read += sz; + low_count -= sz; + count -= sz; + } +#endif + while (low_count > 0) { + sz = size_inside_page(p, low_count); + + /* + * On ia64 if a page has been mapped somewhere as + * uncached, then it must also be accessed uncached + * by the kernel or data corruption may occur + */ + kbuf = xlate_dev_kmem_ptr((void *)p); + + if (copy_to_user(buf, kbuf, sz)) + return -EFAULT; + buf += sz; + p += sz; + read += sz; + low_count -= sz; + count -= sz; + } + } + + if (count > 0) { + kbuf = (char *)__get_free_page(GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + while (count > 0) { + sz = size_inside_page(p, count); + if (!is_vmalloc_or_module_addr((void *)p)) { + err = -ENXIO; + break; + } + sz = vread(kbuf, (char *)p, sz); + if (!sz) + break; + if (copy_to_user(buf, kbuf, sz)) { + err = -EFAULT; + break; + } + count -= sz; + buf += sz; + read += sz; + p += sz; + } + free_page((unsigned long)kbuf); + } + *ppos = p; + return read ? read : err; +} + + +static ssize_t do_write_kmem(unsigned long p, const char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t written, sz; + unsigned long copied; + + written = 0; +#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED + /* we don't have page 0 mapped on sparc and m68k.. */ + if (p < PAGE_SIZE) { + sz = size_inside_page(p, count); + /* Hmm. Do something? */ + buf += sz; + p += sz; + count -= sz; + written += sz; + } +#endif + + while (count > 0) { + void *ptr; + + sz = size_inside_page(p, count); + + /* + * On ia64 if a page has been mapped somewhere as uncached, then + * it must also be accessed uncached by the kernel or data + * corruption may occur. + */ + ptr = xlate_dev_kmem_ptr((void *)p); + + copied = copy_from_user(ptr, buf, sz); + if (copied) { + written += sz - copied; + if (written) + break; + return -EFAULT; + } + buf += sz; + p += sz; + count -= sz; + written += sz; + } + + *ppos += written; + return written; +} + +/* + * This function writes to the *virtual* memory as seen by the kernel. 
+ */ +static ssize_t write_kmem(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long p = *ppos; + ssize_t wrote = 0; + ssize_t virtr = 0; + char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ + int err = 0; + + if (p < (unsigned long) high_memory) { + unsigned long to_write = min_t(unsigned long, count, + (unsigned long)high_memory - p); + wrote = do_write_kmem(p, buf, to_write, ppos); + if (wrote != to_write) + return wrote; + p += wrote; + buf += wrote; + count -= wrote; + } + + if (count > 0) { + kbuf = (char *)__get_free_page(GFP_KERNEL); + if (!kbuf) + return wrote ? wrote : -ENOMEM; + while (count > 0) { + unsigned long sz = size_inside_page(p, count); + unsigned long n; + + if (!is_vmalloc_or_module_addr((void *)p)) { + err = -ENXIO; + break; + } + n = copy_from_user(kbuf, buf, sz); + if (n) { + err = -EFAULT; + break; + } + vwrite(kbuf, (char *)p, sz); + count -= sz; + buf += sz; + virtr += sz; + p += sz; + } + free_page((unsigned long)kbuf); + } + + *ppos = p; + return virtr + wrote ? : err; +} + +static ssize_t read_port(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long i = *ppos; + char __user *tmp = buf; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + while (count-- > 0 && i < 65536) { + if (__put_user(inb(i), tmp) < 0) + return -EFAULT; + i++; + tmp++; + } + *ppos = i; + return tmp-buf; +} + +static ssize_t write_port(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long i = *ppos; + const char __user *tmp = buf; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + while (count-- > 0 && i < 65536) { + char c; + + if (__get_user(c, tmp)) { + if (tmp > buf) + break; + return -EFAULT; + } + outb(c, i); + i++; + tmp++; + } + *ppos = i; + return tmp-buf; +} + +static ssize_t read_null(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return 0; +} + +static ssize_t write_null(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return count; +} + +static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to) +{ + return 0; +} + +static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from) +{ + size_t count = iov_iter_count(from); + iov_iter_advance(from, count); + return count; +} + +static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf, + struct splice_desc *sd) +{ + return sd->len; +} + +static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags) +{ + return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); +} + +static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter) +{ + size_t written = 0; + + while (iov_iter_count(iter)) { + size_t chunk = iov_iter_count(iter), n; + + if (chunk > PAGE_SIZE) + chunk = PAGE_SIZE; /* Just for latency reasons */ + n = iov_iter_zero(chunk, iter); + if (!n && iov_iter_count(iter)) + return written ? written : -EFAULT; + written += n; + if (signal_pending(current)) + return written ? 
written : -ERESTARTSYS; + cond_resched(); + } + return written; +} + +static int mmap_zero(struct file *file, struct vm_area_struct *vma) +{ +#ifndef CONFIG_MMU + return -ENOSYS; +#endif + if (vma->vm_flags & VM_SHARED) + return shmem_zero_setup(vma); + return 0; +} + +static ssize_t write_full(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return -ENOSPC; +} + +/* + * Special lseek() function for /dev/null and /dev/zero. Most notably, you + * can fopen() both devices with "a" now. This was previously impossible. + * -- SRB. + */ +static loff_t null_lseek(struct file *file, loff_t offset, int orig) +{ + return file->f_pos = 0; +} + +/* + * The memory devices use the full 32/64 bits of the offset, and so we cannot + * check against negative addresses: they are ok. The return value is weird, + * though, in that case (0). + * + * also note that seeking relative to the "end of file" isn't supported: + * it has no meaning, so it returns -EINVAL. + */ +static loff_t memory_lseek(struct file *file, loff_t offset, int orig) +{ + loff_t ret; + + mutex_lock(&file_inode(file)->i_mutex); + switch (orig) { + case SEEK_CUR: + offset += file->f_pos; + case SEEK_SET: + /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ + if (IS_ERR_VALUE((unsigned long long)offset)) { + ret = -EOVERFLOW; + break; + } + file->f_pos = offset; + ret = file->f_pos; + force_successful_syscall_return(); + break; + default: + ret = -EINVAL; + } + mutex_unlock(&file_inode(file)->i_mutex); + return ret; +} + +static int open_port(struct inode *inode, struct file *filp) +{ + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +#define zero_lseek null_lseek +#define full_lseek null_lseek +#define write_zero write_null +#define write_iter_zero write_iter_null +#define open_mem open_port +#define open_kmem open_mem + +static const struct file_operations __maybe_unused mem_fops = { + .llseek = memory_lseek, + .read = read_mem, + .write = write_mem, + .mmap = mmap_mem, + .open = open_mem, +#ifndef CONFIG_MMU + .get_unmapped_area = get_unmapped_area_mem, + .mmap_capabilities = memory_mmap_capabilities, +#endif +}; + +static const struct file_operations __maybe_unused kmem_fops = { + .llseek = memory_lseek, + .read = read_kmem, + .write = write_kmem, + .mmap = mmap_kmem, + .open = open_kmem, +#ifndef CONFIG_MMU + .get_unmapped_area = get_unmapped_area_mem, + .mmap_capabilities = memory_mmap_capabilities, +#endif +}; + +static const struct file_operations null_fops = { + .llseek = null_lseek, + .read = read_null, + .write = write_null, + .read_iter = read_iter_null, + .write_iter = write_iter_null, + .splice_write = splice_write_null, +}; + +static const struct file_operations __maybe_unused port_fops = { + .llseek = memory_lseek, + .read = read_port, + .write = write_port, + .open = open_port, +}; + +static const struct file_operations zero_fops = { + .llseek = zero_lseek, + .write = write_zero, + .read_iter = read_iter_zero, + .write_iter = write_iter_zero, + .mmap = mmap_zero, +#ifndef CONFIG_MMU + .mmap_capabilities = zero_mmap_capabilities, +#endif +}; + +static const struct file_operations full_fops = { + .llseek = full_lseek, + .read_iter = read_iter_zero, + .write = write_full, +}; + +static const struct memdev { + const char *name; + umode_t mode; + const struct file_operations *fops; + fmode_t fmode; +} devlist[] = { +#ifdef CONFIG_DEVMEM + [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, +#endif +#ifdef CONFIG_DEVKMEM + [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, +#endif + [3] = 
{ "null", 0666, &null_fops, 0 }, +#ifdef CONFIG_DEVPORT + [4] = { "port", 0, &port_fops, 0 }, +#endif + [5] = { "zero", 0666, &zero_fops, 0 }, + [7] = { "full", 0666, &full_fops, 0 }, + [8] = { "random", 0666, &random_fops, 0 }, + [9] = { "urandom", 0666, &urandom_fops, 0 }, +#ifdef CONFIG_PRINTK + [11] = { "kmsg", 0644, &kmsg_fops, 0 }, +#endif +}; + +static int memory_open(struct inode *inode, struct file *filp) +{ + int minor; + const struct memdev *dev; + + minor = iminor(inode); + if (minor >= ARRAY_SIZE(devlist)) + return -ENXIO; + + dev = &devlist[minor]; + if (!dev->fops) + return -ENXIO; + + filp->f_op = dev->fops; + filp->f_mode |= dev->fmode; + + if (dev->fops->open) + return dev->fops->open(inode, filp); + + return 0; +} + +static const struct file_operations memory_fops = { + .open = memory_open, + .llseek = noop_llseek, +}; + +static char *mem_devnode(struct device *dev, umode_t *mode) +{ + if (mode && devlist[MINOR(dev->devt)].mode) + *mode = devlist[MINOR(dev->devt)].mode; + return NULL; +} + +static struct class *mem_class; + +static int __init chr_dev_init(void) +{ + int minor; + + if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) + printk("unable to get major %d for memory devs\n", MEM_MAJOR); + + mem_class = class_create(THIS_MODULE, "mem"); + if (IS_ERR(mem_class)) + return PTR_ERR(mem_class); + + mem_class->devnode = mem_devnode; + for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { + if (!devlist[minor].name) + continue; + + /* + * Create /dev/port? + */ + if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) + continue; + + device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), + NULL, devlist[minor].name); + } + + return tty_init(); +} + +fs_initcall(chr_dev_init); diff --git a/kernel/drivers/char/misc.c b/kernel/drivers/char/misc.c new file mode 100644 index 000000000..9fd5a91e0 --- /dev/null +++ b/kernel/drivers/char/misc.c @@ -0,0 +1,301 @@ +/* + * linux/drivers/char/misc.c + * + * Generic misc open routine by Johan Myreen + * + * Based on code from Linus + * + * Teemu Rantanen's Microsoft Busmouse support and Derrick Cole's + * changes incorporated into 0.97pl4 + * by Peter Cervasio (pete%q106fm.uucp@wupost.wustl.edu) (08SEP92) + * See busmouse.c for particulars. + * + * Made things a lot more modular - easy to compile in just one or two + * of the misc drivers, as they are now completely independent. Linus. + * + * Support for loadable modules. 8-Sep-95 Philip Blundell + * + * Fixed a failing symbol register to free the device registration + * Alan Cox 21-Jan-96 + * + * Dynamic minors and /proc/mice by Alessandro Rubini. 26-Mar-96 + * + * Renamed to misc and miscdevice to be more accurate. Alan Cox 26-Mar-96 + * + * Handling of mouse minor numbers for kerneld: + * Idea by Jacques Gelinas , + * adapted by Bjorn Ekwall + * corrected by Alan Cox + * + * Changes for kmod (from kerneld): + * Cyrus Durgin + * + * Added devfs support.
Richard Gooch 10-Jan-1998 + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Head entry for the doubly linked miscdevice list + */ +static LIST_HEAD(misc_list); +static DEFINE_MUTEX(misc_mtx); + +/* + * Assigned numbers, used for dynamic minors + */ +#define DYNAMIC_MINORS 64 /* like dynamic majors */ +static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS); + +#ifdef CONFIG_PROC_FS +static void *misc_seq_start(struct seq_file *seq, loff_t *pos) +{ + mutex_lock(&misc_mtx); + return seq_list_start(&misc_list, *pos); +} + +static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return seq_list_next(v, &misc_list, pos); +} + +static void misc_seq_stop(struct seq_file *seq, void *v) +{ + mutex_unlock(&misc_mtx); +} + +static int misc_seq_show(struct seq_file *seq, void *v) +{ + const struct miscdevice *p = list_entry(v, struct miscdevice, list); + + seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : ""); + return 0; +} + + +static const struct seq_operations misc_seq_ops = { + .start = misc_seq_start, + .next = misc_seq_next, + .stop = misc_seq_stop, + .show = misc_seq_show, +}; + +static int misc_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &misc_seq_ops); +} + +static const struct file_operations misc_proc_fops = { + .owner = THIS_MODULE, + .open = misc_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +#endif + +static int misc_open(struct inode * inode, struct file * file) +{ + int minor = iminor(inode); + struct miscdevice *c; + int err = -ENODEV; + const struct file_operations *new_fops = NULL; + + mutex_lock(&misc_mtx); + + list_for_each_entry(c, &misc_list, list) { + if (c->minor == minor) { + new_fops = fops_get(c->fops); + break; + } + } + + if (!new_fops) { + mutex_unlock(&misc_mtx); + request_module("char-major-%d-%d", MISC_MAJOR, minor); + mutex_lock(&misc_mtx); + + list_for_each_entry(c, &misc_list, list) { + if (c->minor == minor) { + new_fops = fops_get(c->fops); + break; + } + } + if (!new_fops) + goto fail; + } + + /* + * Place the miscdevice in the file's + * private_data so it can be used by the + * file operations, including f_op->open below + */ + file->private_data = c; + + err = 0; + replace_fops(file, new_fops); + if (file->f_op->open) + err = file->f_op->open(inode,file); +fail: + mutex_unlock(&misc_mtx); + return err; +} + +static struct class *misc_class; + +static const struct file_operations misc_fops = { + .owner = THIS_MODULE, + .open = misc_open, + .llseek = noop_llseek, +}; + +/** + * misc_register - register a miscellaneous device + * @misc: device structure + * + * Register a miscellaneous device with the kernel. If the minor + * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned + * and placed in the minor field of the structure. For other cases + * the minor number requested is used. + * + * The structure passed is linked into the kernel and may not be + * destroyed until it has been unregistered. By default, an open() + * syscall to the device sets file->private_data to point to the + * structure. Drivers don't need open in fops for this. + * + * A zero is returned on success and a negative errno code for + * failure. 
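+ *
+ * A minimal usage sketch (hypothetical "foo" driver, not part of this
+ * file; foo_fops and the surrounding error handling are assumed):
+ *
+ *	static struct miscdevice foo_miscdev = {
+ *		.minor = MISC_DYNAMIC_MINOR,	// ask for a dynamic minor
+ *		.name  = "foo",			// node appears as /dev/foo
+ *		.fops  = &foo_fops,
+ *	};
+ *
+ *	err = misc_register(&foo_miscdev);
+ *	// on success, foo_miscdev.minor holds the assigned minor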
+ */ + +int misc_register(struct miscdevice * misc) +{ + dev_t dev; + int err = 0; + + INIT_LIST_HEAD(&misc->list); + + mutex_lock(&misc_mtx); + + if (misc->minor == MISC_DYNAMIC_MINOR) { + int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS); + if (i >= DYNAMIC_MINORS) { + err = -EBUSY; + goto out; + } + misc->minor = DYNAMIC_MINORS - i - 1; + set_bit(i, misc_minors); + } else { + struct miscdevice *c; + + list_for_each_entry(c, &misc_list, list) { + if (c->minor == misc->minor) { + err = -EBUSY; + goto out; + } + } + } + + dev = MKDEV(MISC_MAJOR, misc->minor); + + misc->this_device = + device_create_with_groups(misc_class, misc->parent, dev, + misc, misc->groups, "%s", misc->name); + if (IS_ERR(misc->this_device)) { + int i = DYNAMIC_MINORS - misc->minor - 1; + if (i < DYNAMIC_MINORS && i >= 0) + clear_bit(i, misc_minors); + err = PTR_ERR(misc->this_device); + goto out; + } + + /* + * Add it to the front, so that later devices can "override" + * earlier defaults + */ + list_add(&misc->list, &misc_list); + out: + mutex_unlock(&misc_mtx); + return err; +} + +/** + * misc_deregister - unregister a miscellaneous device + * @misc: device to unregister + * + * Unregister a miscellaneous device that was previously + * successfully registered with misc_register(). Success + * is indicated by a zero return, a negative errno code + * indicates an error. + */ + +int misc_deregister(struct miscdevice *misc) +{ + int i = DYNAMIC_MINORS - misc->minor - 1; + + if (WARN_ON(list_empty(&misc->list))) + return -EINVAL; + + mutex_lock(&misc_mtx); + list_del(&misc->list); + device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor)); + if (i < DYNAMIC_MINORS && i >= 0) + clear_bit(i, misc_minors); + mutex_unlock(&misc_mtx); + return 0; +} + +EXPORT_SYMBOL(misc_register); +EXPORT_SYMBOL(misc_deregister); + +static char *misc_devnode(struct device *dev, umode_t *mode) +{ + struct miscdevice *c = dev_get_drvdata(dev); + + if (mode && c->mode) + *mode = c->mode; + if (c->nodename) + return kstrdup(c->nodename, GFP_KERNEL); + return NULL; +} + +static int __init misc_init(void) +{ + int err; + +#ifdef CONFIG_PROC_FS + proc_create("misc", 0, NULL, &misc_proc_fops); +#endif + misc_class = class_create(THIS_MODULE, "misc"); + err = PTR_ERR(misc_class); + if (IS_ERR(misc_class)) + goto fail_remove; + + err = -EIO; + if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) + goto fail_printk; + misc_class->devnode = misc_devnode; + return 0; + +fail_printk: + printk("unable to get major %d for misc devices\n", MISC_MAJOR); + class_destroy(misc_class); +fail_remove: + remove_proc_entry("misc", NULL); + return err; +} +subsys_initcall(misc_init); diff --git a/kernel/drivers/char/mmtimer.c b/kernel/drivers/char/mmtimer.c new file mode 100644 index 000000000..3d6c0671e --- /dev/null +++ b/kernel/drivers/char/mmtimer.c @@ -0,0 +1,858 @@ +/* + * Timer device implementation for SGI SN platforms. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved. + * + * This driver exports an API that should be supportable by any HPET or IA-PC + * multimedia timer. The code below is currently specific to the SGI Altix + * SHub RTC, however. 
+ * + * 11/01/01 - jbarnes - initial revision + * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion + * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE + * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt + * support via the posix timer interface + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Jesse Barnes "); +MODULE_DESCRIPTION("SGI Altix RTC Timer"); +MODULE_LICENSE("GPL"); + +/* name of the device, usually in /dev */ +#define MMTIMER_NAME "mmtimer" +#define MMTIMER_DESC "SGI Altix RTC Timer" +#define MMTIMER_VERSION "2.1" + +#define RTC_BITS 55 /* 55 bits for this implementation */ + +static struct k_clock sgi_clock; + +extern unsigned long sn_rtc_cycles_per_second; + +#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) + +#define rtc_time() (*RTC_COUNTER_ADDR) + +static DEFINE_MUTEX(mmtimer_mutex); +static long mmtimer_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma); + +/* + * Period in femtoseconds (10^-15 s) + */ +static unsigned long mmtimer_femtoperiod = 0; + +static const struct file_operations mmtimer_fops = { + .owner = THIS_MODULE, + .mmap = mmtimer_mmap, + .unlocked_ioctl = mmtimer_ioctl, + .llseek = noop_llseek, +}; + +/* + * We only have comparison registers RTC1-4 currently available per + * node. RTC0 is used by SAL. + */ +/* Check for an RTC interrupt pending */ +static int mmtimer_int_pending(int comparator) +{ + if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & + SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) + return 1; + else + return 0; +} + +/* Clear the RTC interrupt pending bit */ +static void mmtimer_clr_int_pending(int comparator) +{ + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), + SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); +} + +/* Setup timer on comparator RTC1 */ +static void mmtimer_setup_int_0(int cpu, u64 expires) +{ + u64 val; + + /* Disable interrupt */ + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL); + + /* Initialize comparator value */ + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L); + + /* Clear pending bit */ + mmtimer_clr_int_pending(0); + + val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | + ((u64)cpu_physical_id(cpu) << + SH_RTC1_INT_CONFIG_PID_SHFT); + + /* Set configuration */ + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val); + + /* Enable RTC interrupts */ + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL); + + /* Initialize comparator value */ + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires); + + +} + +/* Setup timer on comparator RTC2 */ +static void mmtimer_setup_int_1(int cpu, u64 expires) +{ + u64 val; + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L); + + mmtimer_clr_int_pending(1); + + val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | + ((u64)cpu_physical_id(cpu) << + SH_RTC2_INT_CONFIG_PID_SHFT); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires); +} + +/* Setup timer on comparator RTC3 */ +static void mmtimer_setup_int_2(int cpu, u64 expires) +{ + u64 val; + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL); + + 
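+	/*
+	 * Same sequence as the RTC1/RTC2 helpers above: park the comparator
+	 * at -1, clear any stale pending bit, route the interrupt to the
+	 * target cpu, re-enable the comparator, and only then arm
+	 * SH_INT_CMPD with the real expiration value.
+	 */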
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L); + + mmtimer_clr_int_pending(2); + + val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | + ((u64)cpu_physical_id(cpu) << + SH_RTC3_INT_CONFIG_PID_SHFT); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL); + + HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires); +} + +/* + * This function must be called with interrupts disabled and preemption off + * in order to ensure that the setup succeeds in a deterministic time frame. + * It will check if the interrupt setup succeeded. + */ +static int mmtimer_setup(int cpu, int comparator, unsigned long expires, + u64 *set_completion_time) +{ + switch (comparator) { + case 0: + mmtimer_setup_int_0(cpu, expires); + break; + case 1: + mmtimer_setup_int_1(cpu, expires); + break; + case 2: + mmtimer_setup_int_2(cpu, expires); + break; + } + /* We might've missed our expiration time */ + *set_completion_time = rtc_time(); + if (*set_completion_time <= expires) + return 1; + + /* + * If an interrupt is already pending then it's okay; + * if not, then we failed + */ + return mmtimer_int_pending(comparator); +} + +static int mmtimer_disable_int(long nasid, int comparator) +{ + switch (comparator) { + case 0: + nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), + 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL); + break; + case 1: + nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), + 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL); + break; + case 2: + nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), + 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL); + break; + default: + return -EFAULT; + } + return 0; +} + +#define COMPARATOR 1 /* The comparator to use */ + +#define TIMER_OFF 0xbadcabLL /* Timer is not setup */ +#define TIMER_SET 0 /* Comparator is set for this timer */ + +#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40 + +/* There is one of these for each timer */ +struct mmtimer { + struct rb_node list; + struct k_itimer *timer; + int cpu; +}; + +struct mmtimer_node { + spinlock_t lock ____cacheline_aligned; + struct rb_root timer_head; + struct rb_node *next; + struct tasklet_struct tasklet; +}; +static struct mmtimer_node *timers; + +static unsigned mmtimer_interval_retry_increment = + MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT; +module_param(mmtimer_interval_retry_increment, uint, 0644); +MODULE_PARM_DESC(mmtimer_interval_retry_increment, + "RTC ticks to add to expiration on interval retry (default 40)"); + +/* + * Add a new mmtimer struct to the node's mmtimer list. + * This function assumes the struct mmtimer_node is locked.
+ */ +static void mmtimer_add_list(struct mmtimer *n) +{ + int nodeid = n->timer->it.mmtimer.node; + unsigned long expires = n->timer->it.mmtimer.expires; + struct rb_node **link = &timers[nodeid].timer_head.rb_node; + struct rb_node *parent = NULL; + struct mmtimer *x; + + /* + * Find the right place in the rbtree: + */ + while (*link) { + parent = *link; + x = rb_entry(parent, struct mmtimer, list); + + if (expires < x->timer->it.mmtimer.expires) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + /* + * Insert the timer to the rbtree and check whether it + * replaces the first pending timer + */ + rb_link_node(&n->list, parent, link); + rb_insert_color(&n->list, &timers[nodeid].timer_head); + + if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, + struct mmtimer, list)->timer->it.mmtimer.expires) + timers[nodeid].next = &n->list; +} + +/* + * Set the comparator for the next timer. + * This function assumes the struct mmtimer_node is locked. + */ +static void mmtimer_set_next_timer(int nodeid) +{ + struct mmtimer_node *n = &timers[nodeid]; + struct mmtimer *x; + struct k_itimer *t; + u64 expires, exp, set_completion_time; + int i; + +restart: + if (n->next == NULL) + return; + + x = rb_entry(n->next, struct mmtimer, list); + t = x->timer; + if (!t->it.mmtimer.incr) { + /* Not an interval timer */ + if (!mmtimer_setup(x->cpu, COMPARATOR, + t->it.mmtimer.expires, + &set_completion_time)) { + /* Late setup, fire now */ + tasklet_schedule(&n->tasklet); + } + return; + } + + /* Interval timer */ + i = 0; + expires = exp = t->it.mmtimer.expires; + while (!mmtimer_setup(x->cpu, COMPARATOR, expires, + &set_completion_time)) { + int to; + + i++; + expires = set_completion_time + + mmtimer_interval_retry_increment + (1 << i); + /* Calculate overruns as we go. */ + to = ((u64)(expires - exp) / t->it.mmtimer.incr); + if (to) { + t->it_overrun += to; + t->it.mmtimer.expires += t->it.mmtimer.incr * to; + exp = t->it.mmtimer.expires; + } + if (i > 20) { + printk(KERN_ALERT "mmtimer: cannot reschedule timer\n"); + t->it.mmtimer.clock = TIMER_OFF; + n->next = rb_next(&x->list); + rb_erase(&x->list, &n->timer_head); + kfree(x); + goto restart; + } + } +} + +/** + * mmtimer_ioctl - ioctl interface for /dev/mmtimer + * @file: file structure for the device + * @cmd: command to execute + * @arg: optional argument to command + * + * Executes the command specified by @cmd. Returns 0 for success, < 0 for + * failure. + * + * Valid commands: + * + * %MMTIMER_GETOFFSET - Should return the offset (relative to the start + * of the page where the registers are mapped) for the counter in question. + * + * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) + * seconds + * + * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address + * specified by @arg + * + * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter + * + * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace + * + * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it + * in the address specified by @arg. 
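+ *
+ * A minimal userspace sketch (illustrative only; assumes the MMTIMER_*
+ * ioctl macros from <linux/mmtimer.h> and a /dev/mmtimer node):
+ *
+ *	int fd = open("/dev/mmtimer", O_RDONLY);
+ *	unsigned long freq, count;
+ *	ioctl(fd, MMTIMER_GETFREQ, &freq);	/. RTC frequency in Hz ./
+ *	ioctl(fd, MMTIMER_GETCOUNTER, &count);	/. current RTC count ./
+ *	close(fd);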
+ */
+static long mmtimer_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = 0;
+
+	mutex_lock(&mmtimer_mutex);
+
+	switch (cmd) {
+	case MMTIMER_GETOFFSET:	/* offset of the counter */
+		/*
+		 * SN RTC registers are on their own 64k page
+		 */
+		if (PAGE_SIZE <= (1 << 16))
+			ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
+		else
+			ret = -ENOSYS;
+		break;
+
+	case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
+		if (copy_to_user((unsigned long __user *)arg,
+				&mmtimer_femtoperiod, sizeof(unsigned long)))
+			ret = -EFAULT;
+		break;
+
+	case MMTIMER_GETFREQ: /* frequency in Hz */
+		if (copy_to_user((unsigned long __user *)arg,
+				&sn_rtc_cycles_per_second,
+				sizeof(unsigned long)))
+			ret = -EFAULT;
+		break;
+
+	case MMTIMER_GETBITS: /* number of bits in the clock */
+		ret = RTC_BITS;
+		break;
+
+	case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
+		ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
+		break;
+
+	case MMTIMER_GETCOUNTER:
+		if (copy_to_user((unsigned long __user *)arg,
+				RTC_COUNTER_ADDR, sizeof(unsigned long)))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	mutex_unlock(&mmtimer_mutex);
+	return ret;
+}
+
+/**
+ * mmtimer_mmap - maps the clock's registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * Calls remap_pfn_range() to map the clock's registers into
+ * the calling process' address space.
+ */
+static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long mmtimer_addr;
+
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
+
+	if (vma->vm_flags & VM_WRITE)
+		return -EPERM;
+
+	if (PAGE_SIZE > (1 << 16))
+		return -ENOSYS;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
+	mmtimer_addr &= ~(PAGE_SIZE - 1);
+	mmtimer_addr &= 0xfffffffffffffffUL;
+
+	if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
+					PAGE_SIZE, vma->vm_page_prot)) {
+		printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static struct miscdevice mmtimer_miscdev = {
+	SGI_MMTIMER,
+	MMTIMER_NAME,
+	&mmtimer_fops
+};
+
+/*
+ * Posix Timer Interface
+ */
+
+static struct timespec sgi_clock_offset;
+static int sgi_clock_period;
+
+static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
+{
+	u64 nsec;
+
+	nsec = rtc_time() * sgi_clock_period
+			+ sgi_clock_offset.tv_nsec;
+	*tp = ns_to_timespec(nsec);
+	tp->tv_sec += sgi_clock_offset.tv_sec;
+	return 0;
+}
+
+static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
+{
+
+	u64 nsec;
+	u32 rem;
+
+	nsec = rtc_time() * sgi_clock_period;
+
+	sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+
+	if (rem <= tp->tv_nsec)
+		sgi_clock_offset.tv_nsec = tp->tv_nsec - rem;
+	else {
+		sgi_clock_offset.tv_nsec = tp->tv_nsec + NSEC_PER_SEC - rem;
+		sgi_clock_offset.tv_sec--;
+	}
+	return 0;
+}
+
+/**
+ * mmtimer_interrupt - timer interrupt handler
+ * @irq: irq received
+ * @dev_id: device the irq came from
+ *
+ * Called when one of the comparators matches the counter. This
+ * routine will send signals to processes that have requested
+ * them.
+ *
+ * This interrupt is run in an interrupt context
+ * by the SHUB. It is therefore safe to locally access SHub
+ * registers.
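+ *
+ * (The handler is registered with IRQF_PERCPU in mmtimer_init(); the
+ * base->cpu check below keeps a CPU from handling an expiration that
+ * belongs to a comparator armed by another CPU, since all CPUs on a
+ * node share the same timer list.)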
+ */
+static irqreturn_t
+mmtimer_interrupt(int irq, void *dev_id)
+{
+	unsigned long expires = 0;
+	int result = IRQ_NONE;
+	unsigned indx = cpu_to_node(smp_processor_id());
+	struct mmtimer *base;
+
+	spin_lock(&timers[indx].lock);
+	base = rb_entry(timers[indx].next, struct mmtimer, list);
+	if (base == NULL) {
+		spin_unlock(&timers[indx].lock);
+		return result;
+	}
+
+	if (base->cpu == smp_processor_id()) {
+		if (base->timer)
+			expires = base->timer->it.mmtimer.expires;
+		/* expires test won't work with shared irqs */
+		if ((mmtimer_int_pending(COMPARATOR) > 0) ||
+			(expires && (expires <= rtc_time()))) {
+			mmtimer_clr_int_pending(COMPARATOR);
+			tasklet_schedule(&timers[indx].tasklet);
+			result = IRQ_HANDLED;
+		}
+	}
+	spin_unlock(&timers[indx].lock);
+	return result;
+}
+
+static void mmtimer_tasklet(unsigned long data)
+{
+	int nodeid = data;
+	struct mmtimer_node *mn = &timers[nodeid];
+	struct mmtimer *x;
+	struct k_itimer *t;
+	unsigned long flags;
+
+	/* Send signal and deal with periodic signals */
+	spin_lock_irqsave(&mn->lock, flags);
+	if (!mn->next)
+		goto out;
+
+	x = rb_entry(mn->next, struct mmtimer, list);
+	t = x->timer;
+
+	if (t->it.mmtimer.clock == TIMER_OFF)
+		goto out;
+
+	t->it_overrun = 0;
+
+	mn->next = rb_next(&x->list);
+	rb_erase(&x->list, &mn->timer_head);
+
+	if (posix_timer_event(t, 0) != 0)
+		t->it_overrun++;
+
+	if (t->it.mmtimer.incr) {
+		t->it.mmtimer.expires += t->it.mmtimer.incr;
+		mmtimer_add_list(x);
+	} else {
+		/* Ensure we don't false trigger in mmtimer_interrupt */
+		t->it.mmtimer.clock = TIMER_OFF;
+		t->it.mmtimer.expires = 0;
+		kfree(x);
+	}
+	/* Set comparator for next timer, if there is one */
+	mmtimer_set_next_timer(nodeid);
+
+	t->it_overrun_last = t->it_overrun;
+out:
+	spin_unlock_irqrestore(&mn->lock, flags);
+}
+
+static int sgi_timer_create(struct k_itimer *timer)
+{
+	/* Ensure that a newly created timer is off */
+	timer->it.mmtimer.clock = TIMER_OFF;
+	return 0;
+}
+
+/* This does not really delete a timer. It just ensures
+ * that the timer is not active
+ *
+ * Assumption: it_lock is already held with IRQs disabled
+ */
+static int sgi_timer_del(struct k_itimer *timr)
+{
+	cnodeid_t nodeid = timr->it.mmtimer.node;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&timers[nodeid].lock, irqflags);
+	if (timr->it.mmtimer.clock != TIMER_OFF) {
+		unsigned long expires = timr->it.mmtimer.expires;
+		struct rb_node *n = timers[nodeid].timer_head.rb_node;
+		struct mmtimer *uninitialized_var(t);
+		int r = 0;
+
+		timr->it.mmtimer.clock = TIMER_OFF;
+		timr->it.mmtimer.expires = 0;
+
+		while (n) {
+			t = rb_entry(n, struct mmtimer, list);
+			if (t->timer == timr)
+				break;
+
+			if (expires < t->timer->it.mmtimer.expires)
+				n = n->rb_left;
+			else
+				n = n->rb_right;
+		}
+
+		if (!n) {
+			spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+			return 0;
+		}
+
+		if (timers[nodeid].next == n) {
+			timers[nodeid].next = rb_next(n);
+			r = 1;
+		}
+
+		rb_erase(n, &timers[nodeid].timer_head);
+		kfree(t);
+
+		if (r) {
+			mmtimer_disable_int(cnodeid_to_nasid(nodeid),
+				COMPARATOR);
+			mmtimer_set_next_timer(nodeid);
+		}
+	}
+	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+	return 0;
+}
+
+/* Assumption: it_lock is already held with IRQs disabled */
+static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
+{
+
+	if (timr->it.mmtimer.clock == TIMER_OFF) {
+		cur_setting->it_interval.tv_nsec = 0;
+		cur_setting->it_interval.tv_sec = 0;
+		cur_setting->it_value.tv_nsec = 0;
+		cur_setting->it_value.tv_sec = 0;
+		return;
+	}
+
+	cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
+	cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
+}
+
+
+static int sgi_timer_set(struct k_itimer *timr, int flags,
+	struct itimerspec *new_setting,
+	struct itimerspec *old_setting)
+{
+	unsigned long when, period, irqflags;
+	int err = 0;
+	cnodeid_t nodeid;
+	struct mmtimer *base;
+	struct rb_node *n;
+
+	if (old_setting)
+		sgi_timer_get(timr, old_setting);
+
+	sgi_timer_del(timr);
+	when = timespec_to_ns(&new_setting->it_value);
+	period = timespec_to_ns(&new_setting->it_interval);
+
+	if (when == 0)
+		/* Clear timer */
+		return 0;
+
+	base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
+	if (base == NULL)
+		return -ENOMEM;
+
+	if (flags & TIMER_ABSTIME) {
+		struct timespec n;
+		unsigned long now;
+
+		getnstimeofday(&n);
+		now = timespec_to_ns(&n);
+		if (when > now)
+			when -= now;
+		else
+			/* Fire the timer immediately */
+			when = 0;
+	}
+
+	/*
+	 * Convert to sgi clock period. Need to keep rtc_time() as near as possible
+	 * to getnstimeofday() in order to be as faithful as possible to the time
+	 * specified.
+	 */
+	when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();
+	period = (period + sgi_clock_period - 1) / sgi_clock_period;
+
+	/*
+	 * We are allocating a local SHub comparator. If we would be moved to another
+	 * cpu then another SHub may be local to us. Prohibit that by switching off
+	 * preemption.
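+	 *
+	 * A worked example of the tick conversion above (illustrative):
+	 * with sn_rtc_cycles_per_second at 50 MHz, sgi_clock_period is
+	 * 20 ns, so a relative expiry of 1 ms converts to 50000 RTC
+	 * ticks added to the current rtc_time() reading.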
+ */ + preempt_disable(); + + nodeid = cpu_to_node(smp_processor_id()); + + /* Lock the node timer structure */ + spin_lock_irqsave(&timers[nodeid].lock, irqflags); + + base->timer = timr; + base->cpu = smp_processor_id(); + + timr->it.mmtimer.clock = TIMER_SET; + timr->it.mmtimer.node = nodeid; + timr->it.mmtimer.incr = period; + timr->it.mmtimer.expires = when; + + n = timers[nodeid].next; + + /* Add the new struct mmtimer to node's timer list */ + mmtimer_add_list(base); + + if (timers[nodeid].next == n) { + /* No need to reprogram comparator for now */ + spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); + preempt_enable(); + return err; + } + + /* We need to reprogram the comparator */ + if (n) + mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); + + mmtimer_set_next_timer(nodeid); + + /* Unlock the node timer structure */ + spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); + + preempt_enable(); + + return err; +} + +static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp) +{ + tp->tv_sec = 0; + tp->tv_nsec = sgi_clock_period; + return 0; +} + +static struct k_clock sgi_clock = { + .clock_set = sgi_clock_set, + .clock_get = sgi_clock_get, + .clock_getres = sgi_clock_getres, + .timer_create = sgi_timer_create, + .timer_set = sgi_timer_set, + .timer_del = sgi_timer_del, + .timer_get = sgi_timer_get +}; + +/** + * mmtimer_init - device initialization routine + * + * Does initial setup for the mmtimer device. + */ +static int __init mmtimer_init(void) +{ + cnodeid_t node, maxn = -1; + + if (!ia64_platform_is("sn2")) + return 0; + + /* + * Sanity check the cycles/sec variable + */ + if (sn_rtc_cycles_per_second < 100000) { + printk(KERN_ERR "%s: unable to determine clock frequency\n", + MMTIMER_NAME); + goto out1; + } + + mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / + 2) / sn_rtc_cycles_per_second; + + if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { + printk(KERN_WARNING "%s: unable to allocate interrupt.", + MMTIMER_NAME); + goto out1; + } + + if (misc_register(&mmtimer_miscdev)) { + printk(KERN_ERR "%s: failed to register device\n", + MMTIMER_NAME); + goto out2; + } + + /* Get max numbered node, calculate slots needed */ + for_each_online_node(node) { + maxn = node; + } + maxn++; + + /* Allocate list of node ptrs to mmtimer_t's */ + timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); + if (!timers) { + printk(KERN_ERR "%s: failed to allocate memory for device\n", + MMTIMER_NAME); + goto out3; + } + + /* Initialize struct mmtimer's for each online node */ + for_each_online_node(node) { + spin_lock_init(&timers[node].lock); + tasklet_init(&timers[node].tasklet, mmtimer_tasklet, + (unsigned long) node); + } + + sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second; + posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock); + + printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION, + sn_rtc_cycles_per_second/(unsigned long)1E6); + + return 0; + +out3: + misc_deregister(&mmtimer_miscdev); +out2: + free_irq(SGI_MMTIMER_VECTOR, NULL); +out1: + return -1; +} + +module_init(mmtimer_init); diff --git a/kernel/drivers/char/msm_smd_pkt.c b/kernel/drivers/char/msm_smd_pkt.c new file mode 100644 index 000000000..ba82a06d9 --- /dev/null +++ b/kernel/drivers/char/msm_smd_pkt.c @@ -0,0 +1,465 @@ +/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +/* + * SMD Packet Driver -- Provides userspace interface to SMD packet ports. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define NUM_SMD_PKT_PORTS 9 +#define DEVICE_NAME "smdpkt" +#define MAX_BUF_SIZE 2048 + +struct smd_pkt_dev { + struct cdev cdev; + struct device *devicep; + + struct smd_channel *ch; + int open_count; + struct mutex ch_lock; + struct mutex rx_lock; + struct mutex tx_lock; + wait_queue_head_t ch_read_wait_queue; + wait_queue_head_t ch_opened_wait_queue; + + int i; + + unsigned char tx_buf[MAX_BUF_SIZE]; + unsigned char rx_buf[MAX_BUF_SIZE]; + int remote_open; + +} *smd_pkt_devp[NUM_SMD_PKT_PORTS]; + +struct class *smd_pkt_classp; +static dev_t smd_pkt_number; + +static int msm_smd_pkt_debug_enable; +module_param_named(debug_enable, msm_smd_pkt_debug_enable, + int, S_IRUGO | S_IWUSR | S_IWGRP); + +#ifdef DEBUG +#define D_DUMP_BUFFER(prestr, cnt, buf) do { \ + int i; \ + if (msm_smd_pkt_debug_enable) { \ + pr_debug("%s", prestr); \ + for (i = 0; i < cnt; i++) \ + pr_debug("%.2x", buf[i]); \ + pr_debug("\n"); \ + } \ + } while (0) +#else +#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0) +#endif + +#ifdef DEBUG +#define DBG(x...) do { \ + if (msm_smd_pkt_debug_enable) \ + pr_debug(x); \ + } while (0) +#else +#define DBG(x...) 
do {} while (0) +#endif + +static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp) +{ + int sz; + + if (!smd_pkt_devp || !smd_pkt_devp->ch) + return; + + sz = smd_cur_packet_size(smd_pkt_devp->ch); + if (sz == 0) { + DBG("no packet\n"); + return; + } + if (sz > smd_read_avail(smd_pkt_devp->ch)) { + DBG("incomplete packet\n"); + return; + } + + DBG("waking up reader\n"); + wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue); +} + +static int smd_pkt_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int r, bytes_read; + struct smd_pkt_dev *smd_pkt_devp; + struct smd_channel *chl; + + DBG("read %d bytes\n", count); + if (count > MAX_BUF_SIZE) + return -EINVAL; + + smd_pkt_devp = file->private_data; + if (!smd_pkt_devp || !smd_pkt_devp->ch) + return -EINVAL; + + chl = smd_pkt_devp->ch; +wait_for_packet: + r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue, + (smd_cur_packet_size(chl) > 0 && + smd_read_avail(chl) >= + smd_cur_packet_size(chl))); + + if (r < 0) { + if (r != -ERESTARTSYS) + pr_err("wait returned %d\n", r); + return r; + } + + mutex_lock(&smd_pkt_devp->rx_lock); + + bytes_read = smd_cur_packet_size(smd_pkt_devp->ch); + if (bytes_read == 0 || + bytes_read < smd_read_avail(smd_pkt_devp->ch)) { + mutex_unlock(&smd_pkt_devp->rx_lock); + DBG("Nothing to read\n"); + goto wait_for_packet; + } + + if (bytes_read > count) { + mutex_unlock(&smd_pkt_devp->rx_lock); + pr_info("packet size %d > buffer size %d", bytes_read, count); + return -EINVAL; + } + + r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read); + if (r != bytes_read) { + mutex_unlock(&smd_pkt_devp->rx_lock); + pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r); + return -EIO; + } + + D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf); + r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read); + mutex_unlock(&smd_pkt_devp->rx_lock); + if (r) { + pr_err("copy_to_user failed %d\n", r); + return -EFAULT; + } + + DBG("read complete %d bytes\n", bytes_read); + check_and_wakeup_reader(smd_pkt_devp); + + return bytes_read; +} + +static int smd_pkt_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int r; + struct smd_pkt_dev *smd_pkt_devp; + + if (count > MAX_BUF_SIZE) + return -EINVAL; + + DBG("writing %d bytes\n", count); + + smd_pkt_devp = file->private_data; + if (!smd_pkt_devp || !smd_pkt_devp->ch) + return -EINVAL; + + mutex_lock(&smd_pkt_devp->tx_lock); + if (smd_write_avail(smd_pkt_devp->ch) < count) { + mutex_unlock(&smd_pkt_devp->tx_lock); + DBG("Not enough space to write\n"); + return -ENOMEM; + } + + D_DUMP_BUFFER("write: ", count, buf); + r = copy_from_user(smd_pkt_devp->tx_buf, buf, count); + if (r) { + mutex_unlock(&smd_pkt_devp->tx_lock); + pr_err("copy_from_user failed %d\n", r); + return -EFAULT; + } + + r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count); + if (r != count) { + mutex_unlock(&smd_pkt_devp->tx_lock); + pr_err("smd_write failed to write %d bytes: %d.\n", count, r); + return -EIO; + } + mutex_unlock(&smd_pkt_devp->tx_lock); + + DBG("wrote %d bytes\n", count); + return count; +} + +static unsigned int smd_pkt_poll(struct file *file, poll_table *wait) +{ + struct smd_pkt_dev *smd_pkt_devp; + unsigned int mask = 0; + + smd_pkt_devp = file->private_data; + if (!smd_pkt_devp) + return POLLERR; + + DBG("poll waiting\n"); + poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait); + if (smd_read_avail(smd_pkt_devp->ch)) + mask |= POLLIN | POLLRDNORM; + + DBG("poll return\n"); + return 
mask; +} + +static void smd_pkt_ch_notify(void *priv, unsigned event) +{ + struct smd_pkt_dev *smd_pkt_devp = priv; + + if (smd_pkt_devp->ch == 0) + return; + + switch (event) { + case SMD_EVENT_DATA: + DBG("data\n"); + check_and_wakeup_reader(smd_pkt_devp); + break; + + case SMD_EVENT_OPEN: + DBG("remote open\n"); + smd_pkt_devp->remote_open = 1; + wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue); + break; + + case SMD_EVENT_CLOSE: + smd_pkt_devp->remote_open = 0; + pr_info("remote closed\n"); + break; + + default: + pr_err("unknown event %d\n", event); + break; + } +} + +static char *smd_pkt_dev_name[] = { + "smdcntl0", + "smdcntl1", + "smdcntl2", + "smdcntl3", + "smdcntl4", + "smdcntl5", + "smdcntl6", + "smdcntl7", + "smd22", +}; + +static char *smd_ch_name[] = { + "DATA5_CNTL", + "DATA6_CNTL", + "DATA7_CNTL", + "DATA8_CNTL", + "DATA9_CNTL", + "DATA12_CNTL", + "DATA13_CNTL", + "DATA14_CNTL", + "DATA22", +}; + +static int smd_pkt_open(struct inode *inode, struct file *file) +{ + int r = 0; + struct smd_pkt_dev *smd_pkt_devp; + + smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev); + if (!smd_pkt_devp) + return -EINVAL; + + file->private_data = smd_pkt_devp; + + mutex_lock(&smd_pkt_devp->ch_lock); + if (smd_pkt_devp->open_count == 0) { + r = smd_open(smd_ch_name[smd_pkt_devp->i], + &smd_pkt_devp->ch, smd_pkt_devp, + smd_pkt_ch_notify); + if (r < 0) { + pr_err("smd_open failed for %s, %d\n", + smd_ch_name[smd_pkt_devp->i], r); + goto out; + } + + r = wait_event_interruptible_timeout( + smd_pkt_devp->ch_opened_wait_queue, + smd_pkt_devp->remote_open, + msecs_to_jiffies(2 * HZ)); + if (r == 0) + r = -ETIMEDOUT; + + if (r < 0) { + pr_err("wait returned %d\n", r); + smd_close(smd_pkt_devp->ch); + smd_pkt_devp->ch = 0; + } else { + smd_pkt_devp->open_count++; + r = 0; + } + } +out: + mutex_unlock(&smd_pkt_devp->ch_lock); + return r; +} + +static int smd_pkt_release(struct inode *inode, struct file *file) +{ + int r = 0; + struct smd_pkt_dev *smd_pkt_devp = file->private_data; + + if (!smd_pkt_devp) + return -EINVAL; + + mutex_lock(&smd_pkt_devp->ch_lock); + if (--smd_pkt_devp->open_count == 0) { + r = smd_close(smd_pkt_devp->ch); + smd_pkt_devp->ch = 0; + } + mutex_unlock(&smd_pkt_devp->ch_lock); + + return r; +} + +static const struct file_operations smd_pkt_fops = { + .owner = THIS_MODULE, + .open = smd_pkt_open, + .release = smd_pkt_release, + .read = smd_pkt_read, + .write = smd_pkt_write, + .poll = smd_pkt_poll, +}; + +static int __init smd_pkt_init(void) +{ + int i; + int r; + + r = alloc_chrdev_region(&smd_pkt_number, 0, + NUM_SMD_PKT_PORTS, DEVICE_NAME); + if (r) { + pr_err("alloc_chrdev_region() failed %d\n", r); + return r; + } + + smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME); + if (IS_ERR(smd_pkt_classp)) { + r = PTR_ERR(smd_pkt_classp); + pr_err("class_create() failed %d\n", r); + goto unreg_chardev; + } + + for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { + smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), + GFP_KERNEL); + if (!smd_pkt_devp[i]) { + pr_err("kmalloc() failed\n"); + goto clean_cdevs; + } + + smd_pkt_devp[i]->i = i; + + init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue); + smd_pkt_devp[i]->remote_open = 0; + init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue); + + mutex_init(&smd_pkt_devp[i]->ch_lock); + mutex_init(&smd_pkt_devp[i]->rx_lock); + mutex_init(&smd_pkt_devp[i]->tx_lock); + + cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops); + smd_pkt_devp[i]->cdev.owner = THIS_MODULE; + + r = cdev_add(&smd_pkt_devp[i]->cdev, 
+ (smd_pkt_number + i), 1); + if (r) { + pr_err("cdev_add() failed %d\n", r); + kfree(smd_pkt_devp[i]); + goto clean_cdevs; + } + + smd_pkt_devp[i]->devicep = + device_create(smd_pkt_classp, NULL, + (smd_pkt_number + i), NULL, + smd_pkt_dev_name[i]); + if (IS_ERR(smd_pkt_devp[i]->devicep)) { + r = PTR_ERR(smd_pkt_devp[i]->devicep); + pr_err("device_create() failed %d\n", r); + cdev_del(&smd_pkt_devp[i]->cdev); + kfree(smd_pkt_devp[i]); + goto clean_cdevs; + } + + } + + pr_info("SMD Packet Port Driver Initialized.\n"); + return 0; + +clean_cdevs: + if (i > 0) { + while (--i >= 0) { + mutex_destroy(&smd_pkt_devp[i]->ch_lock); + mutex_destroy(&smd_pkt_devp[i]->rx_lock); + mutex_destroy(&smd_pkt_devp[i]->tx_lock); + cdev_del(&smd_pkt_devp[i]->cdev); + kfree(smd_pkt_devp[i]); + device_destroy(smd_pkt_classp, + MKDEV(MAJOR(smd_pkt_number), i)); + } + } + + class_destroy(smd_pkt_classp); +unreg_chardev: + unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS); + return r; +} +module_init(smd_pkt_init); + +static void __exit smd_pkt_cleanup(void) +{ + int i; + + for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { + mutex_destroy(&smd_pkt_devp[i]->ch_lock); + mutex_destroy(&smd_pkt_devp[i]->rx_lock); + mutex_destroy(&smd_pkt_devp[i]->tx_lock); + cdev_del(&smd_pkt_devp[i]->cdev); + kfree(smd_pkt_devp[i]); + device_destroy(smd_pkt_classp, + MKDEV(MAJOR(smd_pkt_number), i)); + } + + class_destroy(smd_pkt_classp); + unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS); +} +module_exit(smd_pkt_cleanup); + +MODULE_DESCRIPTION("MSM Shared Memory Packet Port"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/char/mspec.c b/kernel/drivers/char/mspec.c new file mode 100644 index 000000000..f1d7fa45c --- /dev/null +++ b/kernel/drivers/char/mspec.c @@ -0,0 +1,450 @@ +/* + * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights + * reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +/* + * SN Platform Special Memory (mspec) Support + * + * This driver exports the SN special memory (mspec) facility to user + * processes. + * There are three types of memory made available thru this driver: + * fetchops, uncached and cached. + * + * Fetchops are atomic memory operations that are implemented in the + * memory controller on SGI SN hardware. + * + * Uncached are used for memory write combining feature of the ia64 + * cpu. + * + * Cached are used for areas of memory that are used as cached addresses + * on our partition and used as uncached addresses from other partitions. + * Due to a design constraint of the SN2 Shub, you can not have processors + * on the same FSB perform both a cached and uncached reference to the + * same cache line. These special memory cached regions prevent the + * kernel from ever dropping in a TLB entry and therefore prevent the + * processor from ever speculating a cache line from this page. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define FETCHOP_ID "SGI Fetchop," +#define CACHED_ID "Cached," +#define UNCACHED_ID "Uncached" +#define REVISION "4.0" +#define MSPEC_BASENAME "mspec" + +/* + * Page types allocated by the device. 
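+ *
+ * The device nodes registered below hand out these types: /dev/sgi_fetchop
+ * allocates MSPEC_FETCHOP pages, /dev/mspec_cached MSPEC_CACHED and
+ * /dev/mspec_uncached MSPEC_UNCACHED. A minimal userspace sketch
+ * (illustrative only; the mapping must be shared and writable, see
+ * mspec_mmap() below):
+ *
+ *	int fd = open("/dev/mspec_uncached", O_RDWR);
+ *	void *p = mmap(NULL, sysconf(_SC_PAGESIZE),
+ *		       PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);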
+ */ +enum mspec_page_type { + MSPEC_FETCHOP = 1, + MSPEC_CACHED, + MSPEC_UNCACHED +}; + +#ifdef CONFIG_SGI_SN +static int is_sn2; +#else +#define is_sn2 0 +#endif + +/* + * One of these structures is allocated when an mspec region is mmaped. The + * structure is pointed to by the vma->vm_private_data field in the vma struct. + * This structure is used to record the addresses of the mspec pages. + * This structure is shared by all vma's that are split off from the + * original vma when split_vma()'s are done. + * + * The refcnt is incremented atomically because mm->mmap_sem does not + * protect in fork case where multiple tasks share the vma_data. + */ +struct vma_data { + atomic_t refcnt; /* Number of vmas sharing the data. */ + spinlock_t lock; /* Serialize access to this structure. */ + int count; /* Number of pages allocated. */ + enum mspec_page_type type; /* Type of pages allocated. */ + int flags; /* See VMD_xxx below. */ + unsigned long vm_start; /* Original (unsplit) base. */ + unsigned long vm_end; /* Original (unsplit) end. */ + unsigned long maddr[0]; /* Array of MSPEC addresses. */ +}; + +#define VMD_VMALLOCED 0x1 /* vmalloc'd rather than kmalloc'd */ + +/* used on shub2 to clear FOP cache in the HUB */ +static unsigned long scratch_page[MAX_NUMNODES]; +#define SH2_AMO_CACHE_ENTRIES 4 + +static inline int +mspec_zero_block(unsigned long addr, int len) +{ + int status; + + if (is_sn2) { + if (is_shub2()) { + int nid; + void *p; + int i; + + nid = nasid_to_cnodeid(get_node_number(__pa(addr))); + p = (void *)TO_AMO(scratch_page[nid]); + + for (i=0; i < SH2_AMO_CACHE_ENTRIES; i++) { + FETCHOP_LOAD_OP(p, FETCHOP_LOAD); + p += FETCHOP_VAR_SIZE; + } + } + + status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len, + BTE_WACQUIRE | BTE_ZERO_FILL, NULL); + } else { + memset((char *) addr, 0, len); + status = 0; + } + return status; +} + +/* + * mspec_open + * + * Called when a device mapping is created by a means other than mmap + * (via fork, munmap, etc.). Increments the reference count on the + * underlying mspec data so it is not freed prematurely. + */ +static void +mspec_open(struct vm_area_struct *vma) +{ + struct vma_data *vdata; + + vdata = vma->vm_private_data; + atomic_inc(&vdata->refcnt); +} + +/* + * mspec_close + * + * Called when unmapping a device mapping. Frees all mspec pages + * belonging to all the vma's sharing this vma_data structure. + */ +static void +mspec_close(struct vm_area_struct *vma) +{ + struct vma_data *vdata; + int index, last_index; + unsigned long my_page; + + vdata = vma->vm_private_data; + + if (!atomic_dec_and_test(&vdata->refcnt)) + return; + + last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT; + for (index = 0; index < last_index; index++) { + if (vdata->maddr[index] == 0) + continue; + /* + * Clear the page before sticking it back + * into the pool. + */ + my_page = vdata->maddr[index]; + vdata->maddr[index] = 0; + if (!mspec_zero_block(my_page, PAGE_SIZE)) + uncached_free_page(my_page, 1); + else + printk(KERN_WARNING "mspec_close(): " + "failed to zero page %ld\n", my_page); + } + + if (vdata->flags & VMD_VMALLOCED) + vfree(vdata); + else + kfree(vdata); +} + +/* + * mspec_fault + * + * Creates a mspec page and maps it to user space. 
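+ *
+ * (On success this returns VM_FAULT_NOPAGE, because vm_insert_pfn()
+ * installs the pte directly rather than handing a struct page back to
+ * the fault path; VM_FAULT_OOM is returned when no uncached page can
+ * be allocated.)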
+ */ +static int +mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned long paddr, maddr; + unsigned long pfn; + pgoff_t index = vmf->pgoff; + struct vma_data *vdata = vma->vm_private_data; + + maddr = (volatile unsigned long) vdata->maddr[index]; + if (maddr == 0) { + maddr = uncached_alloc_page(numa_node_id(), 1); + if (maddr == 0) + return VM_FAULT_OOM; + + spin_lock(&vdata->lock); + if (vdata->maddr[index] == 0) { + vdata->count++; + vdata->maddr[index] = maddr; + } else { + uncached_free_page(maddr, 1); + maddr = vdata->maddr[index]; + } + spin_unlock(&vdata->lock); + } + + if (vdata->type == MSPEC_FETCHOP) + paddr = TO_AMO(maddr); + else + paddr = maddr & ~__IA64_UNCACHED_OFFSET; + + pfn = paddr >> PAGE_SHIFT; + + /* + * vm_insert_pfn can fail with -EBUSY, but in that case it will + * be because another thread has installed the pte first, so it + * is no problem. + */ + vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct mspec_vm_ops = { + .open = mspec_open, + .close = mspec_close, + .fault = mspec_fault, +}; + +/* + * mspec_mmap + * + * Called when mmapping the device. Initializes the vma with a fault handler + * and private data structure necessary to allocate, track, and free the + * underlying pages. + */ +static int +mspec_mmap(struct file *file, struct vm_area_struct *vma, + enum mspec_page_type type) +{ + struct vma_data *vdata; + int pages, vdata_size, flags = 0; + + if (vma->vm_pgoff != 0) + return -EINVAL; + + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + + if ((vma->vm_flags & VM_WRITE) == 0) + return -EPERM; + + pages = vma_pages(vma); + vdata_size = sizeof(struct vma_data) + pages * sizeof(long); + if (vdata_size <= PAGE_SIZE) + vdata = kzalloc(vdata_size, GFP_KERNEL); + else { + vdata = vzalloc(vdata_size); + flags = VMD_VMALLOCED; + } + if (!vdata) + return -ENOMEM; + + vdata->vm_start = vma->vm_start; + vdata->vm_end = vma->vm_end; + vdata->flags = flags; + vdata->type = type; + spin_lock_init(&vdata->lock); + atomic_set(&vdata->refcnt, 1); + vma->vm_private_data = vdata; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &mspec_vm_ops; + + return 0; +} + +static int +fetchop_mmap(struct file *file, struct vm_area_struct *vma) +{ + return mspec_mmap(file, vma, MSPEC_FETCHOP); +} + +static int +cached_mmap(struct file *file, struct vm_area_struct *vma) +{ + return mspec_mmap(file, vma, MSPEC_CACHED); +} + +static int +uncached_mmap(struct file *file, struct vm_area_struct *vma) +{ + return mspec_mmap(file, vma, MSPEC_UNCACHED); +} + +static const struct file_operations fetchop_fops = { + .owner = THIS_MODULE, + .mmap = fetchop_mmap, + .llseek = noop_llseek, +}; + +static struct miscdevice fetchop_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgi_fetchop", + .fops = &fetchop_fops +}; + +static const struct file_operations cached_fops = { + .owner = THIS_MODULE, + .mmap = cached_mmap, + .llseek = noop_llseek, +}; + +static struct miscdevice cached_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "mspec_cached", + .fops = &cached_fops +}; + +static const struct file_operations uncached_fops = { + .owner = THIS_MODULE, + .mmap = uncached_mmap, + .llseek = noop_llseek, +}; + +static struct miscdevice uncached_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "mspec_uncached", + 
.fops = &uncached_fops +}; + +/* + * mspec_init + * + * Called at boot time to initialize the mspec facility. + */ +static int __init +mspec_init(void) +{ + int ret; + int nid; + + /* + * The fetchop device only works on SN2 hardware, uncached and cached + * memory drivers should both be valid on all ia64 hardware + */ +#ifdef CONFIG_SGI_SN + if (ia64_platform_is("sn2")) { + is_sn2 = 1; + if (is_shub2()) { + ret = -ENOMEM; + for_each_node_state(nid, N_ONLINE) { + int actual_nid; + int nasid; + unsigned long phys; + + scratch_page[nid] = uncached_alloc_page(nid, 1); + if (scratch_page[nid] == 0) + goto free_scratch_pages; + phys = __pa(scratch_page[nid]); + nasid = get_node_number(phys); + actual_nid = nasid_to_cnodeid(nasid); + if (actual_nid != nid) + goto free_scratch_pages; + } + } + + ret = misc_register(&fetchop_miscdev); + if (ret) { + printk(KERN_ERR + "%s: failed to register device %i\n", + FETCHOP_ID, ret); + goto free_scratch_pages; + } + } +#endif + ret = misc_register(&cached_miscdev); + if (ret) { + printk(KERN_ERR "%s: failed to register device %i\n", + CACHED_ID, ret); + if (is_sn2) + misc_deregister(&fetchop_miscdev); + goto free_scratch_pages; + } + ret = misc_register(&uncached_miscdev); + if (ret) { + printk(KERN_ERR "%s: failed to register device %i\n", + UNCACHED_ID, ret); + misc_deregister(&cached_miscdev); + if (is_sn2) + misc_deregister(&fetchop_miscdev); + goto free_scratch_pages; + } + + printk(KERN_INFO "%s %s initialized devices: %s %s %s\n", + MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "", + CACHED_ID, UNCACHED_ID); + + return 0; + + free_scratch_pages: + for_each_node(nid) { + if (scratch_page[nid] != 0) + uncached_free_page(scratch_page[nid], 1); + } + return ret; +} + +static void __exit +mspec_exit(void) +{ + int nid; + + misc_deregister(&uncached_miscdev); + misc_deregister(&cached_miscdev); + if (is_sn2) { + misc_deregister(&fetchop_miscdev); + + for_each_node(nid) { + if (scratch_page[nid] != 0) + uncached_free_page(scratch_page[nid], 1); + } + } +} + +module_init(mspec_init); +module_exit(mspec_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc. "); +MODULE_DESCRIPTION("Driver for SGI SN special memory operations"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/mwave/3780i.c b/kernel/drivers/char/mwave/3780i.c new file mode 100644 index 000000000..28740046b --- /dev/null +++ b/kernel/drivers/char/mwave/3780i.c @@ -0,0 +1,738 @@ +/* +* +* 3780i.c -- helper routines for the 3780i DSP +* +* +* Written By: Mike Sullivan IBM Corporation +* +* Copyright (C) 1999 IBM Corporation +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* NO WARRANTY +* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is +* solely responsible for determining the appropriateness of using and +* distributing the Program and assumes all risks associated with its +* exercise of rights under this Agreement, including but not limited to +* the risks and costs of program errors, damage to or loss of data, +* programs or equipment, and unavailability or interruption of operations. +* +* DISCLAIMER OF LIABILITY +* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +* +* +* 10/23/2000 - Alpha Release +* First release to the public +*/ + +#include +#include +#include +#include +#include +#include /* cond_resched() */ + +#include +#include +#include +#include "smapi.h" +#include "mwavedd.h" +#include "3780i.h" + +static DEFINE_SPINLOCK(dsp_lock); + +static void PaceMsaAccess(unsigned short usDspBaseIO) +{ + cond_resched(); + udelay(100); + cond_resched(); +} + +unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO, + unsigned long ulMsaAddr) +{ + unsigned long flags; + unsigned short val; + + PRINTK_3(TRACE_3780I, + "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n", + usDspBaseIO, ulMsaAddr); + + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16)); + val = InWordDsp(DSP_MsaDataDSISHigh); + spin_unlock_irqrestore(&dsp_lock, flags); + + PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val); + + return val; +} + +void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO, + unsigned long ulMsaAddr, unsigned short usValue) +{ + unsigned long flags; + + PRINTK_4(TRACE_3780I, + "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n", + usDspBaseIO, ulMsaAddr, usValue); + + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16)); + OutWordDsp(DSP_MsaDataDSISHigh, usValue); + spin_unlock_irqrestore(&dsp_lock, flags); +} + +static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex, + unsigned char ucValue) +{ + DSP_ISA_SLAVE_CONTROL rSlaveControl; + DSP_ISA_SLAVE_CONTROL rSlaveControl_Save; + + + PRINTK_4(TRACE_3780I, + "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n", + usDspBaseIO, uIndex, ucValue); + + MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl); + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n", + MKBYTE(rSlaveControl)); + + rSlaveControl_Save = rSlaveControl; + rSlaveControl.ConfigMode = TRUE; + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n", + MKBYTE(rSlaveControl)); + + OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl)); + OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex); + OutByteDsp(DSP_ConfigData, ucValue); + OutByteDsp(DSP_IsaSlaveControl, 
MKBYTE(rSlaveControl_Save)); + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n"); + + +} + +#if 0 +unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO, + unsigned uIndex) +{ + DSP_ISA_SLAVE_CONTROL rSlaveControl; + DSP_ISA_SLAVE_CONTROL rSlaveControl_Save; + unsigned char ucValue; + + + PRINTK_3(TRACE_3780I, + "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n", + usDspBaseIO, uIndex); + + MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl); + rSlaveControl_Save = rSlaveControl; + rSlaveControl.ConfigMode = TRUE; + OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl)); + OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex); + ucValue = InByteDsp(DSP_ConfigData); + OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save)); + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue); + + + return ucValue; +} +#endif /* 0 */ + +int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, + unsigned short *pIrqMap, + unsigned short *pDmaMap) +{ + unsigned long flags; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + int i; + DSP_UART_CFG_1 rUartCfg1; + DSP_UART_CFG_2 rUartCfg2; + DSP_HBRIDGE_CFG_1 rHBridgeCfg1; + DSP_HBRIDGE_CFG_2 rHBridgeCfg2; + DSP_BUSMASTER_CFG_1 rBusmasterCfg1; + DSP_BUSMASTER_CFG_2 rBusmasterCfg2; + DSP_ISA_PROT_CFG rIsaProtCfg; + DSP_POWER_MGMT_CFG rPowerMgmtCfg; + DSP_HBUS_TIMER_CFG rHBusTimerCfg; + DSP_LBUS_TIMEOUT_DISABLE rLBusTimeoutDisable; + DSP_CHIP_RESET rChipReset; + DSP_CLOCK_CONTROL_1 rClockControl1; + DSP_CLOCK_CONTROL_2 rClockControl2; + DSP_ISA_SLAVE_CONTROL rSlaveControl; + DSP_HBRIDGE_CONTROL rHBridgeControl; + unsigned short ChipID = 0; + unsigned short tval; + + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n", + pSettings->bDSPEnabled); + + + if (!pSettings->bDSPEnabled) { + PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. 
Aborting.\n" ); + return -EIO; + } + + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n", + pSettings->bModemEnabled); + + if (pSettings->bModemEnabled) { + rUartCfg1.Reserved = rUartCfg2.Reserved = 0; + rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow; + rUartCfg1.IrqPulse = pSettings->bUartIrqPulse; + rUartCfg1.Irq = + (unsigned char) pIrqMap[pSettings->usUartIrq]; + switch (pSettings->usUartBaseIO) { + case 0x03F8: + rUartCfg1.BaseIO = 0; + break; + case 0x02F8: + rUartCfg1.BaseIO = 1; + break; + case 0x03E8: + rUartCfg1.BaseIO = 2; + break; + case 0x02E8: + rUartCfg1.BaseIO = 3; + break; + } + rUartCfg2.Enable = TRUE; + } + + rHBridgeCfg1.Reserved = rHBridgeCfg2.Reserved = 0; + rHBridgeCfg1.IrqActiveLow = pSettings->bDspIrqActiveLow; + rHBridgeCfg1.IrqPulse = pSettings->bDspIrqPulse; + rHBridgeCfg1.Irq = (unsigned char) pIrqMap[pSettings->usDspIrq]; + rHBridgeCfg1.AccessMode = 1; + rHBridgeCfg2.Enable = TRUE; + + + rBusmasterCfg2.Reserved = 0; + rBusmasterCfg1.Dma = (unsigned char) pDmaMap[pSettings->usDspDma]; + rBusmasterCfg1.NumTransfers = + (unsigned char) pSettings->usNumTransfers; + rBusmasterCfg1.ReRequest = (unsigned char) pSettings->usReRequest; + rBusmasterCfg1.MEMCS16 = pSettings->bEnableMEMCS16; + rBusmasterCfg2.IsaMemCmdWidth = + (unsigned char) pSettings->usIsaMemCmdWidth; + + + rIsaProtCfg.Reserved = 0; + rIsaProtCfg.GateIOCHRDY = pSettings->bGateIOCHRDY; + + rPowerMgmtCfg.Reserved = 0; + rPowerMgmtCfg.Enable = pSettings->bEnablePwrMgmt; + + rHBusTimerCfg.LoadValue = + (unsigned char) pSettings->usHBusTimerLoadValue; + + rLBusTimeoutDisable.Reserved = 0; + rLBusTimeoutDisable.DisableTimeout = + pSettings->bDisableLBusTimeout; + + MKWORD(rChipReset) = ~pSettings->usChipletEnable; + + rClockControl1.Reserved1 = rClockControl1.Reserved2 = 0; + rClockControl1.N_Divisor = pSettings->usN_Divisor; + rClockControl1.M_Multiplier = pSettings->usM_Multiplier; + + rClockControl2.Reserved = 0; + rClockControl2.PllBypass = pSettings->bPllBypass; + + /* Issue a soft reset to the chip */ + /* Note: Since we may be coming in with 3780i clocks suspended, we must keep + * soft-reset active for 10ms. 
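+	 * (The delay loop below keeps soft-reset asserted for
+	 * 11 * udelay(2000) = 22 ms, comfortably beyond the 10 ms
+	 * minimum while keeping each individual udelay() short.)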
+ */ + rSlaveControl.ClockControl = 0; + rSlaveControl.SoftReset = TRUE; + rSlaveControl.ConfigMode = FALSE; + rSlaveControl.Reserved = 0; + + PRINTK_4(TRACE_3780I, + "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n", + usDspBaseIO, DSP_IsaSlaveControl, + usDspBaseIO + DSP_IsaSlaveControl); + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_EnableDSP rSlaveContrl %x\n", + MKWORD(rSlaveControl)); + + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); + MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl); + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval); + + + for (i = 0; i < 11; i++) + udelay(2000); + + rSlaveControl.SoftReset = FALSE; + OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); + + MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl); + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval); + + + /* Program our general configuration registers */ + WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1)); + WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2)); + WriteGenCfg(DSP_BusMasterCfg1Index, MKBYTE(rBusmasterCfg1)); + WriteGenCfg(DSP_BusMasterCfg2Index, MKBYTE(rBusmasterCfg2)); + WriteGenCfg(DSP_IsaProtCfgIndex, MKBYTE(rIsaProtCfg)); + WriteGenCfg(DSP_PowerMgCfgIndex, MKBYTE(rPowerMgmtCfg)); + WriteGenCfg(DSP_HBusTimerCfgIndex, MKBYTE(rHBusTimerCfg)); + + if (pSettings->bModemEnabled) { + WriteGenCfg(DSP_UartCfg1Index, MKBYTE(rUartCfg1)); + WriteGenCfg(DSP_UartCfg2Index, MKBYTE(rUartCfg2)); + } + + + rHBridgeControl.EnableDspInt = FALSE; + rHBridgeControl.MemAutoInc = TRUE; + rHBridgeControl.IoAutoInc = FALSE; + rHBridgeControl.DiagnosticMode = FALSE; + + PRINTK_3(TRACE_3780I, + "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n", + DSP_HBridgeControl, MKWORD(rHBridgeControl)); + + OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); + spin_unlock_irqrestore(&dsp_lock, flags); + WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable)); + WriteMsaCfg(DSP_ClockControl_1, MKWORD(rClockControl1)); + WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2)); + WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset)); + + ChipID = ReadMsaCfg(DSP_ChipID); + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780I_EnableDSP exiting bRC=TRUE, ChipID %x\n", + ChipID); + + return 0; +} + +int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings) +{ + unsigned long flags; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + DSP_ISA_SLAVE_CONTROL rSlaveControl; + + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n"); + + rSlaveControl.ClockControl = 0; + rSlaveControl.SoftReset = TRUE; + rSlaveControl.ConfigMode = FALSE; + rSlaveControl.Reserved = 0; + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); + + udelay(5); + + rSlaveControl.ClockControl = 1; + OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); + spin_unlock_irqrestore(&dsp_lock, flags); + + udelay(5); + + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n"); + + return 0; +} + +int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings) +{ + unsigned long flags; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + DSP_BOOT_DOMAIN rBootDomain; + DSP_HBRIDGE_CONTROL rHBridgeControl; + + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n"); + + spin_lock_irqsave(&dsp_lock, flags); + /* Mask DSP to PC interrupt */ + MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); + + PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl 
%x\n", + MKWORD(rHBridgeControl)); + + rHBridgeControl.EnableDspInt = FALSE; + OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); + spin_unlock_irqrestore(&dsp_lock, flags); + + /* Reset the core via the boot domain register */ + rBootDomain.ResetCore = TRUE; + rBootDomain.Halt = TRUE; + rBootDomain.NMI = TRUE; + rBootDomain.Reserved = 0; + + PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n", + MKWORD(rBootDomain)); + + WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); + + /* Reset all the chiplets and then reactivate them */ + WriteMsaCfg(DSP_ChipReset, 0xFFFF); + udelay(5); + WriteMsaCfg(DSP_ChipReset, + (unsigned short) (~pSettings->usChipletEnable)); + + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n"); + + return 0; +} + + +int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings) +{ + unsigned long flags; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + DSP_BOOT_DOMAIN rBootDomain; + DSP_HBRIDGE_CONTROL rHBridgeControl; + + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n"); + + + /* Transition the core to a running state */ + rBootDomain.ResetCore = TRUE; + rBootDomain.Halt = FALSE; + rBootDomain.NMI = TRUE; + rBootDomain.Reserved = 0; + WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); + + udelay(5); + + rBootDomain.ResetCore = FALSE; + WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); + udelay(5); + + rBootDomain.NMI = FALSE; + WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); + udelay(5); + + /* Enable DSP to PC interrupt */ + spin_lock_irqsave(&dsp_lock, flags); + MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); + rHBridgeControl.EnableDspInt = TRUE; + + PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n", + MKWORD(rHBridgeControl)); + + OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); + spin_unlock_irqrestore(&dsp_lock, flags); + + + PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=TRUE\n"); + + return 0; +} + + +int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr) +{ + unsigned long flags; + unsigned short __user *pusBuffer = pvBuffer; + unsigned short val; + + + PRINTK_5(TRACE_3780I, + "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", + usDspBaseIO, pusBuffer, uCount, ulDSPAddr); + + + /* Set the initial MSA address. No adjustments need to be made to data store addresses */ + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); + spin_unlock_irqrestore(&dsp_lock, flags); + + /* Transfer the memory block */ + while (uCount-- != 0) { + spin_lock_irqsave(&dsp_lock, flags); + val = InWordDsp(DSP_MsaDataDSISHigh); + spin_unlock_irqrestore(&dsp_lock, flags); + if(put_user(val, pusBuffer++)) + return -EFAULT; + + PRINTK_3(TRACE_3780I, + "3780I::dsp3780I_ReadDStore uCount %x val %x\n", + uCount, val); + + PaceMsaAccess(usDspBaseIO); + } + + + PRINTK_1(TRACE_3780I, + "3780I::dsp3780I_ReadDStore exit bRC=TRUE\n"); + + return 0; +} + +int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO, + void __user *pvBuffer, unsigned uCount, + unsigned long ulDSPAddr) +{ + unsigned long flags; + unsigned short __user *pusBuffer = pvBuffer; + unsigned short val; + + + PRINTK_5(TRACE_3780I, + "3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", + usDspBaseIO, pusBuffer, uCount, ulDSPAddr); + + + /* Set the initial MSA address. 
No adjustments need to be made to data store addresses */ + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); + spin_unlock_irqrestore(&dsp_lock, flags); + + /* Transfer the memory block */ + while (uCount-- != 0) { + spin_lock_irqsave(&dsp_lock, flags); + val = InWordDsp(DSP_ReadAndClear); + spin_unlock_irqrestore(&dsp_lock, flags); + if(put_user(val, pusBuffer++)) + return -EFAULT; + + PRINTK_3(TRACE_3780I, + "3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n", + uCount, val); + + PaceMsaAccess(usDspBaseIO); + } + + + PRINTK_1(TRACE_3780I, + "3780I::dsp3780I_ReadAndClearDStore exit bRC=TRUE\n"); + + return 0; +} + + +int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr) +{ + unsigned long flags; + unsigned short __user *pusBuffer = pvBuffer; + + + PRINTK_5(TRACE_3780I, + "3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", + usDspBaseIO, pusBuffer, uCount, ulDSPAddr); + + + /* Set the initial MSA address. No adjustments need to be made to data store addresses */ + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); + spin_unlock_irqrestore(&dsp_lock, flags); + + /* Transfer the memory block */ + while (uCount-- != 0) { + unsigned short val; + if(get_user(val, pusBuffer++)) + return -EFAULT; + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaDataDSISHigh, val); + spin_unlock_irqrestore(&dsp_lock, flags); + + PRINTK_3(TRACE_3780I, + "3780I::dsp3780I_WriteDStore uCount %x val %x\n", + uCount, val); + + PaceMsaAccess(usDspBaseIO); + } + + + PRINTK_1(TRACE_3780I, + "3780I::dsp3780D_WriteDStore exit bRC=TRUE\n"); + + return 0; +} + + +int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr) +{ + unsigned long flags; + unsigned short __user *pusBuffer = pvBuffer; + + PRINTK_5(TRACE_3780I, + "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", + usDspBaseIO, pusBuffer, uCount, ulDSPAddr); + + /* + * Set the initial MSA address. 
To convert from an instruction store + * address to an MSA address + * shift the address two bits to the left and set bit 22 + */ + ulDSPAddr = (ulDSPAddr << 2) | (1 << 22); + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); + spin_unlock_irqrestore(&dsp_lock, flags); + + /* Transfer the memory block */ + while (uCount-- != 0) { + unsigned short val_lo, val_hi; + spin_lock_irqsave(&dsp_lock, flags); + val_lo = InWordDsp(DSP_MsaDataISLow); + val_hi = InWordDsp(DSP_MsaDataDSISHigh); + spin_unlock_irqrestore(&dsp_lock, flags); + if(put_user(val_lo, pusBuffer++)) + return -EFAULT; + if(put_user(val_hi, pusBuffer++)) + return -EFAULT; + + PRINTK_4(TRACE_3780I, + "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n", + uCount, val_lo, val_hi); + + PaceMsaAccess(usDspBaseIO); + + } + + PRINTK_1(TRACE_3780I, + "3780I::dsp3780I_ReadIStore exit bRC=TRUE\n"); + + return 0; +} + + +int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr) +{ + unsigned long flags; + unsigned short __user *pusBuffer = pvBuffer; + + PRINTK_5(TRACE_3780I, + "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", + usDspBaseIO, pusBuffer, uCount, ulDSPAddr); + + + /* + * Set the initial MSA address. To convert from an instruction store + * address to an MSA address + * shift the address two bits to the left and set bit 22 + */ + ulDSPAddr = (ulDSPAddr << 2) | (1 << 22); + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); + OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); + spin_unlock_irqrestore(&dsp_lock, flags); + + /* Transfer the memory block */ + while (uCount-- != 0) { + unsigned short val_lo, val_hi; + if(get_user(val_lo, pusBuffer++)) + return -EFAULT; + if(get_user(val_hi, pusBuffer++)) + return -EFAULT; + spin_lock_irqsave(&dsp_lock, flags); + OutWordDsp(DSP_MsaDataISLow, val_lo); + OutWordDsp(DSP_MsaDataDSISHigh, val_hi); + spin_unlock_irqrestore(&dsp_lock, flags); + + PRINTK_4(TRACE_3780I, + "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n", + uCount, val_lo, val_hi); + + PaceMsaAccess(usDspBaseIO); + + } + + PRINTK_1(TRACE_3780I, + "3780I::dsp3780I_WriteIStore exit bRC=TRUE\n"); + + return 0; +} + + +int dsp3780I_GetIPCSource(unsigned short usDspBaseIO, + unsigned short *pusIPCSource) +{ + unsigned long flags; + DSP_HBRIDGE_CONTROL rHBridgeControl; + unsigned short temp; + + + PRINTK_3(TRACE_3780I, + "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n", + usDspBaseIO, pusIPCSource); + + /* + * Disable DSP to PC interrupts, read the interrupt register, + * clear the pending IPC bits, and reenable DSP to PC interrupts + */ + spin_lock_irqsave(&dsp_lock, flags); + MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); + rHBridgeControl.EnableDspInt = FALSE; + OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); + + *pusIPCSource = InWordDsp(DSP_Interrupt); + temp = (unsigned short) ~(*pusIPCSource); + + PRINTK_3(TRACE_3780I, + "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n", + *pusIPCSource, temp); + + OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource)); + + rHBridgeControl.EnableDspInt = TRUE; + OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); + spin_unlock_irqrestore(&dsp_lock, flags); + + + PRINTK_2(TRACE_3780I, + "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n", + 
+
+
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+            unsigned short *pusIPCSource)
+{
+    unsigned long flags;
+    DSP_HBRIDGE_CONTROL rHBridgeControl;
+    unsigned short temp;
+
+
+    PRINTK_3(TRACE_3780I,
+        "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
+        usDspBaseIO, pusIPCSource);
+
+    /*
+     * Disable DSP to PC interrupts, read the interrupt register,
+     * clear the pending IPC bits, and reenable DSP to PC interrupts
+     */
+    spin_lock_irqsave(&dsp_lock, flags);
+    MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+    rHBridgeControl.EnableDspInt = FALSE;
+    OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+    *pusIPCSource = InWordDsp(DSP_Interrupt);
+    temp = (unsigned short) ~(*pusIPCSource);
+
+    PRINTK_3(TRACE_3780I,
+        "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
+        *pusIPCSource, temp);
+
+    OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
+
+    rHBridgeControl.EnableDspInt = TRUE;
+    OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+    spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+    PRINTK_2(TRACE_3780I,
+        "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
+        *pusIPCSource);
+
+    return 0;
+}
diff --git a/kernel/drivers/char/mwave/3780i.h b/kernel/drivers/char/mwave/3780i.h
new file mode 100644
index 000000000..fba6ab116
--- /dev/null
+++ b/kernel/drivers/char/mwave/3780i.h
@@ -0,0 +1,358 @@
+/*
+*
+* 3780i.h -- declarations for 3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_3780I_H
+#define _LINUX_3780I_H
+
+#include <asm/io.h>
+
+/* DSP I/O port offsets and definitions */
+#define DSP_IsaSlaveControl 0x0000 /* ISA slave control register */
+#define DSP_IsaSlaveStatus 0x0001 /* ISA slave status register */
+#define DSP_ConfigAddress 0x0002 /* General config address register */
+#define DSP_ConfigData 0x0003 /* General config data register */
+#define DSP_HBridgeControl 0x0002 /* HBridge control register */
+#define DSP_MsaAddrLow 0x0004 /* MSP System Address, low word */
+#define DSP_MsaAddrHigh 0x0006 /* MSP System Address, high word */
+#define DSP_MsaDataDSISHigh 0x0008 /* MSA data register: d-store word or high byte of i-store */
+#define DSP_MsaDataISLow 0x000A /* MSA data register: low word of i-store */
+#define DSP_ReadAndClear 0x000C /* MSA read and clear data register */
+#define DSP_Interrupt 0x000E /* Interrupt register (IPC source) */
+
+typedef struct {
+    unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */
+    unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */
+    unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */
+    unsigned char Reserved:5; /* 0: Reserved */
+} DSP_ISA_SLAVE_CONTROL;
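+
+/*
+ * The bit-field structs in this header are overlaid on raw register words
+ * through the MKWORD()/MKBYTE() macros defined at the bottom of this file.
+ * A minimal sketch of the idiom, as used by dsp3780I_GetIPCSource() in
+ * 3780i.c:
+ *
+ *    DSP_HBRIDGE_CONTROL rHBridgeControl;
+ *    MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+ *    rHBridgeControl.EnableDspInt = FALSE;
+ *    OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ */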
+
+
+typedef struct {
+    unsigned short EnableDspInt:1; /* RW: Enable DSP to X86 ISA interrupt 0=mask it, 1=enable it */
+    unsigned short MemAutoInc:1; /* RW: Memory address auto increment, 0=disable, 1=enable */
+    unsigned short IoAutoInc:1; /* RW: I/O address auto increment, 0=disable, 1=enable */
+    unsigned short DiagnosticMode:1; /* RW: Diagnostic mode 0=normal, 1=diagnostic mode */
+    unsigned short IsaPacingTimer:12; /* R: ISA access pacing timer: count of core cycles stolen */
+} DSP_HBRIDGE_CONTROL;
+
+
+/* DSP register indexes used with the configuration register address (index) register */
+#define DSP_UartCfg1Index 0x0003 /* UART config register 1 */
+#define DSP_UartCfg2Index 0x0004 /* UART config register 2 */
+#define DSP_HBridgeCfg1Index 0x0007 /* HBridge config register 1 */
+#define DSP_HBridgeCfg2Index 0x0008 /* HBridge config register 2 */
+#define DSP_BusMasterCfg1Index 0x0009 /* ISA bus master config register 1 */
+#define DSP_BusMasterCfg2Index 0x000A /* ISA bus master config register 2 */
+#define DSP_IsaProtCfgIndex 0x000F /* ISA protocol control register */
+#define DSP_PowerMgCfgIndex 0x0010 /* Low power suspend/resume enable */
+#define DSP_HBusTimerCfgIndex 0x0011 /* HBUS timer load value */
+
+typedef struct {
+    unsigned char IrqActiveLow:1; /* RW: IRQ active high or low: 0=high, 1=low */
+    unsigned char IrqPulse:1; /* RW: IRQ pulse or level: 0=level, 1=pulse */
+    unsigned char Irq:3; /* RW: IRQ selection */
+    unsigned char BaseIO:2; /* RW: Base I/O selection */
+    unsigned char Reserved:1; /* 0: Reserved */
+} DSP_UART_CFG_1;
+
+typedef struct {
+    unsigned char Enable:1; /* RW: Enable I/O and IRQ: 0=FALSE, 1=TRUE */
+    unsigned char Reserved:7; /* 0: Reserved */
+} DSP_UART_CFG_2;
+
+typedef struct {
+    unsigned char IrqActiveLow:1; /* RW: IRQ active high=0 or low=1 */
+    unsigned char IrqPulse:1; /* RW: IRQ pulse=1 or level=0 */
+    unsigned char Irq:3; /* RW: IRQ selection */
+    unsigned char AccessMode:1; /* RW: 16-bit register access method 0=byte, 1=word */
+    unsigned char Reserved:2; /* 0: Reserved */
+} DSP_HBRIDGE_CFG_1;
+
+typedef struct {
+    unsigned char Enable:1; /* RW: enable I/O and IRQ: 0=FALSE, 1=TRUE */
+    unsigned char Reserved:7; /* 0: Reserved */
+} DSP_HBRIDGE_CFG_2;
+
+
+typedef struct {
+    unsigned char Dma:3; /* RW: DMA channel selection */
+    unsigned char NumTransfers:2; /* RW: Maximum # of transfers once being granted the ISA bus */
+    unsigned char ReRequest:2; /* RW: Minimum delay between releasing the ISA bus and requesting it again */
+    unsigned char MEMCS16:1; /* RW: ISA signal MEMCS16: 0=disabled, 1=enabled */
+} DSP_BUSMASTER_CFG_1;
+
+typedef struct {
+    unsigned char IsaMemCmdWidth:2; /* RW: ISA memory command width */
+    unsigned char Reserved:6; /* 0: Reserved */
+} DSP_BUSMASTER_CFG_2;
+
+
+typedef struct {
+    unsigned char GateIOCHRDY:1; /* RW: Enable IOCHRDY gating: 0=FALSE, 1=TRUE */
+    unsigned char Reserved:7; /* 0: Reserved */
+} DSP_ISA_PROT_CFG;
+
+typedef struct {
+    unsigned char Enable:1; /* RW: Enable low power suspend/resume 0=FALSE, 1=TRUE */
+    unsigned char Reserved:7; /* 0: Reserved */
+} DSP_POWER_MGMT_CFG;
+
+typedef struct {
+    unsigned char LoadValue:8; /* RW: HBUS timer load value */
+} DSP_HBUS_TIMER_CFG;
+
+
+
+/* DSP registers that exist in MSA I/O space */
+#define DSP_ChipID 0x80000000
+#define DSP_MspBootDomain 0x80000580
+#define DSP_LBusTimeoutDisable 0x80000580
+#define DSP_ClockControl_1 0x8000058A
+#define DSP_ClockControl_2 0x8000058C
+#define
DSP_ChipReset 0x80000588 +#define DSP_GpioModeControl_15_8 0x80000082 +#define DSP_GpioDriverEnable_15_8 0x80000076 +#define DSP_GpioOutputData_15_8 0x80000072 + +typedef struct { + unsigned short NMI:1; /* RW: non maskable interrupt */ + unsigned short Halt:1; /* RW: Halt MSP clock */ + unsigned short ResetCore:1; /* RW: Reset MSP core interface */ + unsigned short Reserved:13; /* 0: Reserved */ +} DSP_BOOT_DOMAIN; + +typedef struct { + unsigned short DisableTimeout:1; /* RW: Disable LBus timeout */ + unsigned short Reserved:15; /* 0: Reserved */ +} DSP_LBUS_TIMEOUT_DISABLE; + +typedef struct { + unsigned short Memory:1; /* RW: Reset memory interface */ + unsigned short SerialPort1:1; /* RW: Reset serial port 1 interface */ + unsigned short SerialPort2:1; /* RW: Reset serial port 2 interface */ + unsigned short SerialPort3:1; /* RW: Reset serial port 3 interface */ + unsigned short Gpio:1; /* RW: Reset GPIO interface */ + unsigned short Dma:1; /* RW: Reset DMA interface */ + unsigned short SoundBlaster:1; /* RW: Reset soundblaster interface */ + unsigned short Uart:1; /* RW: Reset UART interface */ + unsigned short Midi:1; /* RW: Reset MIDI interface */ + unsigned short IsaMaster:1; /* RW: Reset ISA master interface */ + unsigned short Reserved:6; /* 0: Reserved */ +} DSP_CHIP_RESET; + +typedef struct { + unsigned short N_Divisor:6; /* RW: (N) PLL output clock divisor */ + unsigned short Reserved1:2; /* 0: reserved */ + unsigned short M_Multiplier:6; /* RW: (M) PLL feedback clock multiplier */ + unsigned short Reserved2:2; /* 0: reserved */ +} DSP_CLOCK_CONTROL_1; + +typedef struct { + unsigned short PllBypass:1; /* RW: PLL Bypass */ + unsigned short Reserved:15; /* 0: Reserved */ +} DSP_CLOCK_CONTROL_2; + +typedef struct { + unsigned short Latch8:1; + unsigned short Latch9:1; + unsigned short Latch10:1; + unsigned short Latch11:1; + unsigned short Latch12:1; + unsigned short Latch13:1; + unsigned short Latch14:1; + unsigned short Latch15:1; + unsigned short Mask8:1; + unsigned short Mask9:1; + unsigned short Mask10:1; + unsigned short Mask11:1; + unsigned short Mask12:1; + unsigned short Mask13:1; + unsigned short Mask14:1; + unsigned short Mask15:1; +} DSP_GPIO_OUTPUT_DATA_15_8; + +typedef struct { + unsigned short Enable8:1; + unsigned short Enable9:1; + unsigned short Enable10:1; + unsigned short Enable11:1; + unsigned short Enable12:1; + unsigned short Enable13:1; + unsigned short Enable14:1; + unsigned short Enable15:1; + unsigned short Mask8:1; + unsigned short Mask9:1; + unsigned short Mask10:1; + unsigned short Mask11:1; + unsigned short Mask12:1; + unsigned short Mask13:1; + unsigned short Mask14:1; + unsigned short Mask15:1; +} DSP_GPIO_DRIVER_ENABLE_15_8; + +typedef struct { + unsigned short GpioMode8:2; + unsigned short GpioMode9:2; + unsigned short GpioMode10:2; + unsigned short GpioMode11:2; + unsigned short GpioMode12:2; + unsigned short GpioMode13:2; + unsigned short GpioMode14:2; + unsigned short GpioMode15:2; +} DSP_GPIO_MODE_15_8; + +/* Component masks that are defined in dspmgr.h */ +#define MW_ADC_MASK 0x0001 +#define MW_AIC2_MASK 0x0006 +#define MW_MIDI_MASK 0x0008 +#define MW_CDDAC_MASK 0x8001 +#define MW_AIC1_MASK 0xE006 +#define MW_UART_MASK 0xE00A +#define MW_ACI_MASK 0xE00B + +/* +* Definition of 3780i configuration structure. Unless otherwise stated, +* these values are provided as input to the 3780i support layer. At present, +* the only values maintained by the 3780i support layer are the saved UART +* registers. 
+*/ +typedef struct _DSP_3780I_CONFIG_SETTINGS { + + /* Location of base configuration register */ + unsigned short usBaseConfigIO; + + /* Enables for various DSP components */ + int bDSPEnabled; + int bModemEnabled; + int bInterruptClaimed; + + /* IRQ, DMA, and Base I/O addresses for various DSP components */ + unsigned short usDspIrq; + unsigned short usDspDma; + unsigned short usDspBaseIO; + unsigned short usUartIrq; + unsigned short usUartBaseIO; + + /* IRQ modes for various DSP components */ + int bDspIrqActiveLow; + int bUartIrqActiveLow; + int bDspIrqPulse; + int bUartIrqPulse; + + /* Card abilities */ + unsigned uIps; + unsigned uDStoreSize; + unsigned uIStoreSize; + unsigned uDmaBandwidth; + + /* Adapter specific 3780i settings */ + unsigned short usNumTransfers; + unsigned short usReRequest; + int bEnableMEMCS16; + unsigned short usIsaMemCmdWidth; + int bGateIOCHRDY; + int bEnablePwrMgmt; + unsigned short usHBusTimerLoadValue; + int bDisableLBusTimeout; + unsigned short usN_Divisor; + unsigned short usM_Multiplier; + int bPllBypass; + unsigned short usChipletEnable; /* Used with the chip reset register to enable specific chiplets */ + + /* Saved UART registers. These are maintained by the 3780i support layer. */ + int bUartSaved; /* True after a successful save of the UART registers */ + unsigned char ucIER; /* Interrupt enable register */ + unsigned char ucFCR; /* FIFO control register */ + unsigned char ucLCR; /* Line control register */ + unsigned char ucMCR; /* Modem control register */ + unsigned char ucSCR; /* Scratch register */ + unsigned char ucDLL; /* Divisor latch, low byte */ + unsigned char ucDLM; /* Divisor latch, high byte */ +} DSP_3780I_CONFIG_SETTINGS; + + +/* 3780i support functions */ +int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, + unsigned short *pIrqMap, + unsigned short *pDmaMap); +int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings); +int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings); +int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings); +int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr); +int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO, + void __user *pvBuffer, unsigned uCount, + unsigned long ulDSPAddr); +int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr); +int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr); +int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer, + unsigned uCount, unsigned long ulDSPAddr); +unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO, + unsigned long ulMsaAddr); +void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO, + unsigned long ulMsaAddr, unsigned short usValue); +int dsp3780I_GetIPCSource(unsigned short usDspBaseIO, + unsigned short *pusIPCSource); + +/* I/O port access macros */ +#define MKWORD(var) (*((unsigned short *)(&var))) +#define MKBYTE(var) (*((unsigned char *)(&var))) + +#define WriteMsaCfg(addr,value) dsp3780I_WriteMsaCfg(usDspBaseIO,addr,value) +#define ReadMsaCfg(addr) dsp3780I_ReadMsaCfg(usDspBaseIO,addr) +#define WriteGenCfg(index,value) dsp3780I_WriteGenCfg(usDspBaseIO,index,value) +#define ReadGenCfg(index) dsp3780I_ReadGenCfg(usDspBaseIO,index) + +#define InWordDsp(index) inw(usDspBaseIO+index) +#define InByteDsp(index) inb(usDspBaseIO+index) +#define OutWordDsp(index,value) outw(value,usDspBaseIO+index) +#define 
OutByteDsp(index,value) outb(value,usDspBaseIO+index) + +#endif diff --git a/kernel/drivers/char/mwave/Makefile b/kernel/drivers/char/mwave/Makefile new file mode 100644 index 000000000..efa6a82e5 --- /dev/null +++ b/kernel/drivers/char/mwave/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for ACP Modem (Mwave). +# +# See the README file in this directory for more info. +# + +obj-$(CONFIG_MWAVE) += mwave.o + +mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o + +# To have the mwave driver disable other uarts if necessary +# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES + +# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging: +ccflags-y += -DMW_TRACE diff --git a/kernel/drivers/char/mwave/README b/kernel/drivers/char/mwave/README new file mode 100644 index 000000000..c2a58f428 --- /dev/null +++ b/kernel/drivers/char/mwave/README @@ -0,0 +1,47 @@ +Module options +-------------- + +The mwave module takes the following options. Note that these options +are not saved by the BIOS and so do not persist after unload and reload. + + mwave_debug=value, where value is bitwise OR of trace flags: + 0x0001 mwavedd api tracing + 0x0002 smapi api tracing + 0x0004 3780i tracing + 0x0008 tp3780i tracing + + Tracing only occurs if the driver has been compiled with the + MW_TRACE macro #defined (i.e. let ccflags-y := -DMW_TRACE + in the Makefile). + + mwave_3780i_irq=5/7/10/11/15 + If the dsp irq has not been setup and stored in bios by the + thinkpad configuration utility then this parameter allows the + irq used by the dsp to be configured. + + mwave_3780i_io=0x130/0x350/0x0070/0xDB0 + If the dsp io range has not been setup and stored in bios by the + thinkpad configuration utility then this parameter allows the + io range used by the dsp to be configured. + + mwave_uart_irq=3/4 + If the mwave's uart irq has not been setup and stored in bios by the + thinkpad configuration utility then this parameter allows the + irq used by the mwave uart to be configured. + + mwave_uart_io=0x3f8/0x2f8/0x3E8/0x2E8 + If the uart io range has not been setup and stored in bios by the + thinkpad configuration utility then this parameter allows the + io range used by the mwave uart to be configured. + +Example to enable the 3780i DSP using ttyS1 resources: + + insmod mwave mwave_3780i_irq=10 mwave_3780i_io=0x0130 mwave_uart_irq=3 mwave_uart_io=0x2f8 + +Accessing the driver +-------------------- + +You must also create a node for the driver: + mkdir -p /dev/modems + mknod --mode=660 /dev/modems/mwave c 10 219 + diff --git a/kernel/drivers/char/mwave/mwavedd.c b/kernel/drivers/char/mwave/mwavedd.c new file mode 100644 index 000000000..164544afd --- /dev/null +++ b/kernel/drivers/char/mwave/mwavedd.c @@ -0,0 +1,697 @@ +/* +* +* mwavedd.c -- mwave device driver +* +* +* Written By: Mike Sullivan IBM Corporation +* +* Copyright (C) 1999 IBM Corporation +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/serial_8250.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+#include "tp3780i.h"
+
+MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver");
+MODULE_AUTHOR("Mike Sullivan and Paul Schroeder");
+MODULE_LICENSE("GPL");
+
+/*
+* These parameters support the setting of MWave resources. Note that no
+* checks are made against other devices (i.e. superio) for conflicts.
+* We'll depend on users using the tpctl utility to do that for now +*/ +static DEFINE_MUTEX(mwave_mutex); +int mwave_debug = 0; +int mwave_3780i_irq = 0; +int mwave_3780i_io = 0; +int mwave_uart_irq = 0; +int mwave_uart_io = 0; +module_param(mwave_debug, int, 0); +module_param(mwave_3780i_irq, int, 0); +module_param(mwave_3780i_io, int, 0); +module_param(mwave_uart_irq, int, 0); +module_param(mwave_uart_io, int, 0); + +static int mwave_open(struct inode *inode, struct file *file); +static int mwave_close(struct inode *inode, struct file *file); +static long mwave_ioctl(struct file *filp, unsigned int iocmd, + unsigned long ioarg); + +MWAVE_DEVICE_DATA mwave_s_mdd; + +static int mwave_open(struct inode *inode, struct file *file) +{ + unsigned int retval = 0; + + PRINTK_3(TRACE_MWAVE, + "mwavedd::mwave_open, entry inode %p file %p\n", + inode, file); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_open, exit return retval %x\n", retval); + + return retval; +} + +static int mwave_close(struct inode *inode, struct file *file) +{ + unsigned int retval = 0; + + PRINTK_3(TRACE_MWAVE, + "mwavedd::mwave_close, entry inode %p file %p\n", + inode, file); + + PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n", + retval); + + return retval; +} + +static long mwave_ioctl(struct file *file, unsigned int iocmd, + unsigned long ioarg) +{ + unsigned int retval = 0; + pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; + void __user *arg = (void __user *)ioarg; + + PRINTK_4(TRACE_MWAVE, + "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n", + file, iocmd, (int) ioarg); + + switch (iocmd) { + + case IOCTL_MW_RESET: + PRINTK_1(TRACE_MWAVE, + "mwavedd::mwave_ioctl, IOCTL_MW_RESET" + " calling tp3780I_ResetDSP\n"); + mutex_lock(&mwave_mutex); + retval = tp3780I_ResetDSP(&pDrvData->rBDData); + mutex_unlock(&mwave_mutex); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl, IOCTL_MW_RESET" + " retval %x from tp3780I_ResetDSP\n", + retval); + break; + + case IOCTL_MW_RUN: + PRINTK_1(TRACE_MWAVE, + "mwavedd::mwave_ioctl, IOCTL_MW_RUN" + " calling tp3780I_StartDSP\n"); + mutex_lock(&mwave_mutex); + retval = tp3780I_StartDSP(&pDrvData->rBDData); + mutex_unlock(&mwave_mutex); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl, IOCTL_MW_RUN" + " retval %x from tp3780I_StartDSP\n", + retval); + break; + + case IOCTL_MW_DSP_ABILITIES: { + MW_ABILITIES rAbilities; + + PRINTK_1(TRACE_MWAVE, + "mwavedd::mwave_ioctl," + " IOCTL_MW_DSP_ABILITIES calling" + " tp3780I_QueryAbilities\n"); + mutex_lock(&mwave_mutex); + retval = tp3780I_QueryAbilities(&pDrvData->rBDData, + &rAbilities); + mutex_unlock(&mwave_mutex); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" + " retval %x from tp3780I_QueryAbilities\n", + retval); + if (retval == 0) { + if( copy_to_user(arg, &rAbilities, + sizeof(MW_ABILITIES)) ) + return -EFAULT; + } + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" + " exit retval %x\n", + retval); + } + break; + + case IOCTL_MW_READ_DATA: + case IOCTL_MW_READCLEAR_DATA: { + MW_READWRITE rReadData; + unsigned short __user *pusBuffer = NULL; + + if( copy_from_user(&rReadData, arg, + sizeof(MW_READWRITE)) ) + return -EFAULT; + pusBuffer = (unsigned short __user *) (rReadData.pBuf); + + PRINTK_4(TRACE_MWAVE, + "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA," + " size %lx, ioarg %lx pusBuffer %p\n", + rReadData.ulDataLength, ioarg, pusBuffer); + mutex_lock(&mwave_mutex); + retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, + iocmd, + pusBuffer, + rReadData.ulDataLength, + 
rReadData.usDspAddress);
+            mutex_unlock(&mwave_mutex);
+        }
+            break;
+
+        case IOCTL_MW_READ_INST: {
+            MW_READWRITE rReadData;
+            unsigned short __user *pusBuffer = NULL;
+
+            if (copy_from_user(&rReadData, arg,
+                        sizeof(MW_READWRITE)))
+                return -EFAULT;
+            pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+
+            PRINTK_4(TRACE_MWAVE,
+                "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
+                " size %lx, ioarg %lx pusBuffer %p\n",
+                rReadData.ulDataLength / 2, ioarg,
+                pusBuffer);
+            mutex_lock(&mwave_mutex);
+            retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+                iocmd, pusBuffer,
+                rReadData.ulDataLength / 2,
+                rReadData.usDspAddress);
+            mutex_unlock(&mwave_mutex);
+        }
+            break;
+
+        case IOCTL_MW_WRITE_DATA: {
+            MW_READWRITE rWriteData;
+            unsigned short __user *pusBuffer = NULL;
+
+            if (copy_from_user(&rWriteData, arg,
+                        sizeof(MW_READWRITE)))
+                return -EFAULT;
+            pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
+
+            PRINTK_4(TRACE_MWAVE,
+                "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
+                " size %lx, ioarg %lx pusBuffer %p\n",
+                rWriteData.ulDataLength, ioarg,
+                pusBuffer);
+            mutex_lock(&mwave_mutex);
+            retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+                iocmd, pusBuffer,
+                rWriteData.ulDataLength,
+                rWriteData.usDspAddress);
+            mutex_unlock(&mwave_mutex);
+        }
+            break;
+
+        case IOCTL_MW_WRITE_INST: {
+            MW_READWRITE rWriteData;
+            unsigned short __user *pusBuffer = NULL;
+
+            if (copy_from_user(&rWriteData, arg,
+                        sizeof(MW_READWRITE)))
+                return -EFAULT;
+            pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
+
+            PRINTK_4(TRACE_MWAVE,
+                "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
+                " size %lx, ioarg %lx pusBuffer %p\n",
+                rWriteData.ulDataLength, ioarg,
+                pusBuffer);
+            mutex_lock(&mwave_mutex);
+            retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+                iocmd, pusBuffer,
+                rWriteData.ulDataLength,
+                rWriteData.usDspAddress);
+            mutex_unlock(&mwave_mutex);
+        }
+            break;
+
+        case IOCTL_MW_REGISTER_IPC: {
+            unsigned int ipcnum = (unsigned int) ioarg;
+
+            if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+                PRINTK_ERROR(KERN_ERR_MWAVE
+                        "mwavedd::mwave_ioctl:"
+                        " IOCTL_MW_REGISTER_IPC:"
+                        " Error: Invalid ipcnum %x\n",
+                        ipcnum);
+                return -EINVAL;
+            }
+            PRINTK_3(TRACE_MWAVE,
+                "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+                " ipcnum %x entry usIntCount %x\n",
+                ipcnum,
+                pDrvData->IPCs[ipcnum].usIntCount);
+
+            mutex_lock(&mwave_mutex);
+            pDrvData->IPCs[ipcnum].bIsHere = FALSE;
+            pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
+            mutex_unlock(&mwave_mutex);
+
+            PRINTK_2(TRACE_MWAVE,
+                "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+                " ipcnum %x exit\n",
+                ipcnum);
+        }
+            break;
+
+        case IOCTL_MW_GET_IPC: {
+            unsigned int ipcnum = (unsigned int) ioarg;
+
+            if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+                PRINTK_ERROR(KERN_ERR_MWAVE
+                        "mwavedd::mwave_ioctl:"
+                        " IOCTL_MW_GET_IPC: Error:"
+                        " Invalid ipcnum %x\n", ipcnum);
+                return -EINVAL;
+            }
+            PRINTK_3(TRACE_MWAVE,
+                "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+                " ipcnum %x, usIntCount %x\n",
+                ipcnum,
+                pDrvData->IPCs[ipcnum].usIntCount);
+
+            mutex_lock(&mwave_mutex);
+            if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
+                DECLARE_WAITQUEUE(wait, current);
+
+                PRINTK_2(TRACE_MWAVE,
+                    "mwavedd::mwave_ioctl, thread for"
+                    " ipc %x going to sleep\n",
+                    ipcnum);
+                add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+                pDrvData->IPCs[ipcnum].bIsHere = TRUE;
+                set_current_state(TASK_INTERRUPTIBLE);
+                /* check whether an event was signalled by */
+                /* the interrupt handler while we were gone */
+                if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred
(race condition) */ + pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */ + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl" + " IOCTL_MW_GET_IPC ipcnum %x" + " handling first int\n", + ipcnum); + } else { /* either 1st int has not yet occurred, or we have already handled the first int */ + schedule(); + if (pDrvData->IPCs[ipcnum].usIntCount == 1) { + pDrvData->IPCs[ipcnum].usIntCount = 2; + } + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl" + " IOCTL_MW_GET_IPC ipcnum %x" + " woke up and returning to" + " application\n", + ipcnum); + } + pDrvData->IPCs[ipcnum].bIsHere = FALSE; + remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); + set_current_state(TASK_RUNNING); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC," + " returning thread for ipc %x" + " processing\n", + ipcnum); + } + mutex_unlock(&mwave_mutex); + } + break; + + case IOCTL_MW_UNREGISTER_IPC: { + unsigned int ipcnum = (unsigned int) ioarg; + + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC" + " ipcnum %x\n", + ipcnum); + if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd::mwave_ioctl:" + " IOCTL_MW_UNREGISTER_IPC:" + " Error: Invalid ipcnum %x\n", + ipcnum); + return -EINVAL; + } + mutex_lock(&mwave_mutex); + if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { + pDrvData->IPCs[ipcnum].bIsEnabled = FALSE; + if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) { + wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue); + } + } + mutex_unlock(&mwave_mutex); + } + break; + + default: + return -ENOTTY; + break; + } /* switch */ + + PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval); + + return retval; +} + + +static ssize_t mwave_read(struct file *file, char __user *buf, size_t count, + loff_t * ppos) +{ + PRINTK_5(TRACE_MWAVE, + "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n", + file, buf, count, ppos); + + return -EINVAL; +} + + +static ssize_t mwave_write(struct file *file, const char __user *buf, + size_t count, loff_t * ppos) +{ + PRINTK_5(TRACE_MWAVE, + "mwavedd::mwave_write entry file %p, buf %p," + " count %zx ppos %p\n", + file, buf, count, ppos); + + return -EINVAL; +} + + +static int register_serial_portandirq(unsigned int port, int irq) +{ + struct uart_8250_port uart; + + switch ( port ) { + case 0x3f8: + case 0x2f8: + case 0x3e8: + case 0x2e8: + /* OK */ + break; + default: + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd::register_serial_portandirq:" + " Error: Illegal port %x\n", port ); + return -1; + } /* switch */ + /* port is okay */ + + switch ( irq ) { + case 3: + case 4: + case 5: + case 7: + /* OK */ + break; + default: + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd::register_serial_portandirq:" + " Error: Illegal irq %x\n", irq ); + return -1; + } /* switch */ + /* irq is okay */ + + memset(&uart, 0, sizeof(uart)); + + uart.port.uartclk = 1843200; + uart.port.iobase = port; + uart.port.irq = irq; + uart.port.iotype = UPIO_PORT; + uart.port.flags = UPF_SHARE_IRQ; + return serial8250_register_8250_port(&uart); +} + + +static const struct file_operations mwave_fops = { + .owner = THIS_MODULE, + .read = mwave_read, + .write = mwave_write, + .unlocked_ioctl = mwave_ioctl, + .open = mwave_open, + .release = mwave_close, + .llseek = default_llseek, +}; + + +static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops }; + +#if 0 /* totally b0rked */ +/* + * sysfs support + */ + +struct device mwave_device; + +/* Prevent code redundancy, create a macro for mwave_show_* 
functions. */ +#define mwave_show_function(attr_name, format_string, field) \ +static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + DSP_3780I_CONFIG_SETTINGS *pSettings = \ + &mwave_s_mdd.rBDData.rDspSettings; \ + return sprintf(buf, format_string, pSettings->field); \ +} + +/* All of our attributes are read attributes. */ +#define mwave_dev_rd_attr(attr_name, format_string, field) \ + mwave_show_function(attr_name, format_string, field) \ +static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL) + +mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma); +mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq); +mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO); +mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq); +mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO); + +static struct device_attribute * const mwave_dev_attrs[] = { + &dev_attr_3780i_dma, + &dev_attr_3780i_irq, + &dev_attr_3780i_io, + &dev_attr_uart_irq, + &dev_attr_uart_io, +}; +#endif + +/* +* mwave_init is called on module load +* +* mwave_exit is called on module unload +* mwave_exit is also used to clean up after an aborted mwave_init +*/ +static void mwave_exit(void) +{ + pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; + + PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n"); + +#if 0 + for (i = 0; i < pDrvData->nr_registered_attrs; i++) + device_remove_file(&mwave_device, mwave_dev_attrs[i]); + pDrvData->nr_registered_attrs = 0; + + if (pDrvData->device_registered) { + device_unregister(&mwave_device); + pDrvData->device_registered = FALSE; + } +#endif + + if ( pDrvData->sLine >= 0 ) { + serial8250_unregister_port(pDrvData->sLine); + } + if (pDrvData->bMwaveDevRegistered) { + misc_deregister(&mwave_misc_dev); + } + if (pDrvData->bDSPEnabled) { + tp3780I_DisableDSP(&pDrvData->rBDData); + } + if (pDrvData->bResourcesClaimed) { + tp3780I_ReleaseResources(&pDrvData->rBDData); + } + if (pDrvData->bBDInitialized) { + tp3780I_Cleanup(&pDrvData->rBDData); + } + + PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n"); +} + +module_exit(mwave_exit); + +static int __init mwave_init(void) +{ + int i; + int retval = 0; + pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; + + PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n"); + + memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA)); + + pDrvData->bBDInitialized = FALSE; + pDrvData->bResourcesClaimed = FALSE; + pDrvData->bDSPEnabled = FALSE; + pDrvData->bDSPReset = FALSE; + pDrvData->bMwaveDevRegistered = FALSE; + pDrvData->sLine = -1; + + for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) { + pDrvData->IPCs[i].bIsEnabled = FALSE; + pDrvData->IPCs[i].bIsHere = FALSE; + pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */ + init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue); + } + + retval = tp3780I_InitializeBoardData(&pDrvData->rBDData); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_init, return from tp3780I_InitializeBoardData" + " retval %x\n", + retval); + if (retval) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd::mwave_init: Error:" + " Failed to initialize board data\n"); + goto cleanup_error; + } + pDrvData->bBDInitialized = TRUE; + + retval = tp3780I_CalcResources(&pDrvData->rBDData); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_init, return from tp3780I_CalcResources" + " retval %x\n", + retval); + if (retval) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd:mwave_init: Error:" + " Failed to calculate resources\n"); + goto cleanup_error; + } + + retval = tp3780I_ClaimResources(&pDrvData->rBDData); + PRINTK_2(TRACE_MWAVE, + 
"mwavedd::mwave_init, return from tp3780I_ClaimResources" + " retval %x\n", + retval); + if (retval) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd:mwave_init: Error:" + " Failed to claim resources\n"); + goto cleanup_error; + } + pDrvData->bResourcesClaimed = TRUE; + + retval = tp3780I_EnableDSP(&pDrvData->rBDData); + PRINTK_2(TRACE_MWAVE, + "mwavedd::mwave_init, return from tp3780I_EnableDSP" + " retval %x\n", + retval); + if (retval) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd:mwave_init: Error:" + " Failed to enable DSP\n"); + goto cleanup_error; + } + pDrvData->bDSPEnabled = TRUE; + + if (misc_register(&mwave_misc_dev) < 0) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd:mwave_init: Error:" + " Failed to register misc device\n"); + goto cleanup_error; + } + pDrvData->bMwaveDevRegistered = TRUE; + + pDrvData->sLine = register_serial_portandirq( + pDrvData->rBDData.rDspSettings.usUartBaseIO, + pDrvData->rBDData.rDspSettings.usUartIrq + ); + if (pDrvData->sLine < 0) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd:mwave_init: Error:" + " Failed to register serial driver\n"); + goto cleanup_error; + } + /* uart is registered */ + +#if 0 + /* sysfs */ + memset(&mwave_device, 0, sizeof (struct device)); + dev_set_name(&mwave_device, "mwave"); + + if (device_register(&mwave_device)) + goto cleanup_error; + pDrvData->device_registered = TRUE; + for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) { + if(device_create_file(&mwave_device, mwave_dev_attrs[i])) { + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd:mwave_init: Error:" + " Failed to create sysfs file %s\n", + mwave_dev_attrs[i]->attr.name); + goto cleanup_error; + } + pDrvData->nr_registered_attrs++; + } +#endif + + /* SUCCESS! */ + return 0; + +cleanup_error: + PRINTK_ERROR(KERN_ERR_MWAVE + "mwavedd::mwave_init: Error:" + " Failed to initialize\n"); + mwave_exit(); /* clean up */ + + return -EIO; +} + +module_init(mwave_init); + diff --git a/kernel/drivers/char/mwave/mwavedd.h b/kernel/drivers/char/mwave/mwavedd.h new file mode 100644 index 000000000..7e0d530e2 --- /dev/null +++ b/kernel/drivers/char/mwave/mwavedd.h @@ -0,0 +1,152 @@ +/* +* +* mwavedd.h -- declarations for mwave device driver +* +* +* Written By: Mike Sullivan IBM Corporation +* +* Copyright (C) 1999 IBM Corporation +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* NO WARRANTY +* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +* solely responsible for determining the appropriateness of using and +* distributing the Program and assumes all risks associated with its +* exercise of rights under this Agreement, including but not limited to +* the risks and costs of program errors, damage to or loss of data, +* programs or equipment, and unavailability or interruption of operations. 
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_MWAVEDD_H
+#define _LINUX_MWAVEDD_H
+#include "3780i.h"
+#include "tp3780i.h"
+#include "smapi.h"
+#include "mwavepub.h"
+#include <linux/ioctl.h>
+#include <asm/uaccess.h>
+#include <linux/wait.h>
+
+extern int mwave_debug;
+extern int mwave_3780i_irq;
+extern int mwave_3780i_io;
+extern int mwave_uart_irq;
+extern int mwave_uart_io;
+
+#define PRINTK_ERROR printk
+#define KERN_ERR_MWAVE KERN_ERR "mwave: "
+
+#define TRACE_MWAVE 0x0001
+#define TRACE_SMAPI 0x0002
+#define TRACE_3780I 0x0004
+#define TRACE_TP3780I 0x0008
+
+#ifdef MW_TRACE
+#define PRINTK_1(f,s) \
+    if (f & (mwave_debug)) { \
+        printk(s); \
+    }
+
+#define PRINTK_2(f,s,v1) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1); \
+    }
+
+#define PRINTK_3(f,s,v1,v2) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1,v2); \
+    }
+
+#define PRINTK_4(f,s,v1,v2,v3) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1,v2,v3); \
+    }
+
+#define PRINTK_5(f,s,v1,v2,v3,v4) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1,v2,v3,v4); \
+    }
+
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1,v2,v3,v4,v5); \
+    }
+
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1,v2,v3,v4,v5,v6); \
+    }
+
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \
+    if (f & (mwave_debug)) { \
+        printk(s,v1,v2,v3,v4,v5,v6,v7); \
+    }
+
+#else
+#define PRINTK_1(f,s)
+#define PRINTK_2(f,s,v1)
+#define PRINTK_3(f,s,v1,v2)
+#define PRINTK_4(f,s,v1,v2,v3)
+#define PRINTK_5(f,s,v1,v2,v3,v4)
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
+#endif
+
+
+typedef struct _MWAVE_IPC {
+    unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
+    BOOLEAN bIsEnabled;
+    BOOLEAN bIsHere;
+    /* entry spin lock */
+    wait_queue_head_t ipc_wait_queue;
+} MWAVE_IPC;
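+
+/*
+ * How the IPC bookkeeping above is used (see mwave_ioctl() in mwavedd.c):
+ * IOCTL_MW_REGISTER_IPC arms an entry, IOCTL_MW_GET_IPC sleeps on
+ * ipc_wait_queue until the interrupt side bumps usIntCount, and
+ * IOCTL_MW_UNREGISTER_IPC wakes any sleeper and disarms the entry.
+ * usIntCount records whether the first interrupt raced ahead of the
+ * waiter (1) or has already been consumed (2).
+ */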
+
+typedef struct _MWAVE_DEVICE_DATA {
+    THINKPAD_BD_DATA rBDData; /* board driver's data area */
+    unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
+    unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
+    BOOLEAN bBDInitialized;
+    BOOLEAN bResourcesClaimed;
+    BOOLEAN bDSPEnabled;
+    BOOLEAN bDSPReset;
+    MWAVE_IPC IPCs[16];
+    BOOLEAN bMwaveDevRegistered;
+    short sLine;
+    int nr_registered_attrs;
+    int device_registered;
+
+} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+
+extern MWAVE_DEVICE_DATA mwave_s_mdd;
+
+#endif
diff --git a/kernel/drivers/char/mwave/mwavepub.h b/kernel/drivers/char/mwave/mwavepub.h
new file mode 100644
index 000000000..60c961ae2
--- /dev/null
+++ b/kernel/drivers/char/mwave/mwavepub.h
@@ -0,0 +1,89 @@
+/*
+*
+* mwavepub.h -- PUBLIC declarations for the mwave driver
+* and applications using it
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_MWAVEPUB_H
+#define _LINUX_MWAVEPUB_H
+
+#include <linux/miscdevice.h>
+
+
+typedef struct _MW_ABILITIES {
+    unsigned long instr_per_sec;
+    unsigned long data_size;
+    unsigned long inst_size;
+    unsigned long bus_dma_bw;
+    unsigned short uart_enable;
+    short component_count;
+    unsigned long component_list[7];
+    char mwave_os_name[16];
+    char bios_task_name[16];
+} MW_ABILITIES, *pMW_ABILITIES;
+
+
+typedef struct _MW_READWRITE {
+    unsigned short usDspAddress; /* The dsp address */
+    unsigned long ulDataLength; /* The size in bytes of the data or user buffer */
+    void __user *pBuf; /* Input:variable sized buffer */
+} MW_READWRITE, *pMW_READWRITE;
+
+#define IOCTL_MW_RESET _IO(MWAVE_MINOR,1)
+#define IOCTL_MW_RUN _IO(MWAVE_MINOR,2)
+#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES)
+#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE)
+#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE)
+#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE)
+#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE)
+#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int)
+#define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int)
+#define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int)
+#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE)
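+
+/*
+ * A minimal user-space sketch of this ioctl interface (illustrative only;
+ * the device node path comes from the README in this directory, and error
+ * handling is omitted):
+ *
+ *    int fd = open("/dev/modems/mwave", O_RDWR);
+ *    MW_ABILITIES abilities;
+ *    if (fd >= 0 && ioctl(fd, IOCTL_MW_DSP_ABILITIES, &abilities) == 0)
+ *        printf("DSP runs %lu instructions/sec\n",
+ *            abilities.instr_per_sec);
+ */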
+
+
+#endif
diff --git a/kernel/drivers/char/mwave/smapi.c b/kernel/drivers/char/mwave/smapi.c
new file mode 100644
index 000000000..6187fd14b
--- /dev/null
+++ b/kernel/drivers/char/mwave/smapi.c
@@ -0,0 +1,570 @@
+/*
+*
+* smapi.c -- SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include "smapi.h"
+#include "mwavedd.h"
+
+static unsigned short g_usSmapiPort = 0;
+
+
+static int smapi_request(unsigned short inBX, unsigned short inCX,
+            unsigned short inDI, unsigned short inSI,
+            unsigned short *outAX, unsigned short *outBX,
+            unsigned short *outCX, unsigned short *outDX,
+            unsigned short *outDI, unsigned short *outSI)
+{
+    unsigned short myoutAX = 2, *pmyoutAX = &myoutAX;
+    unsigned short myoutBX = 3, *pmyoutBX = &myoutBX;
+    unsigned short myoutCX = 4, *pmyoutCX = &myoutCX;
+    unsigned short myoutDX = 5, *pmyoutDX = &myoutDX;
+    unsigned short myoutDI = 6, *pmyoutDI = &myoutDI;
+    unsigned short myoutSI = 7, *pmyoutSI = &myoutSI;
+    unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
+    unsigned int inBXCX = (inBX << 16) | inCX;
+    unsigned int inDISI = (inDI << 16) | inSI;
+    int retval = 0;
+
+    PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
+        inBX, inCX, inDI, inSI);
+
+    __asm__ __volatile__("movw $0x5380,%%ax\n\t"
+        "movl %7,%%ebx\n\t"
+        "shrl $16, %%ebx\n\t"
+        "movw %7,%%cx\n\t"
+        "movl %8,%%edi\n\t"
+        "shrl $16,%%edi\n\t"
+        "movw %8,%%si\n\t"
+        "movw %9,%%dx\n\t"
+        "out %%al,%%dx\n\t"
+        "out %%al,$0x4F\n\t"
+        "cmpb $0x53,%%ah\n\t"
+        "je 2f\n\t"
+        "1:\n\t"
+        "orb %%ah,%%ah\n\t"
+        "jnz 2f\n\t"
+        "movw %%ax,%0\n\t"
+        "movw %%bx,%1\n\t"
+        "movw %%cx,%2\n\t"
+        "movw %%dx,%3\n\t"
+        "movw %%di,%4\n\t"
+        "movw %%si,%5\n\t"
+        "movw $1,%6\n\t"
+        "2:\n\t":"=m"(*(unsigned short *) pmyoutAX),
+        "=m"(*(unsigned short *) pmyoutBX),
+        "=m"(*(unsigned short *) pmyoutCX),
+        "=m"(*(unsigned short *) pmyoutDX),
+        "=m"(*(unsigned short *) pmyoutDI),
+        "=m"(*(unsigned short *) pmyoutSI),
+        "=m"(*(unsigned short *) pusSmapiOK)
+        :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort)
+        :"%eax", "%ebx", "%ecx", "%edx", "%edi",
+        "%esi");
+
+    PRINTK_8(TRACE_SMAPI,
+        "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
+        myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
+        usSmapiOK);
+    *outAX = myoutAX;
+    *outBX = myoutBX;
+    *outCX = myoutCX;
+    *outDX = myoutDX;
+    *outDI = myoutDI;
+    *outSI = myoutSI;
+
+    retval = (usSmapiOK == 1) ? 0 : -EIO;
+    PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
+    return retval;
+}
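+
+/*
+ * smapi_request() above drives a ThinkPad SMAPI BIOS call: function code
+ * 0x5380 is loaded into AX, the packed parameters into BX/CX and DI/SI,
+ * and the call is triggered by writing AL to the SMAPI port that
+ * smapi_init() reads from CMOS. On success AH comes back zero and the
+ * result registers are copied out. A sketch of a typical query, using
+ * the 0x1802 "get DSP settings" code issued by smapi_query_DSP_cfg()
+ * below:
+ *
+ *    unsigned short ax, bx, cx, dx, di, si;
+ *    if (smapi_request(0x1802, 0x0000, 0, 0,
+ *            &ax, &bx, &cx, &dx, &di, &si) == 0)
+ *        bDSPPresent = ((bx & 0x0100) != 0);
+ */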
"movw %%dx,%3\n\t" + "movw %%di,%4\n\t" + "movw %%si,%5\n\t" + "movw $1,%6\n\t" + "2:\n\t":"=m"(*(unsigned short *) pmyoutAX), + "=m"(*(unsigned short *) pmyoutBX), + "=m"(*(unsigned short *) pmyoutCX), + "=m"(*(unsigned short *) pmyoutDX), + "=m"(*(unsigned short *) pmyoutDI), + "=m"(*(unsigned short *) pmyoutSI), + "=m"(*(unsigned short *) pusSmapiOK) + :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort) + :"%eax", "%ebx", "%ecx", "%edx", "%edi", + "%esi"); + + PRINTK_8(TRACE_SMAPI, + "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n", + myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI, + usSmapiOK); + *outAX = myoutAX; + *outBX = myoutBX; + *outCX = myoutCX; + *outDX = myoutDX; + *outDI = myoutDI; + *outSI = myoutSI; + + retval = (usSmapiOK == 1) ? 0 : -EIO; + PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval); + return retval; +} + + +int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) +{ + int bRC = -EIO; + unsigned short usAX, usBX, usCX, usDX, usDI, usSI; + unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; + unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; + unsigned short numDspBases = 8; + unsigned short numUartBases = 4; + + PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n"); + + bRC = smapi_request(0x1802, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) { + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n"); + return bRC; + } + + PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); + + pSettings->bDSPPresent = ((usBX & 0x0100) != 0); + pSettings->bDSPEnabled = ((usCX & 0x0001) != 0); + pSettings->usDspIRQ = usSI & 0x00FF; + pSettings->usDspDMA = (usSI & 0xFF00) >> 8; + if ((usDI & 0x00FF) < numDspBases) { + pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF]; + } else { + pSettings->usDspBaseIO = 0; + } + PRINTK_6(TRACE_SMAPI, + "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n", + pSettings->bDSPPresent, pSettings->bDSPEnabled, + pSettings->usDspIRQ, pSettings->usDspDMA, + pSettings->usDspBaseIO); + + /* check for illegal values */ + if ( pSettings->usDspBaseIO == 0 ) + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n"); + if ( pSettings->usDspIRQ == 0 ) + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n"); + + bRC = smapi_request(0x1804, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) { + PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. 
Aborting.\n"); + return bRC; + } + + PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); + + pSettings->bModemEnabled = ((usCX & 0x0001) != 0); + pSettings->usUartIRQ = usSI & 0x000F; + if (((usSI & 0xFF00) >> 8) < numUartBases) { + pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8]; + } else { + pSettings->usUartBaseIO = 0; + } + + PRINTK_4(TRACE_SMAPI, + "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n", + pSettings->bModemEnabled, + pSettings->usUartIRQ, + pSettings->usUartBaseIO); + + /* check for illegal values */ + if ( pSettings->usUartBaseIO == 0 ) + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n"); + if ( pSettings->usUartIRQ == 0 ) + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n"); + + PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC); + + return bRC; +} + + +int smapi_set_DSP_cfg(void) +{ + int bRC = -EIO; + int i; + unsigned short usAX, usBX, usCX, usDX, usDI, usSI; + unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; + unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; + unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 }; + unsigned short ausUartIrqs[] = { 3, 4 }; + + unsigned short numDspBases = 8; + unsigned short numUartBases = 4; + unsigned short numDspIrqs = 5; + unsigned short numUartIrqs = 2; + unsigned short dspio_index = 0, uartio_index = 0; + + PRINTK_5(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n", + mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io); + + if (mwave_3780i_io) { + for (i = 0; i < numDspBases; i++) { + if (mwave_3780i_io == ausDspBases[i]) + break; + } + if (i == numDspBases) { + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io); + return bRC; + } + dspio_index = i; + } + + if (mwave_3780i_irq) { + for (i = 0; i < numDspIrqs; i++) { + if (mwave_3780i_irq == ausDspIrqs[i]) + break; + } + if (i == numDspIrqs) { + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq); + return bRC; + } + } + + if (mwave_uart_io) { + for (i = 0; i < numUartBases; i++) { + if (mwave_uart_io == ausUartBases[i]) + break; + } + if (i == numUartBases) { + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io); + return bRC; + } + uartio_index = i; + } + + + if (mwave_uart_irq) { + for (i = 0; i < numUartIrqs; i++) { + if (mwave_uart_irq == ausUartIrqs[i]) + break; + } + if (i == numUartIrqs) { + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. 
Aborting.\n", mwave_uart_irq); + return bRC; + } + } + + if (mwave_uart_irq || mwave_uart_io) { + + /* Check serial port A */ + bRC = smapi_request(0x1402, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + /* bRC == 0 */ + if (usBX & 0x0100) { /* serial port A is present */ + if (usCX & 1) { /* serial port is enabled */ + if ((usSI & 0xFF) == mwave_uart_irq) { +#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_ERROR(KERN_ERR_MWAVE + "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); +#else + PRINTK_3(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); +#endif +#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_1(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n"); + bRC = smapi_request(0x1403, 0x0100, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1402, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; +#else + goto exit_conflict; +#endif + } else { + if ((usSI >> 8) == uartio_index) { +#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_ERROR(KERN_ERR_MWAVE + "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); +#else + PRINTK_3(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); +#endif +#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_1(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n"); + bRC = smapi_request (0x1403, 0x0100, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request (0x1402, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; +#else + goto exit_conflict; +#endif + } + } + } + } + + /* Check serial port B */ + bRC = smapi_request(0x1404, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + /* bRC == 0 */ + if (usBX & 0x0100) { /* serial port B is present */ + if (usCX & 1) { /* serial port is enabled */ + if ((usSI & 0xFF) == mwave_uart_irq) { +#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_ERROR(KERN_ERR_MWAVE + "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); +#else + PRINTK_3(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); +#endif +#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_1(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); + bRC = smapi_request(0x1405, 0x0100, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1404, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; +#else + goto exit_conflict; +#endif + } else { + if ((usSI >> 8) == uartio_index) { +#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_ERROR(KERN_ERR_MWAVE + "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); +#else + PRINTK_3(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg: Serial port B base I/O 
address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); +#endif +#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_1 (TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); + bRC = smapi_request (0x1405, 0x0100, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request (0x1404, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; +#else + goto exit_conflict; +#endif + } + } + } + } + + /* Check IR port */ + bRC = smapi_request(0x1700, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1704, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + /* bRC == 0 */ + if ((usCX & 0xff) != 0xff) { /* IR port not disabled */ + if ((usCX & 0xff) == mwave_uart_irq) { +#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_ERROR(KERN_ERR_MWAVE + "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); +#else + PRINTK_3(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); +#endif +#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_1(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); + bRC = smapi_request(0x1701, 0x0100, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1700, 0, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1705, 0x01ff, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1704, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; +#else + goto exit_conflict; +#endif + } else { + if ((usSI & 0xff) == uartio_index) { +#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_ERROR(KERN_ERR_MWAVE + "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); +#else + PRINTK_3(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); +#endif +#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES + PRINTK_1(TRACE_SMAPI, + "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); + bRC = smapi_request(0x1701, 0x0100, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1700, 0, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1705, 0x01ff, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + bRC = smapi_request(0x1704, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; +#else + goto exit_conflict; +#endif + } + } + } + } + + bRC = smapi_request(0x1802, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + + if (mwave_3780i_io) { + usDI = dspio_index; + } + if (mwave_3780i_irq) { + usSI = (usSI & 0xff00) | mwave_3780i_irq; + } + + bRC = smapi_request(0x1803, 0x0101, usDI, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + + bRC = 
smapi_request(0x1804, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + + if (mwave_uart_io) { + usSI = (usSI & 0x00ff) | (uartio_index << 8); + } + if (mwave_uart_irq) { + usSI = (usSI & 0xff00) | mwave_uart_irq; + } + bRC = smapi_request(0x1805, 0x0101, 0, usSI, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + + bRC = smapi_request(0x1802, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + + bRC = smapi_request(0x1804, 0x0000, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + if (bRC) goto exit_smapi_request_error; + +/* normal exit: */ + PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n"); + return 0; + +exit_conflict: + /* Message has already been printed */ + return -EIO; + +exit_smapi_request_error: + PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC); + return bRC; +} + + +int smapi_set_DSP_power_state(BOOLEAN bOn) +{ + int bRC = -EIO; + unsigned short usAX, usBX, usCX, usDX, usDI, usSI; + unsigned short usPowerFunction; + + PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn); + + usPowerFunction = (bOn) ? 1 : 0; + + bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + + PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC); + + return bRC; +} + +#if 0 +static int SmapiQuerySystemID(void) +{ + int bRC = -EIO; + unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff, + usDX = 0xffff, usDI = 0xffff, usSI = 0xffff; + + printk("smapi::SmapiQUerySystemID entry\n"); + bRC = smapi_request(0x0000, 0, 0, 0, + &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); + + if (bRC == 0) { + printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n", + usAX, usBX, usCX, usDX, usDI, usSI); + } else { + printk("smapi::SmapiQuerySystemID smapi_request error\n"); + } + + return bRC; +} +#endif /* 0 */ + +int smapi_init(void) +{ + int retval = -EIO; + unsigned short usSmapiID = 0; + unsigned long flags; + + PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n"); + + spin_lock_irqsave(&rtc_lock, flags); + usSmapiID = CMOS_READ(0x7C); + usSmapiID |= (CMOS_READ(0x7D) << 8); + spin_unlock_irqrestore(&rtc_lock, flags); + PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID); + + if (usSmapiID == 0x5349) { + spin_lock_irqsave(&rtc_lock, flags); + g_usSmapiPort = CMOS_READ(0x7E); + g_usSmapiPort |= (CMOS_READ(0x7F) << 8); + spin_unlock_irqrestore(&rtc_lock, flags); + if (g_usSmapiPort == 0) { + PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n"); + } else { + PRINTK_2(TRACE_SMAPI, + "smapi::smapi_init, exit TRUE g_usSmapiPort %x\n", + g_usSmapiPort); + retval = 0; + //SmapiQuerySystemID(); + } + } else { + PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n"); + retval = -ENXIO; + } + + return retval; +} diff --git a/kernel/drivers/char/mwave/smapi.h b/kernel/drivers/char/mwave/smapi.h new file mode 100644 index 000000000..64b2ec142 --- /dev/null +++ b/kernel/drivers/char/mwave/smapi.h @@ -0,0 +1,80 @@ +/* +* +* smapi.h -- declarations for SMAPI interface routines +* +* +* Written By: Mike Sullivan IBM Corporation +* +* Copyright (C) 1999 IBM Corporation +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any 
later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* NO WARRANTY +* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +* solely responsible for determining the appropriateness of using and +* distributing the Program and assumes all risks associated with its +* exercise of rights under this Agreement, including but not limited to +* the risks and costs of program errors, damage to or loss of data, +* programs or equipment, and unavailability or interruption of operations. +* +* DISCLAIMER OF LIABILITY +* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +* +* +* 10/23/2000 - Alpha Release +* First release to the public +*/ + +#ifndef _LINUX_SMAPI_H +#define _LINUX_SMAPI_H + +#define TRUE 1 +#define FALSE 0 +#define BOOLEAN int + +typedef struct { + int bDSPPresent; + int bDSPEnabled; + int bModemEnabled; + int bMIDIEnabled; + int bSblstEnabled; + unsigned short usDspIRQ; + unsigned short usDspDMA; + unsigned short usDspBaseIO; + unsigned short usUartIRQ; + unsigned short usUartBaseIO; + unsigned short usMidiIRQ; + unsigned short usMidiBaseIO; + unsigned short usSndblstIRQ; + unsigned short usSndblstDMA; + unsigned short usSndblstBaseIO; +} SMAPI_DSP_SETTINGS; + +int smapi_init(void); +int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings); +int smapi_set_DSP_cfg(void); +int smapi_set_DSP_power_state(BOOLEAN bOn); + + +#endif diff --git a/kernel/drivers/char/mwave/tp3780i.c b/kernel/drivers/char/mwave/tp3780i.c new file mode 100644 index 000000000..04e6d6a27 --- /dev/null +++ b/kernel/drivers/char/mwave/tp3780i.c @@ -0,0 +1,580 @@ +/* +* +* tp3780i.c -- board driver for 3780i on ThinkPads +* +* +* Written By: Mike Sullivan IBM Corporation +* +* Copyright (C) 1999 IBM Corporation +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* NO WARRANTY +* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +* solely responsible for determining the appropriateness of using and +* distributing the Program and assumes all risks associated with its +* exercise of rights under this Agreement, including but not limited to +* the risks and costs of program errors, damage to or loss of data, +* programs or equipment, and unavailability or interruption of operations. +* +* DISCLAIMER OF LIABILITY +* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +* +* +* 10/23/2000 - Alpha Release +* First release to the public +*/ + +#include +#include +#include +#include +#include +#include "smapi.h" +#include "mwavedd.h" +#include "tp3780i.h" +#include "3780i.h" +#include "mwavepub.h" + +static unsigned short s_ausThinkpadIrqToField[16] = + { 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004, + 0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 }; +static unsigned short s_ausThinkpadDmaToField[8] = + { 0x0001, 0x0002, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0003, 0x0004 }; +static unsigned short s_numIrqs = 16, s_numDmas = 8; + + +static void EnableSRAM(THINKPAD_BD_DATA * pBDData) +{ + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData; + DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable; + DSP_GPIO_MODE_15_8 rGpioMode; + + PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n"); + + MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8); + rGpioMode.GpioMode10 = 0; + WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode)); + + MKWORD(rGpioDriverEnable) = 0; + rGpioDriverEnable.Enable10 = TRUE; + rGpioDriverEnable.Mask10 = TRUE; + WriteMsaCfg(DSP_GpioDriverEnable_15_8, MKWORD(rGpioDriverEnable)); + + MKWORD(rGpioOutputData) = 0; + rGpioOutputData.Latch10 = 0; + rGpioOutputData.Mask10 = TRUE; + WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData)); + + PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n"); +} + + +static irqreturn_t UartInterrupt(int irq, void *dev_id) +{ + PRINTK_3(TRACE_TP3780I, + "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id); + return IRQ_HANDLED; +} + +static irqreturn_t DspInterrupt(int irq, void *dev_id) +{ + pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + unsigned short usIPCSource = 0, usIsolationMask, usPCNum; + + PRINTK_3(TRACE_TP3780I, + "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id); + + if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) 
== 0) { + PRINTK_2(TRACE_TP3780I, + "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n", + usIPCSource); + usIsolationMask = 1; + for (usPCNum = 1; usPCNum <= 16; usPCNum++) { + if (usIPCSource & usIsolationMask) { + usIPCSource &= ~usIsolationMask; + PRINTK_3(TRACE_TP3780I, + "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n", + usPCNum, usIPCSource); + if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) { + pDrvData->IPCs[usPCNum - 1].usIntCount = 1; + } + PRINTK_2(TRACE_TP3780I, + "tp3780i::DspInterrupt usIntCount %x\n", + pDrvData->IPCs[usPCNum - 1].usIntCount); + if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == TRUE) { + PRINTK_2(TRACE_TP3780I, + "tp3780i::DspInterrupt, waking up usPCNum %x\n", + usPCNum - 1); + wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue); + } else { + PRINTK_2(TRACE_TP3780I, + "tp3780i::DspInterrupt, no one waiting for IPC %x\n", + usPCNum - 1); + } + } + if (usIPCSource == 0) + break; + /* try next IPC */ + usIsolationMask = usIsolationMask << 1; + } + } else { + PRINTK_1(TRACE_TP3780I, + "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n"); + } + PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n"); + return IRQ_HANDLED; +} + + +int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData); + + pBDData->bDSPEnabled = FALSE; + pSettings->bInterruptClaimed = FALSE; + + retval = smapi_init(); + if (retval) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n"); + } else { + if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) { + retval = smapi_set_DSP_cfg(); + } + } + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval); + + return retval; +} + +int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData); + + return retval; +} + +int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData) +{ + SMAPI_DSP_SETTINGS rSmapiInfo; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData); + + if (smapi_query_DSP_cfg(&rSmapiInfo)) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n"); + return -EIO; + } + + /* Sanity check */ + if ( + ( rSmapiInfo.usDspIRQ == 0 ) + || ( rSmapiInfo.usDspBaseIO == 0 ) + || ( rSmapiInfo.usUartIRQ == 0 ) + || ( rSmapiInfo.usUartBaseIO == 0 ) + ) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. 
Aborting.\n"); + return -EIO; + } + + pSettings->bDSPEnabled = (rSmapiInfo.bDSPEnabled && rSmapiInfo.bDSPPresent); + pSettings->bModemEnabled = rSmapiInfo.bModemEnabled; + pSettings->usDspIrq = rSmapiInfo.usDspIRQ; + pSettings->usDspDma = rSmapiInfo.usDspDMA; + pSettings->usDspBaseIO = rSmapiInfo.usDspBaseIO; + pSettings->usUartIrq = rSmapiInfo.usUartIRQ; + pSettings->usUartBaseIO = rSmapiInfo.usUartBaseIO; + + pSettings->uDStoreSize = TP_ABILITIES_DATA_SIZE; + pSettings->uIStoreSize = TP_ABILITIES_INST_SIZE; + pSettings->uIps = TP_ABILITIES_INTS_PER_SEC; + + if (pSettings->bDSPEnabled && pSettings->bModemEnabled && pSettings->usDspIrq == pSettings->usUartIrq) { + pBDData->bShareDspIrq = pBDData->bShareUartIrq = 1; + } else { + pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0; + } + + PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n"); + + return 0; +} + + +int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + struct resource *pres; + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData); + + pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i"); + if ( pres == NULL ) retval = -EIO; + + if (retval) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO); + retval = -EIO; + } + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval); + + return retval; +} + +int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData); + + release_region(pSettings->usDspBaseIO & (~3), 16); + + if (pSettings->bInterruptClaimed) { + free_irq(pSettings->usDspIrq, NULL); + pSettings->bInterruptClaimed = FALSE; + } + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval); + + return retval; +} + + + +int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) +{ + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + BOOLEAN bDSPPoweredUp = FALSE, bInterruptAllocated = FALSE; + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData); + + if (pBDData->bDSPEnabled) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n"); + goto exit_cleanup; + } + + if (!pSettings->bDSPEnabled) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n"); + goto exit_cleanup; + } + + if ( + (pSettings->usDspIrq >= s_numIrqs) + || (pSettings->usDspDma >= s_numDmas) + || (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF) + || (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF) + ) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq); + goto exit_cleanup; + } + + if ( + ((pSettings->usDspBaseIO & 0xF00F) != 0) + || (pSettings->usDspBaseIO & 0x0FF0) == 0 + ) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO); + goto exit_cleanup; + } + + if (pSettings->bModemEnabled) { + if ( + pSettings->usUartIrq >= s_numIrqs + || s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF + ) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq); + goto exit_cleanup; + } + 
switch (pSettings->usUartBaseIO) { + case 0x03F8: + case 0x02F8: + case 0x03E8: + case 0x02E8: + break; + + default: + PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO); + goto exit_cleanup; + } + } + + pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = TRUE; + pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = TRUE; + + if (pBDData->bShareDspIrq) { + pSettings->bDspIrqActiveLow = FALSE; + } + if (pBDData->bShareUartIrq) { + pSettings->bUartIrqActiveLow = FALSE; + } + + pSettings->usNumTransfers = TP_CFG_NumTransfers; + pSettings->usReRequest = TP_CFG_RerequestTimer; + pSettings->bEnableMEMCS16 = TP_CFG_MEMCS16; + pSettings->usIsaMemCmdWidth = TP_CFG_IsaMemCmdWidth; + pSettings->bGateIOCHRDY = TP_CFG_GateIOCHRDY; + pSettings->bEnablePwrMgmt = TP_CFG_EnablePwrMgmt; + pSettings->usHBusTimerLoadValue = TP_CFG_HBusTimerValue; + pSettings->bDisableLBusTimeout = TP_CFG_DisableLBusTimeout; + pSettings->usN_Divisor = TP_CFG_N_Divisor; + pSettings->usM_Multiplier = TP_CFG_M_Multiplier; + pSettings->bPllBypass = TP_CFG_PllBypass; + pSettings->usChipletEnable = TP_CFG_ChipletEnable; + + if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq); + goto exit_cleanup; + } else { /* no conflict just release */ + free_irq(pSettings->usUartIrq, NULL); + } + + if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) { + PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq); + goto exit_cleanup; + } else { + PRINTK_3(TRACE_TP3780I, + "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n", + pSettings->usDspIrq, pBDData->bShareDspIrq); + bInterruptAllocated = TRUE; + pSettings->bInterruptClaimed = TRUE; + } + + smapi_set_DSP_power_state(FALSE); + if (smapi_set_DSP_power_state(TRUE)) { + PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(TRUE) failed\n"); + goto exit_cleanup; + } else { + bDSPPoweredUp = TRUE; + } + + if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) { + PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n"); + goto exit_cleanup; + } + + EnableSRAM(pBDData); + + pBDData->bDSPEnabled = TRUE; + + PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n"); + + return 0; + +exit_cleanup: + PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n"); + if (bDSPPoweredUp) + smapi_set_DSP_power_state(FALSE); + if (bInterruptAllocated) { + free_irq(pSettings->usDspIrq, NULL); + pSettings->bInterruptClaimed = FALSE; + } + return -EIO; +} + + +int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData); + + if (pBDData->bDSPEnabled) { + dsp3780I_DisableDSP(&pBDData->rDspSettings); + if (pSettings->bInterruptClaimed) { + free_irq(pSettings->usDspIrq, NULL); + pSettings->bInterruptClaimed = FALSE; + } + smapi_set_DSP_power_state(FALSE); + pBDData->bDSPEnabled = FALSE; + } + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval); + + return retval; +} + + +int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + + PRINTK_2(TRACE_TP3780I, 
"tp3780i::tp3780I_ResetDSP entry pBDData %p\n", + pBDData); + + if (dsp3780I_Reset(pSettings) == 0) { + EnableSRAM(pBDData); + } else { + retval = -EIO; + } + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval); + + return retval; +} + + +int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData); + + if (dsp3780I_Run(pSettings) == 0) { + // @BUG @TBD EnableSRAM(pBDData); + } else { + retval = -EIO; + } + + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval); + + return retval; +} + + +int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities) +{ + int retval = 0; + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData); + + memset(pAbilities, 0, sizeof(*pAbilities)); + /* fill out standard constant fields */ + pAbilities->instr_per_sec = pBDData->rDspSettings.uIps; + pAbilities->data_size = pBDData->rDspSettings.uDStoreSize; + pAbilities->inst_size = pBDData->rDspSettings.uIStoreSize; + pAbilities->bus_dma_bw = pBDData->rDspSettings.uDmaBandwidth; + + /* fill out dynamically determined fields */ + pAbilities->component_list[0] = 0x00010000 | MW_ADC_MASK; + pAbilities->component_list[1] = 0x00010000 | MW_ACI_MASK; + pAbilities->component_list[2] = 0x00010000 | MW_AIC1_MASK; + pAbilities->component_list[3] = 0x00010000 | MW_AIC2_MASK; + pAbilities->component_list[4] = 0x00010000 | MW_CDDAC_MASK; + pAbilities->component_list[5] = 0x00010000 | MW_MIDI_MASK; + pAbilities->component_list[6] = 0x00010000 | MW_UART_MASK; + pAbilities->component_count = 7; + + /* Fill out Mwave OS and BIOS task names */ + + memcpy(pAbilities->mwave_os_name, TP_ABILITIES_MWAVEOS_NAME, + sizeof(TP_ABILITIES_MWAVEOS_NAME)); + memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME, + sizeof(TP_ABILITIES_BIOSTASK_NAME)); + + PRINTK_1(TRACE_TP3780I, + "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n"); + + return retval; +} + +int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, + void __user *pvBuffer, unsigned int uCount, + unsigned long ulDSPAddr) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + BOOLEAN bRC = 0; + + PRINTK_6(TRACE_TP3780I, + "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n", + pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr); + + if (pBDData->bDSPEnabled) { + switch (uOpcode) { + case IOCTL_MW_READ_DATA: + bRC = dsp3780I_ReadDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); + break; + + case IOCTL_MW_READCLEAR_DATA: + bRC = dsp3780I_ReadAndClearDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); + break; + + case IOCTL_MW_WRITE_DATA: + bRC = dsp3780I_WriteDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); + break; + } + } + + retval = (bRC) ? 
-EIO : 0; + PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval); + + return retval; +} + + +int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, + void __user *pvBuffer, unsigned int uCount, + unsigned long ulDSPAddr) +{ + int retval = 0; + DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + unsigned short usDspBaseIO = pSettings->usDspBaseIO; + BOOLEAN bRC = 0; + + PRINTK_6(TRACE_TP3780I, + "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n", + pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr); + + if (pBDData->bDSPEnabled) { + switch (uOpcode) { + case IOCTL_MW_READ_INST: + bRC = dsp3780I_ReadIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); + break; + + case IOCTL_MW_WRITE_INST: + bRC = dsp3780I_WriteIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); + break; + } + } + + retval = (bRC) ? -EIO : 0; + + PRINTK_2(TRACE_TP3780I, + "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval); + + return retval; +} + diff --git a/kernel/drivers/char/mwave/tp3780i.h b/kernel/drivers/char/mwave/tp3780i.h new file mode 100644 index 000000000..07685b685 --- /dev/null +++ b/kernel/drivers/char/mwave/tp3780i.h @@ -0,0 +1,103 @@ +/* +* +* tp3780i.h -- declarations for tp3780i.c +* +* +* Written By: Mike Sullivan IBM Corporation +* +* Copyright (C) 1999 IBM Corporation +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* NO WARRANTY +* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +* solely responsible for determining the appropriateness of using and +* distributing the Program and assumes all risks associated with its +* exercise of rights under this Agreement, including but not limited to +* the risks and costs of program errors, damage to or loss of data, +* programs or equipment, and unavailability or interruption of operations. 
+* +* DISCLAIMER OF LIABILITY +* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +* +* +* 10/23/2000 - Alpha Release +* First release to the public +*/ + +#ifndef _LINUX_TP3780I_H +#define _LINUX_TP3780I_H + +#include +#include "mwavepub.h" + + +/* DSP abilities constants for 3780i based Thinkpads */ +#define TP_ABILITIES_INTS_PER_SEC 39160800 +#define TP_ABILITIES_DATA_SIZE 32768 +#define TP_ABILITIES_INST_SIZE 32768 +#define TP_ABILITIES_MWAVEOS_NAME "mwaveos0700.dsp" +#define TP_ABILITIES_BIOSTASK_NAME "mwbio701.dsp" + + +/* DSP configuration values for 3780i based Thinkpads */ +#define TP_CFG_NumTransfers 3 /* 16 transfers */ +#define TP_CFG_RerequestTimer 1 /* 2 usec */ +#define TP_CFG_MEMCS16 0 /* Disabled, 16-bit memory assumed */ +#define TP_CFG_IsaMemCmdWidth 3 /* 295 nsec (16-bit) */ +#define TP_CFG_GateIOCHRDY 0 /* No IOCHRDY gating */ +#define TP_CFG_EnablePwrMgmt 1 /* Enable low power suspend/resume */ +#define TP_CFG_HBusTimerValue 255 /* HBus timer load value */ +#define TP_CFG_DisableLBusTimeout 0 /* Enable LBus timeout */ +#define TP_CFG_N_Divisor 32 /* Clock = 39.1608 MHz */ +#define TP_CFG_M_Multiplier 37 /* " */ +#define TP_CFG_PllBypass 0 /* don't bypass */ +#define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */ + +typedef struct { + int bDSPEnabled; + int bShareDspIrq; + int bShareUartIrq; + DSP_3780I_CONFIG_SETTINGS rDspSettings; +} THINKPAD_BD_DATA; + +int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData); +int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData); +int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData); +int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData); +int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData); +int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData); +int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData); +int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData); +int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities); +int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData); +int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, + void __user *pvBuffer, unsigned int uCount, + unsigned long ulDSPAddr); +int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, + void __user *pvBuffer, unsigned int uCount, + unsigned long ulDSPAddr); + + +#endif diff --git a/kernel/drivers/char/nsc_gpio.c b/kernel/drivers/char/nsc_gpio.c new file mode 100644 index 000000000..b07b119ae --- /dev/null +++ b/kernel/drivers/char/nsc_gpio.c @@ -0,0 +1,139 @@ +/* linux/drivers/char/nsc_gpio.c + + National Semiconductor common GPIO device-file/VFS methods. + Allows a user space process to control the GPIO pins.
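+
+ As a usage sketch (the device node name and pin number here are
+ illustrative only; the real minor-to-pin mapping comes from the chip
+ driver that registers these methods, e.g. scx200_gpio; error handling
+ and #includes omitted):
+
+ int fd = open("/dev/gpio07", O_WRONLY);
+ write(fd, "OT1v", 4); // output-enable, push-pull, drive high, dump
+ close(fd);
+
+ Each character is a single command interpreted by nsc_gpio_write() below.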
+ + Copyright (c) 2001,2002 Christer Weinigel + Copyright (c) 2005 Jim Cromie +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NAME "nsc_gpio" + +void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index) +{ + /* retrieve current config w/o changing it */ + u32 config = amp->gpio_config(index, ~0, 0); + + /* user requested via 'v' command, so its INFO */ + dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n", + index, config, + (config & 1) ? "OE" : "TS", /* output-enabled/tristate */ + (config & 2) ? "PP" : "OD", /* push pull / open drain */ + (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */ + (config & 8) ? "LOCKED" : "", /* locked / unlocked */ + (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */ + (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */ + (config & 64) ? "DEBOUNCE" : "", /* debounce */ + + amp->gpio_get(index), amp->gpio_current(index)); +} + +ssize_t nsc_gpio_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + unsigned m = iminor(file_inode(file)); + struct nsc_gpio_ops *amp = file->private_data; + struct device *dev = amp->dev; + size_t i; + int err = 0; + + for (i = 0; i < len; ++i) { + char c; + if (get_user(c, data + i)) + return -EFAULT; + switch (c) { + case '0': + amp->gpio_set(m, 0); + break; + case '1': + amp->gpio_set(m, 1); + break; + case 'O': + dev_dbg(dev, "GPIO%d output enabled\n", m); + amp->gpio_config(m, ~1, 1); + break; + case 'o': + dev_dbg(dev, "GPIO%d output disabled\n", m); + amp->gpio_config(m, ~1, 0); + break; + case 'T': + dev_dbg(dev, "GPIO%d output is push pull\n", m); + amp->gpio_config(m, ~2, 2); + break; + case 't': + dev_dbg(dev, "GPIO%d output is open drain\n", m); + amp->gpio_config(m, ~2, 0); + break; + case 'P': + dev_dbg(dev, "GPIO%d pull up enabled\n", m); + amp->gpio_config(m, ~4, 4); + break; + case 'p': + dev_dbg(dev, "GPIO%d pull up disabled\n", m); + amp->gpio_config(m, ~4, 0); + break; + case 'v': + /* View Current pin settings */ + amp->gpio_dump(amp, m); + break; + case '\n': + /* end of settings string, do nothing */ + break; + default: + dev_err(dev, "io%2d bad setting: chr<0x%2x>\n", + m, (int)c); + err++; + } + } + if (err) + return -EINVAL; /* full string handled, report error */ + + return len; +} + +ssize_t nsc_gpio_read(struct file *file, char __user * buf, + size_t len, loff_t * ppos) +{ + unsigned m = iminor(file_inode(file)); + int value; + struct nsc_gpio_ops *amp = file->private_data; + + value = amp->gpio_get(m); + if (put_user(value ? 
'1' : '0', buf)) + return -EFAULT; + + return 1; +} + +/* common file-ops routines for both scx200_gpio and pc87360_gpio */ +EXPORT_SYMBOL(nsc_gpio_write); +EXPORT_SYMBOL(nsc_gpio_read); +EXPORT_SYMBOL(nsc_gpio_dump); + +static int __init nsc_gpio_init(void) +{ + printk(KERN_DEBUG NAME " initializing\n"); + return 0; +} + +static void __exit nsc_gpio_cleanup(void) +{ + printk(KERN_DEBUG NAME " cleanup\n"); +} + +module_init(nsc_gpio_init); +module_exit(nsc_gpio_cleanup); + +MODULE_AUTHOR("Jim Cromie "); +MODULE_DESCRIPTION("NatSemi GPIO Common Methods"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/nvram.c b/kernel/drivers/char/nvram.c new file mode 100644 index 000000000..9df78e2cc --- /dev/null +++ b/kernel/drivers/char/nvram.c @@ -0,0 +1,724 @@ +/* + * CMOS/NV-RAM driver for Linux + * + * Copyright (C) 1997 Roman Hodek + * idea by and with help from Richard Jelinek + * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) + * + * This driver allows you to access the contents of the non-volatile memory in + * the mc146818rtc.h real-time clock. This chip is built into all PCs and into + * many Atari machines. In the former it's called "CMOS-RAM", in the latter + * "NVRAM" (NV stands for non-volatile). + * + * The data are supplied as a (seekable) character device, /dev/nvram. The + * size of this file is dependent on the controller. The usual size is 114, + * the number of freely available bytes in the memory (i.e., not used by the + * RTC itself). + * + * Checksums over the NVRAM contents are managed by this driver. In case of a + * bad checksum, reads and writes return -EIO. The checksum can be initialized + * to a sane state either by ioctl(NVRAM_INIT) (clear whole NVRAM) or + * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid + * again; use with care!) + * + * This file also provides some functions for other parts of the kernel that + * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}. + * Obviously this can be used only if this driver is always configured into + * the kernel and is not a module. Since the functions are used by some Atari + * drivers, this is the case on the Atari. + * + * + * 1.1 Cesar Barros: SMP locking fixes + * added changelog + * 1.2 Erik Gilling: Cobalt Networks support + * Tim Hockin: general cleanup, Cobalt support + * 1.3 Wim Van Sebroeck: convert PRINT_PROC to seq_file + */ + +#define NVRAM_VERSION "1.3" + +#include +#include + +#define PC 1 +#define ATARI 2 + +/* select machine configuration */ +#if defined(CONFIG_ATARI) +# define MACH ATARI +#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */ +# define MACH PC +#else +# error Cannot build nvram driver for this machine configuration. 
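+/* Ports to other machines need to add a MACH type above and supply matching + * mach_check_checksum/mach_set_checksum/mach_proc_infos helpers, as done for + * PC and Atari at the bottom of this file. */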
+#endif + +#if MACH == PC + +/* RTC in a PC */ +#define CHECK_DRIVER_INIT() 1 + +/* On PCs, the checksum is built only over bytes 2..31 */ +#define PC_CKS_RANGE_START 2 +#define PC_CKS_RANGE_END 31 +#define PC_CKS_LOC 32 +#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE) + +#define mach_check_checksum pc_check_checksum +#define mach_set_checksum pc_set_checksum +#define mach_proc_infos pc_proc_infos + +#endif + +#if MACH == ATARI + +/* Special parameters for RTC in Atari machines */ +#include +#include +#define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) +#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK)) + +#define NVRAM_BYTES 50 + +/* On Ataris, the checksum is over all bytes except the checksum bytes + * themselves; these are at the very end */ +#define ATARI_CKS_RANGE_START 0 +#define ATARI_CKS_RANGE_END 47 +#define ATARI_CKS_LOC 48 + +#define mach_check_checksum atari_check_checksum +#define mach_set_checksum atari_set_checksum +#define mach_proc_infos atari_proc_infos + +#endif + +/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with + * rtc_lock held. Due to the index-port/data-port design of the RTC, we + * don't want two different things trying to get to it at once. (e.g. the + * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static DEFINE_MUTEX(nvram_mutex); +static DEFINE_SPINLOCK(nvram_state_lock); +static int nvram_open_cnt; /* #times opened */ +static int nvram_open_mode; /* special open modes */ +#define NVRAM_WRITE 1 /* opened for writing (exclusive) */ +#define NVRAM_EXCL 2 /* opened with O_EXCL */ + +static int mach_check_checksum(void); +static void mach_set_checksum(void); + +#ifdef CONFIG_PROC_FS +static void mach_proc_infos(unsigned char *contents, struct seq_file *seq, + void *offset); +#endif + +/* + * These functions are provided to be called internally or by other parts of + * the kernel. It's up to the caller to ensure correct checksum before reading + * or after writing (needs to be done only once). + * + * It is worth noting that these functions all access bytes of general + * purpose memory in the NVRAM - that is to say, they all add the + * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not + * know about the RTC cruft. 
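+ * + * A sketch of the intended calling pattern from elsewhere in the kernel + * (offset 0 means the first general-purpose byte, i.e. CMOS byte + * NVRAM_FIRST_BYTE, not CMOS byte 0): + * + *	unsigned char first; + *	if (nvram_check_checksum()) + *		first = nvram_read_byte(0);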
+ */ + +unsigned char __nvram_read_byte(int i) +{ + return CMOS_READ(NVRAM_FIRST_BYTE + i); +} +EXPORT_SYMBOL(__nvram_read_byte); + +unsigned char nvram_read_byte(int i) +{ + unsigned long flags; + unsigned char c; + + spin_lock_irqsave(&rtc_lock, flags); + c = __nvram_read_byte(i); + spin_unlock_irqrestore(&rtc_lock, flags); + return c; +} +EXPORT_SYMBOL(nvram_read_byte); + +/* This races nicely with trying to read with checksum checking (nvram_read) */ +void __nvram_write_byte(unsigned char c, int i) +{ + CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); +} +EXPORT_SYMBOL(__nvram_write_byte); + +void nvram_write_byte(unsigned char c, int i) +{ + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + __nvram_write_byte(c, i); + spin_unlock_irqrestore(&rtc_lock, flags); +} +EXPORT_SYMBOL(nvram_write_byte); + +int __nvram_check_checksum(void) +{ + return mach_check_checksum(); +} +EXPORT_SYMBOL(__nvram_check_checksum); + +int nvram_check_checksum(void) +{ + unsigned long flags; + int rv; + + spin_lock_irqsave(&rtc_lock, flags); + rv = __nvram_check_checksum(); + spin_unlock_irqrestore(&rtc_lock, flags); + return rv; +} +EXPORT_SYMBOL(nvram_check_checksum); + +static void __nvram_set_checksum(void) +{ + mach_set_checksum(); +} + +#if 0 +void nvram_set_checksum(void) +{ + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + __nvram_set_checksum(); + spin_unlock_irqrestore(&rtc_lock, flags); +} +#endif /* 0 */ + +/* + * These are the file operations for user access to /dev/nvram + */ + +static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) +{ + switch (origin) { + case 0: + /* nothing to do */ + break; + case 1: + offset += file->f_pos; + break; + case 2: + offset += NVRAM_BYTES; + break; + default: + return -EINVAL; + } + + return (offset >= 0) ?
(file->f_pos = offset) : -EINVAL; +} + +static ssize_t nvram_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned char contents[NVRAM_BYTES]; + unsigned i = *ppos; + unsigned char *tmp; + + spin_lock_irq(&rtc_lock); + + if (!__nvram_check_checksum()) + goto checksum_err; + + for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp) + *tmp = __nvram_read_byte(i); + + spin_unlock_irq(&rtc_lock); + + if (copy_to_user(buf, contents, tmp - contents)) + return -EFAULT; + + *ppos = i; + + return tmp - contents; + +checksum_err: + spin_unlock_irq(&rtc_lock); + return -EIO; +} + +static ssize_t nvram_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned char contents[NVRAM_BYTES]; + unsigned i = *ppos; + unsigned char *tmp; + + if (i >= NVRAM_BYTES) + return 0; /* Past EOF */ + + if (count > NVRAM_BYTES - i) + count = NVRAM_BYTES - i; + if (count > NVRAM_BYTES) + return -EFAULT; /* Can't happen, but prove it to gcc */ + + if (copy_from_user(contents, buf, count)) + return -EFAULT; + + spin_lock_irq(&rtc_lock); + + if (!__nvram_check_checksum()) + goto checksum_err; + + for (tmp = contents; count--; ++i, ++tmp) + __nvram_write_byte(*tmp, i); + + __nvram_set_checksum(); + + spin_unlock_irq(&rtc_lock); + + *ppos = i; + + return tmp - contents; + +checksum_err: + spin_unlock_irq(&rtc_lock); + return -EIO; +} + +static long nvram_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int i; + + switch (cmd) { + + case NVRAM_INIT: + /* initialize NVRAM contents and checksum */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + mutex_lock(&nvram_mutex); + spin_lock_irq(&rtc_lock); + + for (i = 0; i < NVRAM_BYTES; ++i) + __nvram_write_byte(0, i); + __nvram_set_checksum(); + + spin_unlock_irq(&rtc_lock); + mutex_unlock(&nvram_mutex); + return 0; + + case NVRAM_SETCKS: + /* just set checksum, contents unchanged (maybe useful after + * checksum garbaged somehow...) 
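+ * From user space this is just ioctl(fd, NVRAM_SETCKS, 0) on an open + * /dev/nvram descriptor (a sketch; CAP_SYS_ADMIN is required, as + * checked below).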
*/ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + mutex_lock(&nvram_mutex); + spin_lock_irq(&rtc_lock); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + mutex_unlock(&nvram_mutex); + return 0; + + default: + return -ENOTTY; + } +} + +static int nvram_open(struct inode *inode, struct file *file) +{ + spin_lock(&nvram_state_lock); + + if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || + (nvram_open_mode & NVRAM_EXCL) || + ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) { + spin_unlock(&nvram_state_lock); + return -EBUSY; + } + + if (file->f_flags & O_EXCL) + nvram_open_mode |= NVRAM_EXCL; + if (file->f_mode & FMODE_WRITE) + nvram_open_mode |= NVRAM_WRITE; + nvram_open_cnt++; + + spin_unlock(&nvram_state_lock); + + return 0; +} + +static int nvram_release(struct inode *inode, struct file *file) +{ + spin_lock(&nvram_state_lock); + + nvram_open_cnt--; + + /* if only one instance is open, clear the EXCL bit */ + if (nvram_open_mode & NVRAM_EXCL) + nvram_open_mode &= ~NVRAM_EXCL; + if (file->f_mode & FMODE_WRITE) + nvram_open_mode &= ~NVRAM_WRITE; + + spin_unlock(&nvram_state_lock); + + return 0; +} + +#ifndef CONFIG_PROC_FS +static int nvram_add_proc_fs(void) +{ + return 0; +} + +#else + +static int nvram_proc_read(struct seq_file *seq, void *offset) +{ + unsigned char contents[NVRAM_BYTES]; + int i = 0; + + spin_lock_irq(&rtc_lock); + for (i = 0; i < NVRAM_BYTES; ++i) + contents[i] = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); + + mach_proc_infos(contents, seq, offset); + + return 0; +} + +static int nvram_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, nvram_proc_read, NULL); +} + +static const struct file_operations nvram_proc_fops = { + .owner = THIS_MODULE, + .open = nvram_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int nvram_add_proc_fs(void) +{ + if (!proc_create("driver/nvram", 0, NULL, &nvram_proc_fops)) + return -ENOMEM; + return 0; +} + +#endif /* CONFIG_PROC_FS */ + +static const struct file_operations nvram_fops = { + .owner = THIS_MODULE, + .llseek = nvram_llseek, + .read = nvram_read, + .write = nvram_write, + .unlocked_ioctl = nvram_ioctl, + .open = nvram_open, + .release = nvram_release, +}; + +static struct miscdevice nvram_dev = { + NVRAM_MINOR, + "nvram", + &nvram_fops +}; + +static int __init nvram_init(void) +{ + int ret; + + /* First test whether the driver should init at all */ + if (!CHECK_DRIVER_INIT()) + return -ENODEV; + + ret = misc_register(&nvram_dev); + if (ret) { + printk(KERN_ERR "nvram: can't misc_register on minor=%d\n", + NVRAM_MINOR); + goto out; + } + ret = nvram_add_proc_fs(); + if (ret) { + printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n"); + goto outmisc; + } + ret = 0; + printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n"); +out: + return ret; +outmisc: + misc_deregister(&nvram_dev); + goto out; +} + +static void __exit nvram_cleanup_module(void) +{ + remove_proc_entry("driver/nvram", NULL); + misc_deregister(&nvram_dev); +} + +module_init(nvram_init); +module_exit(nvram_cleanup_module); + +/* + * Machine specific functions + */ + +#if MACH == PC + +static int pc_check_checksum(void) +{ + int i; + unsigned short sum = 0; + unsigned short expect; + + for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + expect = __nvram_read_byte(PC_CKS_LOC)<<8 | + __nvram_read_byte(PC_CKS_LOC+1); + return (sum & 0xffff) == expect; +} + +static void pc_set_checksum(void) +{ + int i; + 
unsigned short sum = 0; + + for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + __nvram_write_byte(sum >> 8, PC_CKS_LOC); + __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1); +} + +#ifdef CONFIG_PROC_FS + +static char *floppy_types[] = { + "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", + "3.5'' 2.88M", "3.5'' 2.88M" +}; + +static char *gfx_types[] = { + "EGA, VGA, ... (with BIOS)", + "CGA (40 cols)", + "CGA (80 cols)", + "monochrome", +}; + +static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq, + void *offset) +{ + int checksum; + int type; + + spin_lock_irq(&rtc_lock); + checksum = __nvram_check_checksum(); + spin_unlock_irq(&rtc_lock); + + seq_printf(seq, "Checksum status: %svalid\n", checksum ? "" : "not "); + + seq_printf(seq, "# floppies : %d\n", + (nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0); + seq_printf(seq, "Floppy 0 type : "); + type = nvram[2] >> 4; + if (type < ARRAY_SIZE(floppy_types)) + seq_printf(seq, "%s\n", floppy_types[type]); + else + seq_printf(seq, "%d (unknown)\n", type); + seq_printf(seq, "Floppy 1 type : "); + type = nvram[2] & 0x0f; + if (type < ARRAY_SIZE(floppy_types)) + seq_printf(seq, "%s\n", floppy_types[type]); + else + seq_printf(seq, "%d (unknown)\n", type); + + seq_printf(seq, "HD 0 type : "); + type = nvram[4] >> 4; + if (type) + seq_printf(seq, "%02x\n", type == 0x0f ? nvram[11] : type); + else + seq_printf(seq, "none\n"); + + seq_printf(seq, "HD 1 type : "); + type = nvram[4] & 0x0f; + if (type) + seq_printf(seq, "%02x\n", type == 0x0f ? nvram[12] : type); + else + seq_printf(seq, "none\n"); + + seq_printf(seq, "HD type 48 data: %d/%d/%d C/H/S, precomp %d, lz %d\n", + nvram[18] | (nvram[19] << 8), + nvram[20], nvram[25], + nvram[21] | (nvram[22] << 8), nvram[23] | (nvram[24] << 8)); + seq_printf(seq, "HD type 49 data: %d/%d/%d C/H/S, precomp %d, lz %d\n", + nvram[39] | (nvram[40] << 8), + nvram[41], nvram[46], + nvram[42] | (nvram[43] << 8), nvram[44] | (nvram[45] << 8)); + + seq_printf(seq, "DOS base memory: %d kB\n", nvram[7] | (nvram[8] << 8)); + seq_printf(seq, "Extended memory: %d kB (configured), %d kB (tested)\n", + nvram[9] | (nvram[10] << 8), nvram[34] | (nvram[35] << 8)); + + seq_printf(seq, "Gfx adapter : %s\n", + gfx_types[(nvram[6] >> 4) & 3]); + + seq_printf(seq, "FPU : %sinstalled\n", + (nvram[6] & 2) ? 
"" : "not "); + + return; +} +#endif + +#endif /* MACH == PC */ + +#if MACH == ATARI + +static int atari_check_checksum(void) +{ + int i; + unsigned char sum = 0; + + for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) && + (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff)); +} + +static void atari_set_checksum(void) +{ + int i; + unsigned char sum = 0; + + for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + __nvram_write_byte(~sum, ATARI_CKS_LOC); + __nvram_write_byte(sum, ATARI_CKS_LOC + 1); +} + +#ifdef CONFIG_PROC_FS + +static struct { + unsigned char val; + char *name; +} boot_prefs[] = { + { 0x80, "TOS" }, + { 0x40, "ASV" }, + { 0x20, "NetBSD (?)" }, + { 0x10, "Linux" }, + { 0x00, "unspecified" } +}; + +static char *languages[] = { + "English (US)", + "German", + "French", + "English (UK)", + "Spanish", + "Italian", + "6 (undefined)", + "Swiss (French)", + "Swiss (German)" +}; + +static char *dateformat[] = { + "MM%cDD%cYY", + "DD%cMM%cYY", + "YY%cMM%cDD", + "YY%cDD%cMM", + "4 (undefined)", + "5 (undefined)", + "6 (undefined)", + "7 (undefined)" +}; + +static char *colors[] = { + "2", "4", "16", "256", "65536", "??", "??", "??" +}; + +static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq, + void *offset) +{ + int checksum = nvram_check_checksum(); + int i; + unsigned vmode; + + seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not "); + + seq_printf(seq, "Boot preference : "); + for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) { + if (nvram[1] == boot_prefs[i].val) { + seq_printf(seq, "%s\n", boot_prefs[i].name); + break; + } + } + if (i < 0) + seq_printf(seq, "0x%02x (undefined)\n", nvram[1]); + + seq_printf(seq, "SCSI arbitration : %s\n", + (nvram[16] & 0x80) ? "on" : "off"); + seq_printf(seq, "SCSI host ID : "); + if (nvram[16] & 0x80) + seq_printf(seq, "%d\n", nvram[16] & 7); + else + seq_printf(seq, "n/a\n"); + + /* the following entries are defined only for the Falcon */ + if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON) + return; + + seq_printf(seq, "OS language : "); + if (nvram[6] < ARRAY_SIZE(languages)) + seq_printf(seq, "%s\n", languages[nvram[6]]); + else + seq_printf(seq, "%u (undefined)\n", nvram[6]); + seq_printf(seq, "Keyboard language: "); + if (nvram[7] < ARRAY_SIZE(languages)) + seq_printf(seq, "%s\n", languages[nvram[7]]); + else + seq_printf(seq, "%u (undefined)\n", nvram[7]); + seq_printf(seq, "Date format : "); + seq_printf(seq, dateformat[nvram[8] & 7], + nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/'); + seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12); + seq_printf(seq, "Boot delay : "); + if (nvram[10] == 0) + seq_printf(seq, "default"); + else + seq_printf(seq, "%ds%s\n", nvram[10], + nvram[10] < 8 ? ", no memory test" : ""); + + vmode = (nvram[14] << 8) || nvram[15]; + seq_printf(seq, + "Video mode : %s colors, %d columns, %s %s monitor\n", + colors[vmode & 7], + vmode & 8 ? 80 : 40, + vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC"); + seq_printf(seq, " %soverscan, compat. mode %s%s\n", + vmode & 64 ? "" : "no ", + vmode & 128 ? "on" : "off", + vmode & 256 ? + (vmode & 16 ? 
", line doubling" : ", half screen") : ""); + + return; +} +#endif + +#endif /* MACH == ATARI */ + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(NVRAM_MINOR); diff --git a/kernel/drivers/char/nwbutton.c b/kernel/drivers/char/nwbutton.c new file mode 100644 index 000000000..76c490fa0 --- /dev/null +++ b/kernel/drivers/char/nwbutton.c @@ -0,0 +1,247 @@ +/* + * NetWinder Button Driver- + * Copyright (C) Alex Holden 1998, 1999. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define __NWBUTTON_C /* Tell the header file who we are */ +#include "nwbutton.h" + +static void button_sequence_finished (unsigned long parameters); + +static int button_press_count; /* The count of button presses */ +/* Times for the end of a sequence */ +static DEFINE_TIMER(button_timer, button_sequence_finished, 0, 0); +static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */ +static char button_output_buffer[32]; /* Stores data to write out of device */ +static int bcount; /* The number of bytes in the buffer */ +static int bdelay = BUTTON_DELAY; /* The delay, in jiffies */ +static struct button_callback button_callback_list[32]; /* The callback list */ +static int callback_count; /* The number of callbacks registered */ +static int reboot_count = NUM_PRESSES_REBOOT; /* Number of presses to reboot */ + +/* + * This function is called by other drivers to register a callback function + * to be called when a particular number of button presses occurs. + * The callback list is a static array of 32 entries (I somehow doubt many + * people are ever going to want to register more than 32 different actions + * to be performed by the kernel on different numbers of button presses ;). + * However, if an attempt to register a 33rd entry (perhaps a stuck loop + * somewhere registering the same entry over and over?) it will fail to + * do so and return -ENOMEM. If an attempt is made to register a null pointer, + * it will fail to do so and return -EINVAL. + * Because callbacks can be unregistered at random the list can become + * fragmented, so we need to search through the list until we find the first + * free entry. + * + * FIXME: Has anyone spotted any locking functions int his code recently ?? + */ + +int button_add_callback (void (*callback) (void), int count) +{ + int lp = 0; + if (callback_count == 32) { + return -ENOMEM; + } + if (!callback) { + return -EINVAL; + } + callback_count++; + for (; (button_callback_list [lp].callback); lp++); + button_callback_list [lp].callback = callback; + button_callback_list [lp].count = count; + return 0; +} + +/* + * This function is called by other drivers to deregister a callback function. + * If you attempt to unregister a callback which does not exist, it will fail + * with -EINVAL. If there is more than one entry with the same address, + * because it searches the list from end to beginning, it will unregister the + * last one to be registered first (FILO- First In Last Out). + * Note that this is not necessarily true if the entries are not submitted + * at the same time, because another driver could have unregistered a callback + * between the submissions creating a gap earlier in the list, which would + * be filled first at submission time. 
+ */
+
+int button_del_callback (void (*callback) (void))
+{
+	int lp = 31;
+	if (!callback) {
+		return -EINVAL;
+	}
+	while (lp >= 0) {
+		if ((button_callback_list [lp].callback) == callback) {
+			button_callback_list [lp].callback = NULL;
+			button_callback_list [lp].count = 0;
+			callback_count--;
+			return 0;
+		}
+		lp--;
+	}
+	return -EINVAL;
+}
+
+/*
+ * This function is called by button_sequence_finished to search through the
+ * list of callback functions, and call any of them whose count argument
+ * matches the current count of button presses. It starts at the beginning
+ * of the list and works up to the end. It will refuse to follow a null
+ * pointer (which should never happen anyway).
+ */
+
+static void button_consume_callbacks (int bpcount)
+{
+	int lp = 0;
+	for (; lp <= 31; lp++) {
+		if ((button_callback_list [lp].count) == bpcount) {
+			if (button_callback_list [lp].callback) {
+				button_callback_list[lp].callback();
+			}
+		}
+	}
+}
+
+/*
+ * This function is called when the button_timer times out.
+ * i.e. when you don't press the button for bdelay jiffies, this is taken to
+ * mean you have ended the sequence of key presses, and this function is
+ * called to wind things up (write the press_count out to /dev/button, call
+ * any matching registered function callbacks, initiate reboot, etc.).
+ */
+
+static void button_sequence_finished (unsigned long parameters)
+{
+#ifdef CONFIG_NWBUTTON_REBOOT		/* Reboot using button is enabled */
+	if (button_press_count == reboot_count)
+		kill_cad_pid(SIGINT, 1);	/* Ask init to reboot us */
+#endif /* CONFIG_NWBUTTON_REBOOT */
+	button_consume_callbacks (button_press_count);
+	bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
+	button_press_count = 0;		/* Reset the button press counter */
+	wake_up_interruptible (&button_wait_queue);
+}
+
+/*
+ * This handler is called when the orange button is pressed (GPIO 10 of the
+ * SuperIO chip, which maps to logical IRQ 26). If the press_count is 0,
+ * this is the first press, so it starts a timer and increments the counter.
+ * If it is higher than 0, it deletes the old timer, starts a new one, and
+ * increments the counter.
+ */
+
+static irqreturn_t button_handler (int irq, void *dev_id)
+{
+	button_press_count++;
+	mod_timer(&button_timer, jiffies + bdelay);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * This function is called when a user space program attempts to read
+ * /dev/nwbutton. It puts the device to sleep on the wait queue until
+ * button_sequence_finished writes some data to the buffer and flushes
+ * the queue, at which point it writes the data out to the device and
+ * returns the number of characters it has written. This function is
+ * reentrant, so that many processes can be attempting to read from the
+ * device at any one time.
+ */
+
+static int button_read (struct file *filp, char __user *buffer,
+			size_t count, loff_t *ppos)
+{
+	DEFINE_WAIT(wait);
+	prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE);
+	schedule();
+	finish_wait(&button_wait_queue, &wait);
+	return (copy_to_user (buffer, &button_output_buffer, bcount))
+		 ? -EFAULT : bcount;
+}
+
+/*
+ * This structure is the file operations structure, which specifies what
+ * callback functions the kernel should call when a user mode process
+ * attempts to perform these operations on the device.
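+ *
+ * A user-space sketch of consuming the device (device node name assumed,
+ * error handling omitted, not part of this driver):
+ *
+ *	char buf[32];
+ *	int fd = open("/dev/nwbutton", O_RDONLY);
+ *	int n = read(fd, buf, sizeof(buf));
+ *	if (n > 0)
+ *		printf("%.*s", n, buf);
+ *
+ * The read blocks until button_sequence_finished() wakes the wait queue,
+ * then returns the decimal press count, e.g. "2\n" after a double press.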
+ */ + +static const struct file_operations button_fops = { + .owner = THIS_MODULE, + .read = button_read, + .llseek = noop_llseek, +}; + +/* + * This structure is the misc device structure, which specifies the minor + * device number (158 in this case), the name of the device (for /proc/misc), + * and the address of the above file operations structure. + */ + +static struct miscdevice button_misc_device = { + BUTTON_MINOR, + "nwbutton", + &button_fops, +}; + +/* + * This function is called to initialise the driver, either from misc.c at + * bootup if the driver is compiled into the kernel, or from init_module + * below at module insert time. It attempts to register the device node + * and the IRQ and fails with a warning message if either fails, though + * neither ever should because the device number and IRQ are unique to + * this driver. + */ + +static int __init nwbutton_init(void) +{ + if (!machine_is_netwinder()) + return -ENODEV; + + printk (KERN_INFO "NetWinder Button Driver Version %s (C) Alex Holden " + " 1998.\n", VERSION); + + if (misc_register (&button_misc_device)) { + printk (KERN_WARNING "nwbutton: Couldn't register device 10, " + "%d.\n", BUTTON_MINOR); + return -EBUSY; + } + + if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, 0, + "nwbutton", NULL)) { + printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n", + IRQ_NETWINDER_BUTTON); + misc_deregister (&button_misc_device); + return -EIO; + } + return 0; +} + +static void __exit nwbutton_exit (void) +{ + free_irq (IRQ_NETWINDER_BUTTON, NULL); + misc_deregister (&button_misc_device); +} + + +MODULE_AUTHOR("Alex Holden"); +MODULE_LICENSE("GPL"); + +module_init(nwbutton_init); +module_exit(nwbutton_exit); diff --git a/kernel/drivers/char/nwbutton.h b/kernel/drivers/char/nwbutton.h new file mode 100644 index 000000000..c3ebc16ce --- /dev/null +++ b/kernel/drivers/char/nwbutton.h @@ -0,0 +1,40 @@ +#ifndef __NWBUTTON_H +#define __NWBUTTON_H + +/* + * NetWinder Button Driver- + * Copyright (C) Alex Holden 1998, 1999. + */ + +#ifdef __NWBUTTON_C /* Actually compiling the driver itself */ + +/* Various defines: */ + +#define NUM_PRESSES_REBOOT 2 /* How many presses to activate shutdown */ +#define BUTTON_DELAY 30 /* How many jiffies for sequence to end */ +#define VERSION "0.3" /* Driver version number */ +#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */ + +/* Structure definitions: */ + +struct button_callback { + void (*callback) (void); + int count; +}; + +/* Function prototypes: */ + +static void button_sequence_finished (unsigned long parameters); +static irqreturn_t button_handler (int irq, void *dev_id); +int button_init (void); +int button_add_callback (void (*callback) (void), int count); +int button_del_callback (void (*callback) (void)); +static void button_consume_callbacks (int bpcount); + +#else /* Not compiling the driver itself */ + +extern int button_add_callback (void (*callback) (void), int count); +extern int button_del_callback (void (*callback) (void)); + +#endif /* __NWBUTTON_C */ +#endif /* __NWBUTTON_H */ diff --git a/kernel/drivers/char/nwflash.c b/kernel/drivers/char/nwflash.c new file mode 100644 index 000000000..e371480d3 --- /dev/null +++ b/kernel/drivers/char/nwflash.c @@ -0,0 +1,654 @@ +/* + * Flash memory interface rev.5 driver for the Intel + * Flash chips used on the NetWinder. 
+ * + * 20/08/2000 RMK use __ioremap to map flash into virtual memory + * make a few more places use "volatile" + * 22/05/2001 RMK - Lock read against write + * - merge printk level changes (with mods) from Alan Cox. + * - use *ppos as the file position, not file->f_pos. + * - fix check for out of range pos and r/w size + * + * Please note that we are tampering with the only flash chip in the + * machine, which contains the bootup code. We therefore have the + * power to convert these machines into doorstops... + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/*****************************************************************************/ +#include + +#define NWFLASH_VERSION "6.4" + +static DEFINE_MUTEX(flash_mutex); +static void kick_open(void); +static int get_flash_id(void); +static int erase_block(int nBlock); +static int write_block(unsigned long p, const char __user *buf, int count); + +#define KFLASH_SIZE 1024*1024 //1 Meg +#define KFLASH_SIZE4 4*1024*1024 //4 Meg +#define KFLASH_ID 0x89A6 //Intel flash +#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg + +static bool flashdebug; //if set - we will display progress msgs + +static int gbWriteEnable; +static int gbWriteBase64Enable; +static volatile unsigned char *FLASH_BASE; +static int gbFlashSize = KFLASH_SIZE; +static DEFINE_MUTEX(nwflash_mutex); + +static int get_flash_id(void) +{ + volatile unsigned int c1, c2; + + /* + * try to get flash chip ID + */ + kick_open(); + c2 = inb(0x80); + *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90; + udelay(15); + c1 = *(volatile unsigned char *) FLASH_BASE; + c2 = inb(0x80); + + /* + * on 4 Meg flash the second byte is actually at offset 2... + */ + if (c1 == 0xB0) + c2 = *(volatile unsigned char *) (FLASH_BASE + 2); + else + c2 = *(volatile unsigned char *) (FLASH_BASE + 1); + + c2 += (c1 << 8); + + /* + * set it back to read mode + */ + *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF; + + if (c2 == KFLASH_ID4) + gbFlashSize = KFLASH_SIZE4; + + return c2; +} + +static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + mutex_lock(&flash_mutex); + switch (cmd) { + case CMD_WRITE_DISABLE: + gbWriteBase64Enable = 0; + gbWriteEnable = 0; + break; + + case CMD_WRITE_ENABLE: + gbWriteEnable = 1; + break; + + case CMD_WRITE_BASE64K_ENABLE: + gbWriteBase64Enable = 1; + break; + + default: + gbWriteBase64Enable = 0; + gbWriteEnable = 0; + mutex_unlock(&flash_mutex); + return -EINVAL; + } + mutex_unlock(&flash_mutex); + return 0; +} + +static ssize_t flash_read(struct file *file, char __user *buf, size_t size, + loff_t *ppos) +{ + ssize_t ret; + + if (flashdebug) + printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, " + "buffer=%p, count=0x%zx.\n", *ppos, buf, size); + /* + * We now lock against reads and writes. 
--rmk + */ + if (mutex_lock_interruptible(&nwflash_mutex)) + return -ERESTARTSYS; + + ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize); + mutex_unlock(&nwflash_mutex); + + return ret; +} + +static ssize_t flash_write(struct file *file, const char __user *buf, + size_t size, loff_t * ppos) +{ + unsigned long p = *ppos; + unsigned int count = size; + int written; + int nBlock, temp, rc; + int i, j; + + if (flashdebug) + printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n", + p, buf, count); + + if (!gbWriteEnable) + return -EINVAL; + + if (p < 64 * 1024 && (!gbWriteBase64Enable)) + return -EINVAL; + + /* + * check for out of range pos or count + */ + if (p >= gbFlashSize) + return count ? -ENXIO : 0; + + if (count > gbFlashSize - p) + count = gbFlashSize - p; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + /* + * We now lock against reads and writes. --rmk + */ + if (mutex_lock_interruptible(&nwflash_mutex)) + return -ERESTARTSYS; + + written = 0; + + nBlock = (int) p >> 16; //block # of 64K bytes + + /* + * # of 64K blocks to erase and write + */ + temp = ((int) (p + count) >> 16) - nBlock + 1; + + /* + * write ends at exactly 64k boundary? + */ + if (((int) (p + count) & 0xFFFF) == 0) + temp -= 1; + + if (flashdebug) + printk(KERN_DEBUG "flash_write: writing %d block(s) " + "starting at %d.\n", temp, nBlock); + + for (; temp; temp--, nBlock++) { + if (flashdebug) + printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock); + + /* + * first we have to erase the block(s), where we will write... + */ + i = 0; + j = 0; + RetryBlock: + do { + rc = erase_block(nBlock); + i++; + } while (rc && i < 10); + + if (rc) { + printk(KERN_ERR "flash_write: erase error %x\n", rc); + break; + } + if (flashdebug) + printk(KERN_DEBUG "flash_write: writing offset %lX, " + "from buf %p, bytes left %X.\n", p, buf, + count - written); + + /* + * write_block will limit write to space left in this block + */ + rc = write_block(p, buf, count - written); + j++; + + /* + * if somehow write verify failed? Can't happen?? + */ + if (!rc) { + /* + * retry up to 10 times + */ + if (j < 10) + goto RetryBlock; + else + /* + * else quit with error... + */ + rc = -1; + + } + if (rc < 0) { + printk(KERN_ERR "flash_write: write error %X\n", rc); + break; + } + p += rc; + buf += rc; + written += rc; + *ppos += rc; + + if (flashdebug) + printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written); + } + + mutex_unlock(&nwflash_mutex); + + return written; +} + + +/* + * The memory devices use the full 32/64 bits of the offset, and so we cannot + * check against negative addresses: they are ok. The return value is weird, + * though, in that case (0). + * + * also note that seeking relative to the "end of file" isn't supported: + * it has no meaning, so it returns -EINVAL. 
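+ *
+ * An illustration (hypothetical user-space calls, not part of this
+ * driver): with the default 1MB part, lseek(fd, 0x10000, SEEK_SET)
+ * positions at the second 64K block, lseek(fd, -4, SEEK_CUR) steps back
+ * four bytes, and lseek(fd, 0, SEEK_END) fails with EINVAL because only
+ * SEEK_SET (orig == 0) and SEEK_CUR (orig == 1) are handled below.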
+ */ +static loff_t flash_llseek(struct file *file, loff_t offset, int orig) +{ + loff_t ret; + + mutex_lock(&flash_mutex); + if (flashdebug) + printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n", + (unsigned int) offset, orig); + + switch (orig) { + case 0: + if (offset < 0) { + ret = -EINVAL; + break; + } + + if ((unsigned int) offset > gbFlashSize) { + ret = -EINVAL; + break; + } + + file->f_pos = (unsigned int) offset; + ret = file->f_pos; + break; + case 1: + if ((file->f_pos + offset) > gbFlashSize) { + ret = -EINVAL; + break; + } + if ((file->f_pos + offset) < 0) { + ret = -EINVAL; + break; + } + file->f_pos += offset; + ret = file->f_pos; + break; + default: + ret = -EINVAL; + } + mutex_unlock(&flash_mutex); + return ret; +} + + +/* + * assume that main Write routine did the parameter checking... + * so just go ahead and erase, what requested! + */ + +static int erase_block(int nBlock) +{ + volatile unsigned int c1; + volatile unsigned char *pWritePtr; + unsigned long timeout; + int temp, temp1; + + /* + * reset footbridge to the correct offset 0 (...0..3) + */ + *CSR_ROMWRITEREG = 0; + + /* + * dummy ROM read + */ + c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); + + kick_open(); + /* + * reset status if old errors + */ + *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; + + /* + * erase a block... + * aim at the middle of a current block... + */ + pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16))); + /* + * dummy read + */ + c1 = *pWritePtr; + + kick_open(); + /* + * erase + */ + *(volatile unsigned char *) pWritePtr = 0x20; + + /* + * confirm + */ + *(volatile unsigned char *) pWritePtr = 0xD0; + + /* + * wait 10 ms + */ + msleep(10); + + /* + * wait while erasing in process (up to 10 sec) + */ + timeout = jiffies + 10 * HZ; + c1 = 0; + while (!(c1 & 0x80) && time_before(jiffies, timeout)) { + msleep(10); + /* + * read any address + */ + c1 = *(volatile unsigned char *) (pWritePtr); + // printk("Flash_erase: status=%X.\n",c1); + } + + /* + * set flash for normal read access + */ + kick_open(); +// *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF; + *(volatile unsigned char *) pWritePtr = 0xFF; //back to normal operation + + /* + * check if erase errors were reported + */ + if (c1 & 0x20) { + printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr); + + /* + * reset error + */ + *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; + return -2; + } + + /* + * just to make sure - verify if erased OK... + */ + msleep(10); + + pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16))); + + for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) { + if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) { + printk(KERN_ERR "flash_erase: verify err at %p = %X\n", + pWritePtr, temp1); + return -1; + } + } + + return 0; + +} + +/* + * write_block will limit number of bytes written to the space in this block + */ +static int write_block(unsigned long p, const char __user *buf, int count) +{ + volatile unsigned int c1; + volatile unsigned int c2; + unsigned char *pWritePtr; + unsigned int uAddress; + unsigned int offset; + unsigned long timeout; + unsigned long timeout1; + + pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); + + /* + * check if write will end in this block.... 
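+	 * (worked example: for p = 0x1FFF0 and count = 0x40, offset is
+	 * 0xFFF0, so count is clamped to 0x10000 - 0xFFF0 = 0x10 bytes
+	 * and the write never crosses the 64K erase-block boundary; the
+	 * loop in flash_write() above resubmits the remainder)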
+	 */
+	offset = p & 0xFFFF;
+
+	if (offset + count > 0x10000)
+		count = 0x10000 - offset;
+
+	/*
+	 * wait up to 30 sec for this block
+	 */
+	timeout = jiffies + 30 * HZ;
+
+	for (offset = 0; offset < count; offset++, pWritePtr++) {
+		uAddress = (unsigned int) pWritePtr;
+		uAddress &= 0xFFFFFFFC;
+		if (__get_user(c2, buf + offset))
+			return -EFAULT;
+
+	  WriteRetry:
+		/*
+		 * dummy read
+		 */
+		c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+		/*
+		 * kick open the write gate
+		 */
+		kick_open();
+
+		/*
+		 * program footbridge to the correct offset...0..3
+		 */
+		*CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3;
+
+		/*
+		 * write cmd
+		 */
+		*(volatile unsigned char *) (uAddress) = 0x40;
+
+		/*
+		 * data to write
+		 */
+		*(volatile unsigned char *) (uAddress) = c2;
+
+		/*
+		 * get status
+		 */
+		*(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70;
+
+		c1 = 0;
+
+		/*
+		 * wait up to 1 sec for this byte
+		 */
+		timeout1 = jiffies + 1 * HZ;
+
+		/*
+		 * while not ready...
+		 */
+		while (!(c1 & 0x80) && time_before(jiffies, timeout1))
+			c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+		/*
+		 * if timeout getting status
+		 */
+		if (time_after_eq(jiffies, timeout1)) {
+			kick_open();
+			/*
+			 * reset err
+			 */
+			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+			goto WriteRetry;
+		}
+		/*
+		 * switch on read access, as a default flash operation mode
+		 */
+		kick_open();
+		/*
+		 * read access
+		 */
+		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+		/*
+		 * if hardware reports an error writing, and not timeout -
+		 * reset the chip and retry
+		 */
+		if (c1 & 0x10) {
+			kick_open();
+			/*
+			 * reset err
+			 */
+			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+			/*
+			 * before timeout?
+			 */
+			if (time_before(jiffies, timeout)) {
+				if (flashdebug)
+					printk(KERN_DEBUG "write_block: Retrying write at 0x%X\n",
+					       pWritePtr - FLASH_BASE);
+
+				/*
+				 * wait a couple of ms
+				 */
+				msleep(10);
+
+				goto WriteRetry;
+			} else {
+				printk(KERN_ERR "write_block: timeout at 0x%X\n",
+				       pWritePtr - FLASH_BASE);
+				/*
+				 * return error -2
+				 */
+				return -2;
+
+			}
+		}
+	}
+
+	msleep(10);
+
+	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+	for (offset = 0; offset < count; offset++) {
+		char c, c1;
+		if (__get_user(c, buf))
+			return -EFAULT;
+		buf++;
+		if ((c1 = *pWritePtr++) != c) {
+			printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n",
+			       pWritePtr - FLASH_BASE, c1, c);
+			return 0;
+		}
+	}
+
+	return count;
+}
+
+
+static void kick_open(void)
+{
+	unsigned long flags;
+
+	/*
+	 * we want to write a bit pattern XXX1 to Xilinx to enable
+	 * the write gate, which will be open for about the next 2ms.
+	 */
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+
+	/*
+	 * let the ISA bus catch on...
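+	 * (the 25 microsecond settle delay below sits comfortably inside
+	 * the roughly 2ms window mentioned above, so the gate should still
+	 * be open when the caller performs its flash cycle)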
+ */ + udelay(25); +} + +static const struct file_operations flash_fops = +{ + .owner = THIS_MODULE, + .llseek = flash_llseek, + .read = flash_read, + .write = flash_write, + .unlocked_ioctl = flash_ioctl, +}; + +static struct miscdevice flash_miscdev = +{ + FLASH_MINOR, + "nwflash", + &flash_fops +}; + +static int __init nwflash_init(void) +{ + int ret = -ENODEV; + + if (machine_is_netwinder()) { + int id; + + FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4); + if (!FLASH_BASE) + goto out; + + id = get_flash_id(); + if ((id != KFLASH_ID) && (id != KFLASH_ID4)) { + ret = -ENXIO; + iounmap((void *)FLASH_BASE); + printk("Flash: incorrect ID 0x%04X.\n", id); + goto out; + } + + printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n", + NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024)); + + ret = misc_register(&flash_miscdev); + if (ret < 0) { + iounmap((void *)FLASH_BASE); + } + } +out: + return ret; +} + +static void __exit nwflash_exit(void) +{ + misc_deregister(&flash_miscdev); + iounmap((void *)FLASH_BASE); +} + +MODULE_LICENSE("GPL"); + +module_param(flashdebug, bool, 0644); + +module_init(nwflash_init); +module_exit(nwflash_exit); diff --git a/kernel/drivers/char/pc8736x_gpio.c b/kernel/drivers/char/pc8736x_gpio.c new file mode 100644 index 000000000..3f79a9fb6 --- /dev/null +++ b/kernel/drivers/char/pc8736x_gpio.c @@ -0,0 +1,352 @@ +/* linux/drivers/char/pc8736x_gpio.c + + National Semiconductor PC8736x GPIO driver. Allows a user space + process to play with the GPIO pins. + + Copyright (c) 2005,2006 Jim Cromie + + adapted from linux/drivers/char/scx200_gpio.c + Copyright (c) 2001,2002 Christer Weinigel , +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEVNAME "pc8736x_gpio" + +MODULE_AUTHOR("Jim Cromie "); +MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver"); +MODULE_LICENSE("GPL"); + +static int major; /* default to dynamic major */ +module_param(major, int, 0); +MODULE_PARM_DESC(major, "Major device number"); + +static DEFINE_MUTEX(pc8736x_gpio_config_lock); +static unsigned pc8736x_gpio_base; +static u8 pc8736x_gpio_shadow[4]; + +#define SIO_BASE1 0x2E /* 1st command-reg to check */ +#define SIO_BASE2 0x4E /* alt command-reg to check */ + +#define SIO_SID 0x20 /* SuperI/O ID Register */ +#define SIO_SID_PC87365 0xe5 /* Expected value in ID Register for PC87365 */ +#define SIO_SID_PC87366 0xe9 /* Expected value in ID Register for PC87366 */ + +#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */ + +#define PC8736X_GPIO_RANGE 16 /* ioaddr range */ +#define PC8736X_GPIO_CT 32 /* minors matching 4 8 bit ports */ + +#define SIO_UNIT_SEL 0x7 /* unit select reg */ +#define SIO_UNIT_ACT 0x30 /* unit enable */ +#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */ +#define SIO_VLM_UNIT 0x0D +#define SIO_TMS_UNIT 0x0E + +/* config-space addrs to read/write each unit's runtime addr */ +#define SIO_BASE_HADDR 0x60 +#define SIO_BASE_LADDR 0x61 + +/* GPIO config-space pin-control addresses */ +#define SIO_GPIO_PIN_SELECT 0xF0 +#define SIO_GPIO_PIN_CONFIG 0xF1 +#define SIO_GPIO_PIN_EVENT 0xF2 + +static unsigned char superio_cmd = 0; +static unsigned char selected_device = 0xFF; /* bogus start val */ + +/* GPIO port runtime access, functionality */ +static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! 
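+ * (worked example of the mapping used by pc8736x_gpio_get() below:
+ * minor 13 gives port = 13 >> 3 = 1 and bit = 13 & 7 = 5, so the pin
+ * state is read at pc8736x_gpio_base + port_offset[1] + PORT_IN)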
*/ +/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */ + +#define PORT_OUT 0 +#define PORT_IN 1 +#define PORT_EVT_EN 2 +#define PORT_EVT_STST 3 + +static struct platform_device *pdev; /* use in dev_*() */ + +static inline void superio_outb(int addr, int val) +{ + outb_p(addr, superio_cmd); + outb_p(val, superio_cmd + 1); +} + +static inline int superio_inb(int addr) +{ + outb_p(addr, superio_cmd); + return inb_p(superio_cmd + 1); +} + +static int pc8736x_superio_present(void) +{ + int id; + + /* try the 2 possible values, read a hardware reg to verify */ + superio_cmd = SIO_BASE1; + id = superio_inb(SIO_SID); + if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366) + return superio_cmd; + + superio_cmd = SIO_BASE2; + id = superio_inb(SIO_SID); + if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366) + return superio_cmd; + + return 0; +} + +static void device_select(unsigned devldn) +{ + superio_outb(SIO_UNIT_SEL, devldn); + selected_device = devldn; +} + +static void select_pin(unsigned iminor) +{ + /* select GPIO port/pin from device minor number */ + device_select(SIO_GPIO_UNIT); + superio_outb(SIO_GPIO_PIN_SELECT, + ((iminor << 1) & 0xF0) | (iminor & 0x7)); +} + +static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits, + u32 func_slct) +{ + u32 config, new_config; + + mutex_lock(&pc8736x_gpio_config_lock); + + device_select(SIO_GPIO_UNIT); + select_pin(index); + + /* read current config value */ + config = superio_inb(func_slct); + + /* set new config */ + new_config = (config & mask) | bits; + superio_outb(func_slct, new_config); + + mutex_unlock(&pc8736x_gpio_config_lock); + + return config; +} + +static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits) +{ + return pc8736x_gpio_configure_fn(index, mask, bits, + SIO_GPIO_PIN_CONFIG); +} + +static int pc8736x_gpio_get(unsigned minor) +{ + int port, bit, val; + + port = minor >> 3; + bit = minor & 7; + val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN); + val >>= bit; + val &= 1; + + dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n", + minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit, + val); + + return val; +} + +static void pc8736x_gpio_set(unsigned minor, int val) +{ + int port, bit, curval; + + minor &= 0x1f; + port = minor >> 3; + bit = minor & 7; + curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); + + dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n", + pc8736x_gpio_base + port_offset[port] + PORT_OUT, + curval, bit, (curval & ~(1 << bit)), val, (val << bit)); + + val = (curval & ~(1 << bit)) | (val << bit); + + dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)" + " %2x -> %2x\n", minor, port, bit, curval, val); + + outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT); + + curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); + val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN); + + dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val); + pc8736x_gpio_shadow[port] = val; +} + +static int pc8736x_gpio_current(unsigned minor) +{ + int port, bit; + minor &= 0x1f; + port = minor >> 3; + bit = minor & 7; + return ((pc8736x_gpio_shadow[port] >> bit) & 0x01); +} + +static void pc8736x_gpio_change(unsigned index) +{ + pc8736x_gpio_set(index, !pc8736x_gpio_current(index)); +} + +static struct nsc_gpio_ops pc8736x_gpio_ops = { + .owner = THIS_MODULE, + .gpio_config = pc8736x_gpio_configure, + .gpio_dump = nsc_gpio_dump, + .gpio_get = pc8736x_gpio_get, + .gpio_set = 
pc8736x_gpio_set, + .gpio_change = pc8736x_gpio_change, + .gpio_current = pc8736x_gpio_current +}; + +static int pc8736x_gpio_open(struct inode *inode, struct file *file) +{ + unsigned m = iminor(inode); + file->private_data = &pc8736x_gpio_ops; + + dev_dbg(&pdev->dev, "open %d\n", m); + + if (m >= PC8736X_GPIO_CT) + return -EINVAL; + return nonseekable_open(inode, file); +} + +static const struct file_operations pc8736x_gpio_fileops = { + .owner = THIS_MODULE, + .open = pc8736x_gpio_open, + .write = nsc_gpio_write, + .read = nsc_gpio_read, + .llseek = no_llseek, +}; + +static void __init pc8736x_init_shadow(void) +{ + int port; + + /* read the current values driven on the GPIO signals */ + for (port = 0; port < 4; ++port) + pc8736x_gpio_shadow[port] + = inb_p(pc8736x_gpio_base + port_offset[port] + + PORT_OUT); + +} + +static struct cdev pc8736x_gpio_cdev; + +static int __init pc8736x_gpio_init(void) +{ + int rc; + dev_t devid; + + pdev = platform_device_alloc(DEVNAME, 0); + if (!pdev) + return -ENOMEM; + + rc = platform_device_add(pdev); + if (rc) { + rc = -ENODEV; + goto undo_platform_dev_alloc; + } + dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n"); + + if (!pc8736x_superio_present()) { + rc = -ENODEV; + dev_err(&pdev->dev, "no device found\n"); + goto undo_platform_dev_add; + } + pc8736x_gpio_ops.dev = &pdev->dev; + + /* Verify that chip and it's GPIO unit are both enabled. + My BIOS does this, so I take minimum action here + */ + rc = superio_inb(SIO_CF1); + if (!(rc & 0x01)) { + rc = -ENODEV; + dev_err(&pdev->dev, "device not enabled\n"); + goto undo_platform_dev_add; + } + device_select(SIO_GPIO_UNIT); + if (!superio_inb(SIO_UNIT_ACT)) { + rc = -ENODEV; + dev_err(&pdev->dev, "GPIO unit not enabled\n"); + goto undo_platform_dev_add; + } + + /* read the GPIO unit base addr that chip responds to */ + pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8 + | superio_inb(SIO_BASE_LADDR)); + + if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) { + rc = -ENODEV; + dev_err(&pdev->dev, "GPIO ioport %x busy\n", + pc8736x_gpio_base); + goto undo_platform_dev_add; + } + dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base); + + if (major) { + devid = MKDEV(major, 0); + rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME); + } else { + rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME); + major = MAJOR(devid); + } + + if (rc < 0) { + dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc); + goto undo_request_region; + } + if (!major) { + major = rc; + dev_dbg(&pdev->dev, "got dynamic major %d\n", major); + } + + pc8736x_init_shadow(); + + /* ignore minor errs, and succeed */ + cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops); + cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT); + + return 0; + +undo_request_region: + release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE); +undo_platform_dev_add: + platform_device_del(pdev); +undo_platform_dev_alloc: + platform_device_put(pdev); + + return rc; +} + +static void __exit pc8736x_gpio_cleanup(void) +{ + dev_dbg(&pdev->dev, "cleanup\n"); + + cdev_del(&pc8736x_gpio_cdev); + unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT); + release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE); + + platform_device_unregister(pdev); +} + +module_init(pc8736x_gpio_init); +module_exit(pc8736x_gpio_cleanup); diff --git a/kernel/drivers/char/pcmcia/Kconfig b/kernel/drivers/char/pcmcia/Kconfig new file mode 100644 index 000000000..8d3dfb0c8 --- /dev/null +++ 
b/kernel/drivers/char/pcmcia/Kconfig @@ -0,0 +1,56 @@ +# +# PCMCIA character device configuration +# + +menu "PCMCIA character devices" + depends on PCMCIA!=n + +config SYNCLINK_CS + tristate "SyncLink PC Card support" + depends on PCMCIA && TTY + help + Enable support for the SyncLink PC Card serial adapter, running + asynchronous and HDLC communications up to 512Kbps. The port is + selectable for RS-232, V.35, RS-449, RS-530, and X.21 + + This driver may be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called synclink_cs. If you want to do that, say M + here. + +config CARDMAN_4000 + tristate "Omnikey Cardman 4000 support" + depends on PCMCIA + select BITREVERSE + help + Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard + reader. + + This kernel driver requires additional userspace support, either + by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/), + or via the cm4000 backend of OpenCT (http://www.opensc-project.org/opensc). + +config CARDMAN_4040 + tristate "Omnikey CardMan 4040 support" + depends on PCMCIA + help + Enable support for the Omnikey CardMan 4040 PCMCIA Smartcard + reader. + + This card is basically a USB CCID device connected to a FIFO + in I/O space. To use the kernel driver, you will need either the + PC/SC ifdhandler provided from the Omnikey homepage + (http://www.omnikey.com/), or a current development version of OpenCT + (http://www.opensc-project.org/opensc). + +config IPWIRELESS + tristate "IPWireless 3G UMTS PCMCIA card support" + depends on PCMCIA && NETDEVICES && TTY + select PPP + help + This is a driver for 3G UMTS PCMCIA card from IPWireless company. In + some countries (for example Czech Republic, T-Mobile ISP) this card + is shipped for service called UMTS 4G. + +endmenu + diff --git a/kernel/drivers/char/pcmcia/Makefile b/kernel/drivers/char/pcmcia/Makefile new file mode 100644 index 000000000..0aae20985 --- /dev/null +++ b/kernel/drivers/char/pcmcia/Makefile @@ -0,0 +1,9 @@ +# +# drivers/char/pcmcia/Makefile +# +# Makefile for the Linux PCMCIA char device drivers. +# + +obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o +obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o +obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o diff --git a/kernel/drivers/char/pcmcia/cm4000_cs.c b/kernel/drivers/char/pcmcia/cm4000_cs.c new file mode 100644 index 000000000..c115217c7 --- /dev/null +++ b/kernel/drivers/char/pcmcia/cm4000_cs.c @@ -0,0 +1,1924 @@ + /* + * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000" + * + * cm4000_cs.c support.linux@omnikey.com + * + * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files + * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files + * Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality + * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionailty + * Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments + * + * current version: 2.4.0gm4 + * + * (C) 2000,2001,2002,2003,2004 Omnikey AG + * + * (C) 2005-2006 Harald Welte + * - Adhere to Kernel CodingStyle + * - Port to 2.6.13 "new" style PCMCIA + * - Check for copy_{from,to}_user return values + * - Use nonseekable_open() + * - add class interface for udev device creation + * + * All rights reserved. Licensed under dual BSD/GPL license. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* #define ATR_CSUM */ + +#define reader_to_dev(x) (&x->p_dev->dev) + +/* n (debug level) is ignored */ +/* additional debug output may be enabled by re-compiling with + * CM4000_DEBUG set */ +/* #define CM4000_DEBUG */ +#define DEBUGP(n, rdr, x, args...) do { \ + dev_dbg(reader_to_dev(rdr), "%s:" x, \ + __func__ , ## args); \ + } while (0) + +static DEFINE_MUTEX(cmm_mutex); + +#define T_1SEC (HZ) +#define T_10MSEC msecs_to_jiffies(10) +#define T_20MSEC msecs_to_jiffies(20) +#define T_40MSEC msecs_to_jiffies(40) +#define T_50MSEC msecs_to_jiffies(50) +#define T_100MSEC msecs_to_jiffies(100) +#define T_500MSEC msecs_to_jiffies(500) + +static void cm4000_release(struct pcmcia_device *link); + +static int major; /* major number we get from the kernel */ + +/* note: the first state has to have number 0 always */ + +#define M_FETCH_ATR 0 +#define M_TIMEOUT_WAIT 1 +#define M_READ_ATR_LEN 2 +#define M_READ_ATR 3 +#define M_ATR_PRESENT 4 +#define M_BAD_CARD 5 +#define M_CARDOFF 6 + +#define LOCK_IO 0 +#define LOCK_MONITOR 1 + +#define IS_AUTOPPS_ACT 6 +#define IS_PROCBYTE_PRESENT 7 +#define IS_INVREV 8 +#define IS_ANY_T0 9 +#define IS_ANY_T1 10 +#define IS_ATR_PRESENT 11 +#define IS_ATR_VALID 12 +#define IS_CMM_ABSENT 13 +#define IS_BAD_LENGTH 14 +#define IS_BAD_CSUM 15 +#define IS_BAD_CARD 16 + +#define REG_FLAGS0(x) (x + 0) +#define REG_FLAGS1(x) (x + 1) +#define REG_NUM_BYTES(x) (x + 2) +#define REG_BUF_ADDR(x) (x + 3) +#define REG_BUF_DATA(x) (x + 4) +#define REG_NUM_SEND(x) (x + 5) +#define REG_BAUDRATE(x) (x + 6) +#define REG_STOPBITS(x) (x + 7) + +struct cm4000_dev { + struct pcmcia_device *p_dev; + + unsigned char atr[MAX_ATR]; + unsigned char rbuf[512]; + unsigned char sbuf[512]; + + wait_queue_head_t devq; /* when removing cardman must not be + zeroed! */ + + wait_queue_head_t ioq; /* if IO is locked, wait on this Q */ + wait_queue_head_t atrq; /* wait for ATR valid */ + wait_queue_head_t readq; /* used by write to wake blk.read */ + + /* warning: do not move this fields. + * initialising to zero depends on it - see ZERO_DEV below. */ + unsigned char atr_csum; + unsigned char atr_len_retry; + unsigned short atr_len; + unsigned short rlen; /* bytes avail. after write */ + unsigned short rpos; /* latest read pos. write zeroes */ + unsigned char procbyte; /* T=0 procedure byte */ + unsigned char mstate; /* state of card monitor */ + unsigned char cwarn; /* slow down warning */ + unsigned char flags0; /* cardman IO-flags 0 */ + unsigned char flags1; /* cardman IO-flags 1 */ + unsigned int mdelay; /* variable monitor speeds, in jiffies */ + + unsigned int baudv; /* baud value for speed */ + unsigned char ta1; + unsigned char proto; /* T=0, T=1, ... */ + unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent + access */ + + unsigned char pts[4]; + + struct timer_list timer; /* used to keep monitor running */ + int monitor_running; +}; + +#define ZERO_DEV(dev) \ + memset(&dev->atr_csum,0, \ + sizeof(struct cm4000_dev) - \ + offsetof(struct cm4000_dev, atr_csum)) + +static struct pcmcia_device *dev_table[CM4000_MAX_DEV]; +static struct class *cmm_class; + +/* This table doesn't use spaces after the comma between fields and thus + * violates CodingStyle. 
However, I don't really think wrapping it around will + * make it any clearer to read -HW */ +static unsigned char fi_di_table[10][14] = { +/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */ +/*DI */ +/* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, +/* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11}, +/* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11}, +/* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3}, +/* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4}, +/* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5}, +/* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6}, +/* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, +/* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8}, +/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} +}; + +#ifndef CM4000_DEBUG +#define xoutb outb +#define xinb inb +#else +static inline void xoutb(unsigned char val, unsigned short port) +{ + pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); + outb(val, port); +} +static inline unsigned char xinb(unsigned short port) +{ + unsigned char val; + + val = inb(port); + pr_debug("%.2x=inb(%.4x)\n", val, port); + + return val; +} +#endif + +static inline unsigned char invert_revert(unsigned char ch) +{ + return bitrev8(~ch); +} + +static void str_invert_revert(unsigned char *b, int len) +{ + int i; + + for (i = 0; i < len; i++) + b[i] = invert_revert(b[i]); +} + +#define ATRLENCK(dev,pos) \ + if (pos>=dev->atr_len || pos>=MAX_ATR) \ + goto return_0; + +static unsigned int calc_baudv(unsigned char fidi) +{ + unsigned int wcrcf, wbrcf, fi_rfu, di_rfu; + + fi_rfu = 372; + di_rfu = 1; + + /* FI */ + switch ((fidi >> 4) & 0x0F) { + case 0x00: + wcrcf = 372; + break; + case 0x01: + wcrcf = 372; + break; + case 0x02: + wcrcf = 558; + break; + case 0x03: + wcrcf = 744; + break; + case 0x04: + wcrcf = 1116; + break; + case 0x05: + wcrcf = 1488; + break; + case 0x06: + wcrcf = 1860; + break; + case 0x07: + wcrcf = fi_rfu; + break; + case 0x08: + wcrcf = fi_rfu; + break; + case 0x09: + wcrcf = 512; + break; + case 0x0A: + wcrcf = 768; + break; + case 0x0B: + wcrcf = 1024; + break; + case 0x0C: + wcrcf = 1536; + break; + case 0x0D: + wcrcf = 2048; + break; + default: + wcrcf = fi_rfu; + break; + } + + /* DI */ + switch (fidi & 0x0F) { + case 0x00: + wbrcf = di_rfu; + break; + case 0x01: + wbrcf = 1; + break; + case 0x02: + wbrcf = 2; + break; + case 0x03: + wbrcf = 4; + break; + case 0x04: + wbrcf = 8; + break; + case 0x05: + wbrcf = 16; + break; + case 0x06: + wbrcf = 32; + break; + case 0x07: + wbrcf = di_rfu; + break; + case 0x08: + wbrcf = 12; + break; + case 0x09: + wbrcf = 20; + break; + default: + wbrcf = di_rfu; + break; + } + + return (wcrcf / wbrcf); +} + +static unsigned short io_read_num_rec_bytes(unsigned int iobase, + unsigned short *s) +{ + unsigned short tmp; + + tmp = *s = 0; + do { + *s = tmp; + tmp = inb(REG_NUM_BYTES(iobase)) | + (inb(REG_FLAGS0(iobase)) & 4 ? 
0x100 : 0); + } while (tmp != *s); + + return *s; +} + +static int parse_atr(struct cm4000_dev *dev) +{ + unsigned char any_t1, any_t0; + unsigned char ch, ifno; + int ix, done; + + DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len); + + if (dev->atr_len < 3) { + DEBUGP(5, dev, "parse_atr: atr_len < 3\n"); + return 0; + } + + if (dev->atr[0] == 0x3f) + set_bit(IS_INVREV, &dev->flags); + else + clear_bit(IS_INVREV, &dev->flags); + ix = 1; + ifno = 1; + ch = dev->atr[1]; + dev->proto = 0; /* XXX PROTO */ + any_t1 = any_t0 = done = 0; + dev->ta1 = 0x11; /* defaults to 9600 baud */ + do { + if (ifno == 1 && (ch & 0x10)) { + /* read first interface byte and TA1 is present */ + dev->ta1 = dev->atr[2]; + DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1); + ifno++; + } else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */ + dev->ta1 = 0x11; + ifno++; + } + + DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0); + ix += ((ch & 0x10) >> 4) /* no of int.face chars */ + +((ch & 0x20) >> 5) + + ((ch & 0x40) >> 6) + + ((ch & 0x80) >> 7); + /* ATRLENCK(dev,ix); */ + if (ch & 0x80) { /* TDi */ + ch = dev->atr[ix]; + if ((ch & 0x0f)) { + any_t1 = 1; + DEBUGP(5, dev, "card is capable of T=1\n"); + } else { + any_t0 = 1; + DEBUGP(5, dev, "card is capable of T=0\n"); + } + } else + done = 1; + } while (!done); + + DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n", + ix, dev->atr[1] & 15, any_t1); + if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) { + DEBUGP(5, dev, "length error\n"); + return 0; + } + if (any_t0) + set_bit(IS_ANY_T0, &dev->flags); + + if (any_t1) { /* compute csum */ + dev->atr_csum = 0; +#ifdef ATR_CSUM + for (i = 1; i < dev->atr_len; i++) + dev->atr_csum ^= dev->atr[i]; + if (dev->atr_csum) { + set_bit(IS_BAD_CSUM, &dev->flags); + DEBUGP(5, dev, "bad checksum\n"); + goto return_0; + } +#endif + if (any_t0 == 0) + dev->proto = 1; /* XXX PROTO */ + set_bit(IS_ANY_T1, &dev->flags); + } + + return 1; +} + +struct card_fixup { + char atr[12]; + u_int8_t atr_len; + u_int8_t stopbits; +}; + +static struct card_fixup card_fixups[] = { + { /* ACOS */ + .atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 }, + .atr_len = 7, + .stopbits = 0x03, + }, + { /* Motorola */ + .atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07, + 0x41, 0x81, 0x81 }, + .atr_len = 11, + .stopbits = 0x04, + }, +}; + +static void set_cardparameter(struct cm4000_dev *dev) +{ + int i; + unsigned int iobase = dev->p_dev->resource[0]->start; + u_int8_t stopbits = 0x02; /* ISO default */ + + DEBUGP(3, dev, "-> set_cardparameter\n"); + + dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8); + xoutb(dev->flags1, REG_FLAGS1(iobase)); + DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1); + + /* set baudrate */ + xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase)); + + DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv, + ((dev->baudv - 1) & 0xFF)); + + /* set stopbits */ + for (i = 0; i < ARRAY_SIZE(card_fixups); i++) { + if (!memcmp(dev->atr, card_fixups[i].atr, + card_fixups[i].atr_len)) + stopbits = card_fixups[i].stopbits; + } + xoutb(stopbits, REG_STOPBITS(iobase)); + + DEBUGP(3, dev, "<- set_cardparameter\n"); +} + +static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) +{ + + unsigned long tmp, i; + unsigned short num_bytes_read; + unsigned char pts_reply[4]; + ssize_t rc; + unsigned int iobase = dev->p_dev->resource[0]->start; + + rc = 0; + + DEBUGP(3, dev, "-> set_protocol\n"); + DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, " + "ptsreq->pts1=0x%.2x, 
ptsreq->pts2=0x%.2x, " + "ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol, + (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2, + ptsreq->pts3); + + /* Fill PTS structure */ + dev->pts[0] = 0xff; + dev->pts[1] = 0x00; + tmp = ptsreq->protocol; + while ((tmp = (tmp >> 1)) > 0) + dev->pts[1]++; + dev->proto = dev->pts[1]; /* Set new protocol */ + dev->pts[1] = (0x01 << 4) | (dev->pts[1]); + + /* Correct Fi/Di according to CM4000 Fi/Di table */ + DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1); + /* set Fi/Di according to ATR TA(1) */ + dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F]; + + /* Calculate PCK character */ + dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2]; + + DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n", + dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]); + + /* check card convention */ + if (test_bit(IS_INVREV, &dev->flags)) + str_invert_revert(dev->pts, 4); + + /* reset SM */ + xoutb(0x80, REG_FLAGS0(iobase)); + + /* Enable access to the message buffer */ + DEBUGP(5, dev, "Enable access to the messages buffer\n"); + dev->flags1 = 0x20 /* T_Active */ + | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */ + | ((dev->baudv >> 8) & 0x01); /* MSB-baud */ + xoutb(dev->flags1, REG_FLAGS1(iobase)); + + DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n", + dev->flags1); + + /* write challenge to the buffer */ + DEBUGP(5, dev, "Write challenge to buffer: "); + for (i = 0; i < 4; i++) { + xoutb(i, REG_BUF_ADDR(iobase)); + xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ +#ifdef CM4000_DEBUG + pr_debug("0x%.2x ", dev->pts[i]); + } + pr_debug("\n"); +#else + } +#endif + + /* set number of bytes to write */ + DEBUGP(5, dev, "Set number of bytes to write\n"); + xoutb(0x04, REG_NUM_SEND(iobase)); + + /* Trigger CARDMAN CONTROLLER */ + xoutb(0x50, REG_FLAGS0(iobase)); + + /* Monitor progress */ + /* wait for xmit done */ + DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n"); + + for (i = 0; i < 100; i++) { + if (inb(REG_FLAGS0(iobase)) & 0x08) { + DEBUGP(5, dev, "NumRecBytes is valid\n"); + break; + } + mdelay(10); + } + if (i == 100) { + DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting " + "valid\n"); + rc = -EIO; + goto exit_setprotocol; + } + + DEBUGP(5, dev, "Reading NumRecBytes\n"); + for (i = 0; i < 100; i++) { + io_read_num_rec_bytes(iobase, &num_bytes_read); + if (num_bytes_read >= 4) { + DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read); + break; + } + mdelay(10); + } + + /* check whether it is a short PTS reply? 
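+	 * (per ISO 7816-3 a short PPS reply carries only PPSS, PPS0 and the
+	 * PCK check byte - three bytes - and is accepted by the final
+	 * comparison in this function when pts_reply[0] ^ pts_reply[1] ==
+	 * pts_reply[2])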
*/ + if (num_bytes_read == 3) + i = 0; + + if (i == 100) { + DEBUGP(5, dev, "Timeout reading num_bytes_read\n"); + rc = -EIO; + goto exit_setprotocol; + } + + DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n"); + xoutb(0x80, REG_FLAGS0(iobase)); + + /* Read PPS reply */ + DEBUGP(5, dev, "Read PPS reply\n"); + for (i = 0; i < num_bytes_read; i++) { + xoutb(i, REG_BUF_ADDR(iobase)); + pts_reply[i] = inb(REG_BUF_DATA(iobase)); + } + +#ifdef CM4000_DEBUG + DEBUGP(2, dev, "PTSreply: "); + for (i = 0; i < num_bytes_read; i++) { + pr_debug("0x%.2x ", pts_reply[i]); + } + pr_debug("\n"); +#endif /* CM4000_DEBUG */ + + DEBUGP(5, dev, "Clear Tactive in Flags1\n"); + xoutb(0x20, REG_FLAGS1(iobase)); + + /* Compare ptsreq and ptsreply */ + if ((dev->pts[0] == pts_reply[0]) && + (dev->pts[1] == pts_reply[1]) && + (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) { + /* setcardparameter according to PPS */ + dev->baudv = calc_baudv(dev->pts[2]); + set_cardparameter(dev); + } else if ((dev->pts[0] == pts_reply[0]) && + ((dev->pts[1] & 0xef) == pts_reply[1]) && + ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) { + /* short PTS reply, set card parameter to default values */ + dev->baudv = calc_baudv(0x11); + set_cardparameter(dev); + } else + rc = -EIO; + +exit_setprotocol: + DEBUGP(3, dev, "<- set_protocol\n"); + return rc; +} + +static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev) +{ + + /* note: statemachine is assumed to be reset */ + if (inb(REG_FLAGS0(iobase)) & 8) { + clear_bit(IS_ATR_VALID, &dev->flags); + set_bit(IS_CMM_ABSENT, &dev->flags); + return 0; /* detect CMM = 1 -> failure */ + } + /* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */ + xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase)); + if ((inb(REG_FLAGS0(iobase)) & 8) == 0) { + clear_bit(IS_ATR_VALID, &dev->flags); + set_bit(IS_CMM_ABSENT, &dev->flags); + return 0; /* detect CMM=0 -> failure */ + } + /* clear detectCMM again by restoring original flags1 */ + xoutb(dev->flags1, REG_FLAGS1(iobase)); + return 1; +} + +static void terminate_monitor(struct cm4000_dev *dev) +{ + + /* tell the monitor to stop and wait until + * it terminates. + */ + DEBUGP(3, dev, "-> terminate_monitor\n"); + wait_event_interruptible(dev->devq, + test_and_set_bit(LOCK_MONITOR, + (void *)&dev->flags)); + + /* now, LOCK_MONITOR has been set. + * allow a last cycle in the monitor. + * the monitor will indicate that it has + * finished by clearing this bit. + */ + DEBUGP(5, dev, "Now allow last cycle of monitor!\n"); + while (test_bit(LOCK_MONITOR, (void *)&dev->flags)) + msleep(25); + + DEBUGP(5, dev, "Delete timer\n"); + del_timer_sync(&dev->timer); +#ifdef CM4000_DEBUG + dev->monitor_running = 0; +#endif + + DEBUGP(3, dev, "<- terminate_monitor\n"); +} + +/* + * monitor the card every 50msec. as a side-effect, retrieve the + * atr once a card is inserted. another side-effect of retrieving the + * atr is that the card will be powered on, so there is no need to + * power on the card explictely from the application: the driver + * is already doing that for you. + */ + +static void monitor_card(unsigned long p) +{ + struct cm4000_dev *dev = (struct cm4000_dev *) p; + unsigned int iobase = dev->p_dev->resource[0]->start; + unsigned short s; + struct ptsreq ptsreq; + int i, atrc; + + DEBUGP(7, dev, "-> monitor_card\n"); + + /* if someone has set the lock for us: we're done! 
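+	 * (terminate_monitor() is what sets LOCK_MONITOR and then sleeps on
+	 * devq; the branch below resets the card state machine, clears the
+	 * bit and issues the matching wake_up_interruptible())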
*/ + if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) { + DEBUGP(4, dev, "About to stop monitor\n"); + /* no */ + dev->rlen = + dev->rpos = + dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; + dev->mstate = M_FETCH_ATR; + clear_bit(LOCK_MONITOR, &dev->flags); + /* close et al. are sleeping on devq, so wake it */ + wake_up_interruptible(&dev->devq); + DEBUGP(2, dev, "<- monitor_card (we are done now)\n"); + return; + } + + /* try to lock io: if it is already locked, just add another timer */ + if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) { + DEBUGP(4, dev, "Couldn't get IO lock\n"); + goto return_with_timer; + } + + /* is a card/a reader inserted at all ? */ + dev->flags0 = xinb(REG_FLAGS0(iobase)); + DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0); + DEBUGP(7, dev, "smartcard present: %s\n", + dev->flags0 & 1 ? "yes" : "no"); + DEBUGP(7, dev, "cardman present: %s\n", + dev->flags0 == 0xff ? "no" : "yes"); + + if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ + || dev->flags0 == 0xff) { /* no cardman inserted */ + /* no */ + dev->rlen = + dev->rpos = + dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; + dev->mstate = M_FETCH_ATR; + + dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */ + + if (dev->flags0 == 0xff) { + DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n"); + set_bit(IS_CMM_ABSENT, &dev->flags); + } else if (test_bit(IS_CMM_ABSENT, &dev->flags)) { + DEBUGP(4, dev, "clear IS_CMM_ABSENT bit " + "(card is removed)\n"); + clear_bit(IS_CMM_ABSENT, &dev->flags); + } + + goto release_io; + } else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) { + /* cardman and card present but cardman was absent before + * (after suspend with inserted card) */ + DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n"); + clear_bit(IS_CMM_ABSENT, &dev->flags); + } + + if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { + DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n"); + goto release_io; + } + + switch (dev->mstate) { + unsigned char flags0; + case M_CARDOFF: + DEBUGP(4, dev, "M_CARDOFF\n"); + flags0 = inb(REG_FLAGS0(iobase)); + if (flags0 & 0x02) { + /* wait until Flags0 indicate power is off */ + dev->mdelay = T_10MSEC; + } else { + /* Flags0 indicate power off and no card inserted now; + * Reset CARDMAN CONTROLLER */ + xoutb(0x80, REG_FLAGS0(iobase)); + + /* prepare for fetching ATR again: after card off ATR + * is read again automatically */ + dev->rlen = + dev->rpos = + dev->atr_csum = + dev->atr_len_retry = dev->cwarn = 0; + dev->mstate = M_FETCH_ATR; + + /* minimal gap between CARDOFF and read ATR is 50msec */ + dev->mdelay = T_50MSEC; + } + break; + case M_FETCH_ATR: + DEBUGP(4, dev, "M_FETCH_ATR\n"); + xoutb(0x80, REG_FLAGS0(iobase)); + DEBUGP(4, dev, "Reset BAUDV to 9600\n"); + dev->baudv = 0x173; /* 9600 */ + xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */ + xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */ + xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud + value */ + /* warm start vs. power on: */ + xoutb(dev->flags0 & 2 ? 
0x46 : 0x44, REG_FLAGS0(iobase)); + dev->mdelay = T_40MSEC; + dev->mstate = M_TIMEOUT_WAIT; + break; + case M_TIMEOUT_WAIT: + DEBUGP(4, dev, "M_TIMEOUT_WAIT\n"); + /* numRecBytes */ + io_read_num_rec_bytes(iobase, &dev->atr_len); + dev->mdelay = T_10MSEC; + dev->mstate = M_READ_ATR_LEN; + break; + case M_READ_ATR_LEN: + DEBUGP(4, dev, "M_READ_ATR_LEN\n"); + /* infinite loop possible, since there is no timeout */ + +#define MAX_ATR_LEN_RETRY 100 + + if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) { + if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */ + dev->mdelay = T_10MSEC; + dev->mstate = M_READ_ATR; + } + } else { + dev->atr_len = s; + dev->atr_len_retry = 0; /* set new timeout */ + } + + DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len); + break; + case M_READ_ATR: + DEBUGP(4, dev, "M_READ_ATR\n"); + xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ + for (i = 0; i < dev->atr_len; i++) { + xoutb(i, REG_BUF_ADDR(iobase)); + dev->atr[i] = inb(REG_BUF_DATA(iobase)); + } + /* Deactivate T_Active flags */ + DEBUGP(4, dev, "Deactivate T_Active flags\n"); + dev->flags1 = 0x01; + xoutb(dev->flags1, REG_FLAGS1(iobase)); + + /* atr is present (which doesn't mean it's valid) */ + set_bit(IS_ATR_PRESENT, &dev->flags); + if (dev->atr[0] == 0x03) + str_invert_revert(dev->atr, dev->atr_len); + atrc = parse_atr(dev); + if (atrc == 0) { /* atr invalid */ + dev->mdelay = 0; + dev->mstate = M_BAD_CARD; + } else { + dev->mdelay = T_50MSEC; + dev->mstate = M_ATR_PRESENT; + set_bit(IS_ATR_VALID, &dev->flags); + } + + if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { + DEBUGP(4, dev, "monitor_card: ATR valid\n"); + /* if ta1 == 0x11, no PPS necessary (default values) */ + /* do not do PPS with multi protocol cards */ + if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) && + (dev->ta1 != 0x11) && + !(test_bit(IS_ANY_T0, &dev->flags) && + test_bit(IS_ANY_T1, &dev->flags))) { + DEBUGP(4, dev, "Perform AUTOPPS\n"); + set_bit(IS_AUTOPPS_ACT, &dev->flags); + ptsreq.protocol = (0x01 << dev->proto); + ptsreq.flags = 0x01; + ptsreq.pts1 = 0x00; + ptsreq.pts2 = 0x00; + ptsreq.pts3 = 0x00; + if (set_protocol(dev, &ptsreq) == 0) { + DEBUGP(4, dev, "AUTOPPS ret SUCC\n"); + clear_bit(IS_AUTOPPS_ACT, &dev->flags); + wake_up_interruptible(&dev->atrq); + } else { + DEBUGP(4, dev, "AUTOPPS failed: " + "repower using defaults\n"); + /* prepare for repowering */ + clear_bit(IS_ATR_PRESENT, &dev->flags); + clear_bit(IS_ATR_VALID, &dev->flags); + dev->rlen = + dev->rpos = + dev->atr_csum = + dev->atr_len_retry = dev->cwarn = 0; + dev->mstate = M_FETCH_ATR; + + dev->mdelay = T_50MSEC; + } + } else { + /* for cards which use slightly different + * params (extra guard time) */ + set_cardparameter(dev); + if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1) + DEBUGP(4, dev, "AUTOPPS already active " + "2nd try:use default values\n"); + if (dev->ta1 == 0x11) + DEBUGP(4, dev, "No AUTOPPS necessary " + "TA(1)==0x11\n"); + if (test_bit(IS_ANY_T0, &dev->flags) + && test_bit(IS_ANY_T1, &dev->flags)) + DEBUGP(4, dev, "Do NOT perform AUTOPPS " + "with multiprotocol cards\n"); + clear_bit(IS_AUTOPPS_ACT, &dev->flags); + wake_up_interruptible(&dev->atrq); + } + } else { + DEBUGP(4, dev, "ATR invalid\n"); + wake_up_interruptible(&dev->atrq); + } + break; + case M_BAD_CARD: + DEBUGP(4, dev, "M_BAD_CARD\n"); + /* slow down warning, but prompt immediately after insertion */ + if (dev->cwarn == 0 || dev->cwarn == 10) { + set_bit(IS_BAD_CARD, &dev->flags); + dev_warn(&dev->p_dev->dev, MODULE_NAME ": "); + if (test_bit(IS_BAD_CSUM, 
&dev->flags)) { + DEBUGP(4, dev, "ATR checksum (0x%.2x, should " + "be zero) failed\n", dev->atr_csum); + } +#ifdef CM4000_DEBUG + else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { + DEBUGP(4, dev, "ATR length error\n"); + } else { + DEBUGP(4, dev, "card damaged or wrong way " + "inserted\n"); + } +#endif + dev->cwarn = 0; + wake_up_interruptible(&dev->atrq); /* wake open */ + } + dev->cwarn++; + dev->mdelay = T_100MSEC; + dev->mstate = M_FETCH_ATR; + break; + default: + DEBUGP(7, dev, "Unknown action\n"); + break; /* nothing */ + } + +release_io: + DEBUGP(7, dev, "release_io\n"); + clear_bit(LOCK_IO, &dev->flags); + wake_up_interruptible(&dev->ioq); /* whoever needs IO */ + +return_with_timer: + DEBUGP(7, dev, "<- monitor_card (returns with timer)\n"); + mod_timer(&dev->timer, jiffies + dev->mdelay); + clear_bit(LOCK_MONITOR, &dev->flags); +} + +/* Interface to userland (file_operations) */ + +static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, + loff_t *ppos) +{ + struct cm4000_dev *dev = filp->private_data; + unsigned int iobase = dev->p_dev->resource[0]->start; + ssize_t rc; + int i, j, k; + + DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid); + + if (count == 0) /* according to manpage */ + return 0; + + if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ + test_bit(IS_CMM_ABSENT, &dev->flags)) + return -ENODEV; + + if (test_bit(IS_BAD_CSUM, &dev->flags)) + return -EIO; + + /* also see the note about this in cmm_write */ + if (wait_event_interruptible + (dev->atrq, + ((filp->f_flags & O_NONBLOCK) + || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + return -ERESTARTSYS; + } + + if (test_bit(IS_ATR_VALID, &dev->flags) == 0) + return -EIO; + + /* this one implements blocking IO */ + if (wait_event_interruptible + (dev->readq, + ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + return -ERESTARTSYS; + } + + /* lock io */ + if (wait_event_interruptible + (dev->ioq, + ((filp->f_flags & O_NONBLOCK) + || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + return -ERESTARTSYS; + } + + rc = 0; + dev->flags0 = inb(REG_FLAGS0(iobase)); + if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ + || dev->flags0 == 0xff) { /* no cardman inserted */ + clear_bit(IS_ATR_VALID, &dev->flags); + if (dev->flags0 & 1) { + set_bit(IS_CMM_ABSENT, &dev->flags); + rc = -ENODEV; + } else { + rc = -EIO; + } + goto release_io; + } + + DEBUGP(4, dev, "begin read answer\n"); + j = min(count, (size_t)(dev->rlen - dev->rpos)); + k = dev->rpos; + if (k + j > 255) + j = 256 - k; + DEBUGP(4, dev, "read1 j=%d\n", j); + for (i = 0; i < j; i++) { + xoutb(k++, REG_BUF_ADDR(iobase)); + dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); + } + j = min(count, (size_t)(dev->rlen - dev->rpos)); + if (k + j > 255) { + DEBUGP(4, dev, "read2 j=%d\n", j); + dev->flags1 |= 0x10; /* MSB buf addr set */ + xoutb(dev->flags1, REG_FLAGS1(iobase)); + for (; i < j; i++) { + xoutb(k++, REG_BUF_ADDR(iobase)); + dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); + } + } + + if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) { + DEBUGP(4, dev, "T=0 and count > buffer\n"); + dev->rbuf[i] = dev->rbuf[i - 1]; + dev->rbuf[i - 1] = dev->procbyte; + j++; + } + count = j; + + dev->rpos = dev->rlen + 1; + + /* Clear T1Active */ + DEBUGP(4, dev, "Clear T1Active\n"); + dev->flags1 &= 0xdf; + xoutb(dev->flags1, REG_FLAGS1(iobase)); 
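+
+	/*
+	 * rpos was pushed past rlen a few lines up, so the next read() will
+	 * block on readq until cmm_write() refills the buffer and wakes the
+	 * queue.
+	 */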
+ + xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ + /* last check before exit */ + if (!io_detect_cm4000(iobase, dev)) { + rc = -ENODEV; + goto release_io; + } + + if (test_bit(IS_INVREV, &dev->flags) && count > 0) + str_invert_revert(dev->rbuf, count); + + if (copy_to_user(buf, dev->rbuf, count)) + rc = -EFAULT; + +release_io: + clear_bit(LOCK_IO, &dev->flags); + wake_up_interruptible(&dev->ioq); + + DEBUGP(2, dev, "<- cmm_read returns: rc = %Zi\n", + (rc < 0 ? rc : count)); + return rc < 0 ? rc : count; +} + +static ssize_t cmm_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct cm4000_dev *dev = filp->private_data; + unsigned int iobase = dev->p_dev->resource[0]->start; + unsigned short s; + unsigned char tmp; + unsigned char infolen; + unsigned char sendT0; + unsigned short nsend; + unsigned short nr; + ssize_t rc; + int i; + + DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid); + + if (count == 0) /* according to manpage */ + return 0; + + if (dev->proto == 0 && count < 4) { + /* T0 must have at least 4 bytes */ + DEBUGP(4, dev, "T0 short write\n"); + return -EIO; + } + + nr = count & 0x1ff; /* max bytes to write */ + + sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0; + + if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ + test_bit(IS_CMM_ABSENT, &dev->flags)) + return -ENODEV; + + if (test_bit(IS_BAD_CSUM, &dev->flags)) { + DEBUGP(4, dev, "bad csum\n"); + return -EIO; + } + + /* + * wait for atr to become valid. + * note: it is important to lock this code. if we dont, the monitor + * could be run between test_bit and the call to sleep on the + * atr-queue. if *then* the monitor detects atr valid, it will wake up + * any process on the atr-queue, *but* since we have been interrupted, + * we do not yet sleep on this queue. this would result in a missed + * wake_up and the calling process would sleep forever (until + * interrupted). also, do *not* restore_flags before sleep_on, because + * this could result in the same situation! + */ + if (wait_event_interruptible + (dev->atrq, + ((filp->f_flags & O_NONBLOCK) + || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + return -ERESTARTSYS; + } + + if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */ + DEBUGP(4, dev, "invalid ATR\n"); + return -EIO; + } + + /* lock io */ + if (wait_event_interruptible + (dev->ioq, + ((filp->f_flags & O_NONBLOCK) + || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + return -ERESTARTSYS; + } + + if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count))) { + /* do not return with LOCK_IO held; release it via release_io */ + rc = -EFAULT; + goto release_io; + } + + rc = 0; + dev->flags0 = inb(REG_FLAGS0(iobase)); + if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ + || dev->flags0 == 0xff) { /* no cardman inserted */ + clear_bit(IS_ATR_VALID, &dev->flags); + if (dev->flags0 & 1) { + set_bit(IS_CMM_ABSENT, &dev->flags); + rc = -ENODEV; + } else { + DEBUGP(4, dev, "IO error\n"); + rc = -EIO; + } + goto release_io; + } + + xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ + + if (!io_detect_cm4000(iobase, dev)) { + rc = -ENODEV; + goto release_io; + } + + /* reflect T=0 send/read mode in flags1 */ + dev->flags1 |= (sendT0); + + set_cardparameter(dev); + + /* dummy read, reset flag procedure received */ + tmp = inb(REG_FLAGS1(iobase)); + + dev->flags1 = 0x20 /* T_Active */ + | (sendT0) + | (test_bit(IS_INVREV, &dev->flags) ?
2 : 0)/* inverse parity */ + | (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */ + DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1); + xoutb(dev->flags1, REG_FLAGS1(iobase)); + + /* xmit data */ + DEBUGP(4, dev, "Xmit data\n"); + for (i = 0; i < nr; i++) { + if (i >= 256) { + dev->flags1 = 0x20 /* T_Active */ + | (sendT0) /* SendT0 */ + /* inverse parity: */ + | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0) + | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */ + | 0x10; /* set address high */ + DEBUGP(4, dev, "dev->flags = 0x%.2x - set address " + "high\n", dev->flags1); + xoutb(dev->flags1, REG_FLAGS1(iobase)); + } + if (test_bit(IS_INVREV, &dev->flags)) { + DEBUGP(4, dev, "Apply inverse convention for 0x%.2x " + "-> 0x%.2x\n", (unsigned char)dev->sbuf[i], + invert_revert(dev->sbuf[i])); + xoutb(i, REG_BUF_ADDR(iobase)); + xoutb(invert_revert(dev->sbuf[i]), + REG_BUF_DATA(iobase)); + } else { + xoutb(i, REG_BUF_ADDR(iobase)); + xoutb(dev->sbuf[i], REG_BUF_DATA(iobase)); + } + } + DEBUGP(4, dev, "Xmit done\n"); + + if (dev->proto == 0) { + /* T=0 proto: 0 byte reply */ + if (nr == 4) { + DEBUGP(4, dev, "T=0 assumes 0 byte reply\n"); + xoutb(i, REG_BUF_ADDR(iobase)); + if (test_bit(IS_INVREV, &dev->flags)) + xoutb(0xff, REG_BUF_DATA(iobase)); + else + xoutb(0x00, REG_BUF_DATA(iobase)); + } + + /* numSendBytes */ + if (sendT0) + nsend = nr; + else { + if (nr == 4) + nsend = 5; + else { + nsend = 5 + (unsigned char)dev->sbuf[4]; + if (dev->sbuf[4] == 0) + nsend += 0x100; + } + } + } else + nsend = nr; + + /* T0: output procedure byte */ + if (test_bit(IS_INVREV, &dev->flags)) { + DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) " + "0x%.2x\n", invert_revert(dev->sbuf[1])); + xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase)); + } else { + DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]); + xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase)); + } + + DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n", + (unsigned char)(nsend & 0xff)); + xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase)); + + DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n", + 0x40 /* SM_Active */ + | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ + |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */ + |(nsend & 0x100) >> 8 /* MSB numSendBytes */ ); + xoutb(0x40 /* SM_Active */ + | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ + |(dev->proto ? 
0x10 : 0x08) /* T=1/T=0 */ + |(nsend & 0x100) >> 8, /* MSB numSendBytes */ + REG_FLAGS0(iobase)); + + /* wait for xmit done */ + if (dev->proto == 1) { + DEBUGP(4, dev, "Wait for xmit done\n"); + for (i = 0; i < 1000; i++) { + if (inb(REG_FLAGS0(iobase)) & 0x08) + break; + msleep_interruptible(10); + } + if (i == 1000) { + DEBUGP(4, dev, "timeout waiting for xmit done\n"); + rc = -EIO; + goto release_io; + } + } + + /* T=1: wait for infoLen */ + + infolen = 0; + if (dev->proto) { + /* wait until infoLen is valid */ + for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */ + io_read_num_rec_bytes(iobase, &s); + if (s >= 3) { + infolen = inb(REG_FLAGS1(iobase)); + DEBUGP(4, dev, "infolen=%d\n", infolen); + break; + } + msleep_interruptible(10); + } + if (i == 6000) { + DEBUGP(4, dev, "timeout waiting for infoLen\n"); + rc = -EIO; + goto release_io; + } + } else + clear_bit(IS_PROCBYTE_PRESENT, &dev->flags); + + /* numRecBytes | bit9 of numRecBytes */ + io_read_num_rec_bytes(iobase, &dev->rlen); + for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */ + if (dev->proto) { + if (dev->rlen >= infolen + 4) + break; + } + msleep_interruptible(10); + /* numRecBytes | bit9 of numRecBytes */ + io_read_num_rec_bytes(iobase, &s); + if (s > dev->rlen) { + DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n"); + i = 0; /* reset timeout */ + dev->rlen = s; + } + /* T=0: we are done when numRecBytes doesn't + * increment any more and NoProcedureByte + * is set and numRecBytes == bytes sent + 6 + * (header bytes + data + 1 for sw2) + * except when the card replies with an error, + * which means no data will be sent back. + */ + else if (dev->proto == 0) { + if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) { + /* no procedure byte received since last read */ + DEBUGP(1, dev, "NoProcedure byte set\n"); + /* i=0; */ + } else { + /* procedure byte received since last read */ + DEBUGP(1, dev, "NoProcedure byte unset " + "(reset timeout)\n"); + dev->procbyte = inb(REG_FLAGS1(iobase)); + DEBUGP(1, dev, "Read procedure byte 0x%.2x\n", + dev->procbyte); + i = 0; /* reset timeout */ + } + if (inb(REG_FLAGS0(iobase)) & 0x08) { + DEBUGP(1, dev, "T0Done flag (read reply)\n"); + break; + } + } + if (dev->proto) + infolen = inb(REG_FLAGS1(iobase)); + } + if (i == 600) { + DEBUGP(1, dev, "timeout waiting for numRecBytes\n"); + rc = -EIO; + goto release_io; + } else { + if (dev->proto == 0) { + DEBUGP(1, dev, "Wait for T0Done bit to be set\n"); + for (i = 0; i < 1000; i++) { + if (inb(REG_FLAGS0(iobase)) & 0x08) + break; + msleep_interruptible(10); + } + if (i == 1000) { + DEBUGP(1, dev, "timeout waiting for T0Done\n"); + rc = -EIO; + goto release_io; + } + + dev->procbyte = inb(REG_FLAGS1(iobase)); + DEBUGP(4, dev, "Read procedure byte 0x%.2x\n", + dev->procbyte); + + io_read_num_rec_bytes(iobase, &dev->rlen); + DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen); + + } + } + /* T=1: read offset=zero, T=0: read offset=after challenge */ + dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ?
5 : nr; + DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n", + dev->rlen, dev->rpos, nr); + +release_io: + DEBUGP(4, dev, "Reset SM\n"); + xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ + + if (rc < 0) { + DEBUGP(4, dev, "Write failed but clear T_Active\n"); + dev->flags1 &= 0xdf; + xoutb(dev->flags1, REG_FLAGS1(iobase)); + } + + clear_bit(LOCK_IO, &dev->flags); + wake_up_interruptible(&dev->ioq); + wake_up_interruptible(&dev->readq); /* tell read we have data */ + + /* ITSEC E2: clear write buffer */ + memset((char *)dev->sbuf, 0, 512); + + /* return error or actually written bytes */ + DEBUGP(2, dev, "<- cmm_write\n"); + return rc < 0 ? rc : nr; +} + +static void start_monitor(struct cm4000_dev *dev) +{ + DEBUGP(3, dev, "-> start_monitor\n"); + if (!dev->monitor_running) { + DEBUGP(5, dev, "create, init and add timer\n"); + setup_timer(&dev->timer, monitor_card, (unsigned long)dev); + dev->monitor_running = 1; + mod_timer(&dev->timer, jiffies); + } else + DEBUGP(5, dev, "monitor already running\n"); + DEBUGP(3, dev, "<- start_monitor\n"); +} + +static void stop_monitor(struct cm4000_dev *dev) +{ + DEBUGP(3, dev, "-> stop_monitor\n"); + if (dev->monitor_running) { + DEBUGP(5, dev, "stopping monitor\n"); + terminate_monitor(dev); + /* reset monitor SM */ + clear_bit(IS_ATR_VALID, &dev->flags); + clear_bit(IS_ATR_PRESENT, &dev->flags); + } else + DEBUGP(5, dev, "monitor already stopped\n"); + DEBUGP(3, dev, "<- stop_monitor\n"); +} + +static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct cm4000_dev *dev = filp->private_data; + unsigned int iobase = dev->p_dev->resource[0]->start; + struct inode *inode = file_inode(filp); + struct pcmcia_device *link; + int size; + int rc; + void __user *argp = (void __user *)arg; +#ifdef CM4000_DEBUG + char *ioctl_names[CM_IOC_MAXNR + 1] = { + [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", + [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", + [_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF", + [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", + [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", + }; + DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), + iminor(inode), ioctl_names[_IOC_NR(cmd)]); +#endif + + mutex_lock(&cmm_mutex); + rc = -ENODEV; + link = dev_table[iminor(inode)]; + if (!pcmcia_dev_present(link)) { + DEBUGP(4, dev, "DEV_OK false\n"); + goto out; + } + + if (test_bit(IS_CMM_ABSENT, &dev->flags)) { + DEBUGP(4, dev, "CMM_ABSENT flag set\n"); + goto out; + } + rc = -EINVAL; + + if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { + DEBUGP(4, dev, "ioctype mismatch\n"); + goto out; + } + if (_IOC_NR(cmd) > CM_IOC_MAXNR) { + DEBUGP(4, dev, "iocnr mismatch\n"); + goto out; + } + size = _IOC_SIZE(cmd); + rc = -EFAULT; + DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n", + _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd); + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (!access_ok(VERIFY_WRITE, argp, size)) + goto out; + } + if (_IOC_DIR(cmd) & _IOC_WRITE) { + if (!access_ok(VERIFY_READ, argp, size)) + goto out; + } + rc = 0; + + switch (cmd) { + case CM_IOCGSTATUS: + DEBUGP(4, dev, " ... 
in CM_IOCGSTATUS\n"); + { + int status; + + /* clear other bits, but leave inserted & powered as + * they are */ + status = dev->flags0 & 3; + if (test_bit(IS_ATR_PRESENT, &dev->flags)) + status |= CM_ATR_PRESENT; + if (test_bit(IS_ATR_VALID, &dev->flags)) + status |= CM_ATR_VALID; + if (test_bit(IS_CMM_ABSENT, &dev->flags)) + status |= CM_NO_READER; + if (test_bit(IS_BAD_CARD, &dev->flags)) + status |= CM_BAD_CARD; + if (copy_to_user(argp, &status, sizeof(int))) + rc = -EFAULT; + } + break; + case CM_IOCGATR: + DEBUGP(4, dev, "... in CM_IOCGATR\n"); + { + struct atreq __user *atreq = argp; + int tmp; + /* allow nonblocking io and being interrupted */ + if (wait_event_interruptible + (dev->atrq, + ((filp->f_flags & O_NONBLOCK) + || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) + != 0)))) { + if (filp->f_flags & O_NONBLOCK) + rc = -EAGAIN; + else + rc = -ERESTARTSYS; + break; + } + + rc = -EFAULT; + if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { + tmp = -1; + if (copy_to_user(&(atreq->atr_len), &tmp, + sizeof(int))) + break; + } else { + if (copy_to_user(atreq->atr, dev->atr, + dev->atr_len)) + break; + + tmp = dev->atr_len; + if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) + break; + } + rc = 0; + break; + } + case CM_IOCARDOFF: + +#ifdef CM4000_DEBUG + DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); + if (dev->flags0 & 0x01) { + DEBUGP(4, dev, " Card inserted\n"); + } else { + DEBUGP(2, dev, " No card inserted\n"); + } + if (dev->flags0 & 0x02) { + DEBUGP(4, dev, " Card powered\n"); + } else { + DEBUGP(2, dev, " Card not powered\n"); + } +#endif + + /* is a card inserted and powered? */ + if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) { + + /* get IO lock */ + if (wait_event_interruptible + (dev->ioq, + ((filp->f_flags & O_NONBLOCK) + || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) + == 0)))) { + if (filp->f_flags & O_NONBLOCK) + rc = -EAGAIN; + else + rc = -ERESTARTSYS; + break; + } + /* Set Flags0 = 0x42 */ + DEBUGP(4, dev, "Set Flags0=0x42 \n"); + xoutb(0x42, REG_FLAGS0(iobase)); + clear_bit(IS_ATR_PRESENT, &dev->flags); + clear_bit(IS_ATR_VALID, &dev->flags); + dev->mstate = M_CARDOFF; + clear_bit(LOCK_IO, &dev->flags); + if (wait_event_interruptible + (dev->atrq, + ((filp->f_flags & O_NONBLOCK) + || (test_bit(IS_ATR_VALID, (void *)&dev->flags) != + 0)))) { + if (filp->f_flags & O_NONBLOCK) + rc = -EAGAIN; + else + rc = -ERESTARTSYS; + break; + } + } + /* release lock */ + clear_bit(LOCK_IO, &dev->flags); + wake_up_interruptible(&dev->ioq); + + rc = 0; + break; + case CM_IOCSPTS: + { + struct ptsreq krnptsreq; + + if (copy_from_user(&krnptsreq, argp, + sizeof(struct ptsreq))) { + rc = -EFAULT; + break; + } + + rc = 0; + DEBUGP(4, dev, "... 
in CM_IOCSPTS\n"); + /* wait for ATR to get valid */ + if (wait_event_interruptible + (dev->atrq, + ((filp->f_flags & O_NONBLOCK) + || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) + != 0)))) { + if (filp->f_flags & O_NONBLOCK) + rc = -EAGAIN; + else + rc = -ERESTARTSYS; + break; + } + /* get IO lock */ + if (wait_event_interruptible + (dev->ioq, + ((filp->f_flags & O_NONBLOCK) + || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) + == 0)))) { + if (filp->f_flags & O_NONBLOCK) + rc = -EAGAIN; + else + rc = -ERESTARTSYS; + break; + } + + if ((rc = set_protocol(dev, &krnptsreq)) != 0) { + /* auto power_on again */ + dev->mstate = M_FETCH_ATR; + clear_bit(IS_ATR_VALID, &dev->flags); + } + /* release lock */ + clear_bit(LOCK_IO, &dev->flags); + wake_up_interruptible(&dev->ioq); + + } + break; +#ifdef CM4000_DEBUG + case CM_IOSDBGLVL: + rc = -ENOTTY; + break; +#endif + default: + DEBUGP(4, dev, "... in default (unknown IOCTL code)\n"); + rc = -ENOTTY; + } +out: + mutex_unlock(&cmm_mutex); + return rc; +} + +static int cmm_open(struct inode *inode, struct file *filp) +{ + struct cm4000_dev *dev; + struct pcmcia_device *link; + int minor = iminor(inode); + int ret; + + if (minor >= CM4000_MAX_DEV) + return -ENODEV; + + mutex_lock(&cmm_mutex); + link = dev_table[minor]; + if (link == NULL || !pcmcia_dev_present(link)) { + ret = -ENODEV; + goto out; + } + + if (link->open) { + ret = -EBUSY; + goto out; + } + + dev = link->priv; + filp->private_data = dev; + + DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n", + imajor(inode), minor, current->comm, current->pid); + + /* init device variables, they may be "polluted" after close + * or, the device may never have been closed (i.e. open failed) + */ + + ZERO_DEV(dev); + + /* opening will always block since the + * monitor will be started by open, which + * means we have to wait for ATR becoming + * valid = block until valid (or card + * inserted) + */ + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + goto out; + } + + dev->mdelay = T_50MSEC; + + /* start monitoring the cardstatus */ + start_monitor(dev); + + link->open = 1; /* only one open per device */ + + DEBUGP(2, dev, "<- cmm_open\n"); + ret = nonseekable_open(inode, filp); +out: + mutex_unlock(&cmm_mutex); + return ret; +} + +static int cmm_close(struct inode *inode, struct file *filp) +{ + struct cm4000_dev *dev; + struct pcmcia_device *link; + int minor = iminor(inode); + + if (minor >= CM4000_MAX_DEV) + return -ENODEV; + + link = dev_table[minor]; + if (link == NULL) + return -ENODEV; + + dev = link->priv; + + DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n", + imajor(inode), minor); + + stop_monitor(dev); + + ZERO_DEV(dev); + + link->open = 0; /* only one open per device */ + wake_up(&dev->devq); /* socket removed? */ + + DEBUGP(2, dev, "cmm_close\n"); + return 0; +} + +static void cmm_cm4000_release(struct pcmcia_device * link) +{ + struct cm4000_dev *dev = link->priv; + + /* dont terminate the monitor, rather rely on + * close doing that for us. + */ + DEBUGP(3, dev, "-> cmm_cm4000_release\n"); + while (link->open) { + printk(KERN_INFO MODULE_NAME ": delaying release until " + "process has terminated\n"); + /* note: don't interrupt us: + * close the applications which own + * the devices _first_ ! 
+ */ + wait_event(dev->devq, (link->open == 0)); + } + /* dev->devq=NULL; this cannot be zeroed earlier */ + DEBUGP(3, dev, "<- cmm_cm4000_release\n"); + return; +} + +/*==== Interface to PCMCIA Layer =======================================*/ + +static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + return pcmcia_request_io(p_dev); +} + +static int cm4000_config(struct pcmcia_device * link, int devno) +{ + struct cm4000_dev *dev; + + link->config_flags |= CONF_AUTO_SET_IO; + + /* read the config-tuples */ + if (pcmcia_loop_config(link, cm4000_config_check, NULL)) + goto cs_release; + + if (pcmcia_enable_device(link)) + goto cs_release; + + dev = link->priv; + + return 0; + +cs_release: + cm4000_release(link); + return -ENODEV; +} + +static int cm4000_suspend(struct pcmcia_device *link) +{ + struct cm4000_dev *dev; + + dev = link->priv; + stop_monitor(dev); + + return 0; +} + +static int cm4000_resume(struct pcmcia_device *link) +{ + struct cm4000_dev *dev; + + dev = link->priv; + if (link->open) + start_monitor(dev); + + return 0; +} + +static void cm4000_release(struct pcmcia_device *link) +{ + cmm_cm4000_release(link); /* delay release until device closed */ + pcmcia_disable_device(link); +} + +static int cm4000_probe(struct pcmcia_device *link) +{ + struct cm4000_dev *dev; + int i, ret; + + for (i = 0; i < CM4000_MAX_DEV; i++) + if (dev_table[i] == NULL) + break; + + if (i == CM4000_MAX_DEV) { + printk(KERN_NOTICE MODULE_NAME ": all devices in use\n"); + return -ENODEV; + } + + /* create a new cm4000_cs device */ + dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL); + if (dev == NULL) + return -ENOMEM; + + dev->p_dev = link; + link->priv = dev; + dev_table[i] = link; + + init_waitqueue_head(&dev->devq); + init_waitqueue_head(&dev->ioq); + init_waitqueue_head(&dev->atrq); + init_waitqueue_head(&dev->readq); + + ret = cm4000_config(link, i); + if (ret) { + dev_table[i] = NULL; + kfree(dev); + return ret; + } + + device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i); + + return 0; +} + +static void cm4000_detach(struct pcmcia_device *link) +{ + struct cm4000_dev *dev = link->priv; + int devno; + + /* find device */ + for (devno = 0; devno < CM4000_MAX_DEV; devno++) + if (dev_table[devno] == link) + break; + if (devno == CM4000_MAX_DEV) + return; + + stop_monitor(dev); + + cm4000_release(link); + + dev_table[devno] = NULL; + kfree(dev); + + device_destroy(cmm_class, MKDEV(major, devno)); + + return; +} + +static const struct file_operations cm4000_fops = { + .owner = THIS_MODULE, + .read = cmm_read, + .write = cmm_write, + .unlocked_ioctl = cmm_ioctl, + .open = cmm_open, + .release= cmm_close, + .llseek = no_llseek, +}; + +static const struct pcmcia_device_id cm4000_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002), + PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, cm4000_ids); + +static struct pcmcia_driver cm4000_driver = { + .owner = THIS_MODULE, + .name = "cm4000_cs", + .probe = cm4000_probe, + .remove = cm4000_detach, + .suspend = cm4000_suspend, + .resume = cm4000_resume, + .id_table = cm4000_ids, +}; + +static int __init cmm_init(void) +{ + int rc; + + cmm_class = class_create(THIS_MODULE, "cardman_4000"); + if (IS_ERR(cmm_class)) + return PTR_ERR(cmm_class); + + major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); + if (major < 0) { + printk(KERN_WARNING MODULE_NAME + ": could not get major number\n"); + class_destroy(cmm_class); + return major; + } + + rc = 
pcmcia_register_driver(&cm4000_driver); + if (rc < 0) { + unregister_chrdev(major, DEVICE_NAME); + class_destroy(cmm_class); + return rc; + } + + return 0; +} + +static void __exit cmm_exit(void) +{ + pcmcia_unregister_driver(&cm4000_driver); + unregister_chrdev(major, DEVICE_NAME); + class_destroy(cmm_class); +}; + +module_init(cmm_init); +module_exit(cmm_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/kernel/drivers/char/pcmcia/cm4040_cs.c b/kernel/drivers/char/pcmcia/cm4040_cs.c new file mode 100644 index 000000000..8dd48a2be --- /dev/null +++ b/kernel/drivers/char/pcmcia/cm4040_cs.c @@ -0,0 +1,687 @@ +/* + * A driver for the Omnikey PCMCIA smartcard reader CardMan 4040 + * + * (c) 2000-2004 Omnikey AG (http://www.omnikey.com/) + * + * (C) 2005-2006 Harald Welte + * - add support for poll() + * - driver cleanup + * - add waitqueues + * - adhere to linux kernel coding style and policies + * - support 2.6.13 "new style" pcmcia interface + * - add class interface for udev device creation + * + * The device basically is a USB CCID compliant device that has been + * attached to an I/O-Mapped FIFO. + * + * All rights reserved, Dual BSD/GPL Licensed. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "cm4040_cs.h" + + +#define reader_to_dev(x) (&x->p_dev->dev) + +/* n (debug level) is ignored */ +/* additional debug output may be enabled by re-compiling with + * CM4040_DEBUG set */ +/* #define CM4040_DEBUG */ +#define DEBUGP(n, rdr, x, args...) do { \ + dev_dbg(reader_to_dev(rdr), "%s:" x, \ + __func__ , ## args); \ + } while (0) + +static DEFINE_MUTEX(cm4040_mutex); + +#define CCID_DRIVER_BULK_DEFAULT_TIMEOUT (150*HZ) +#define CCID_DRIVER_ASYNC_POWERUP_TIMEOUT (35*HZ) +#define CCID_DRIVER_MINIMUM_TIMEOUT (3*HZ) +#define READ_WRITE_BUFFER_SIZE 512 +#define POLL_LOOP_COUNT 1000 + +/* how often to poll for fifo status change */ +#define POLL_PERIOD msecs_to_jiffies(10) + +static void reader_release(struct pcmcia_device *link); + +static int major; +static struct class *cmx_class; + +#define BS_READABLE 0x01 +#define BS_WRITABLE 0x02 + +struct reader_dev { + struct pcmcia_device *p_dev; + wait_queue_head_t devq; + wait_queue_head_t poll_wait; + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + unsigned long buffer_status; + unsigned long timeout; + unsigned char s_buf[READ_WRITE_BUFFER_SIZE]; + unsigned char r_buf[READ_WRITE_BUFFER_SIZE]; + struct timer_list poll_timer; +}; + +static struct pcmcia_device *dev_table[CM_MAX_DEV]; + +#ifndef CM4040_DEBUG +#define xoutb outb +#define xinb inb +#else +static inline void xoutb(unsigned char val, unsigned short port) +{ + pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); + outb(val, port); +} + +static inline unsigned char xinb(unsigned short port) +{ + unsigned char val; + + val = inb(port); + pr_debug("%.2x=inb(%.4x)\n", val, port); + return val; +} +#endif + +/* poll the device fifo status register. not to be confused with + * the poll syscall. 
*/ +static void cm4040_do_poll(unsigned long dummy) +{ + struct reader_dev *dev = (struct reader_dev *) dummy; + unsigned int obs = xinb(dev->p_dev->resource[0]->start + + REG_OFFSET_BUFFER_STATUS); + + if ((obs & BSR_BULK_IN_FULL)) { + set_bit(BS_READABLE, &dev->buffer_status); + DEBUGP(4, dev, "waking up read_wait\n"); + wake_up_interruptible(&dev->read_wait); + } else + clear_bit(BS_READABLE, &dev->buffer_status); + + if (!(obs & BSR_BULK_OUT_FULL)) { + set_bit(BS_WRITABLE, &dev->buffer_status); + DEBUGP(4, dev, "waking up write_wait\n"); + wake_up_interruptible(&dev->write_wait); + } else + clear_bit(BS_WRITABLE, &dev->buffer_status); + + if (dev->buffer_status) + wake_up_interruptible(&dev->poll_wait); + + mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD); +} + +static void cm4040_stop_poll(struct reader_dev *dev) +{ + del_timer_sync(&dev->poll_timer); +} + +static int wait_for_bulk_out_ready(struct reader_dev *dev) +{ + int i, rc; + int iobase = dev->p_dev->resource[0]->start; + + for (i = 0; i < POLL_LOOP_COUNT; i++) { + if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS) + & BSR_BULK_OUT_FULL) == 0) { + DEBUGP(4, dev, "BulkOut empty (i=%d)\n", i); + return 1; + } + } + + DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n", + dev->timeout); + rc = wait_event_interruptible_timeout(dev->write_wait, + test_and_clear_bit(BS_WRITABLE, + &dev->buffer_status), + dev->timeout); + + if (rc > 0) + DEBUGP(4, dev, "woke up: BulkOut empty\n"); + else if (rc == 0) + DEBUGP(4, dev, "woke up: BulkOut full, returning 0 :(\n"); + else if (rc < 0) + DEBUGP(4, dev, "woke up: signal arrived\n"); + + return rc; +} + +/* Write to Sync Control Register */ +static int write_sync_reg(unsigned char val, struct reader_dev *dev) +{ + int iobase = dev->p_dev->resource[0]->start; + int rc; + + rc = wait_for_bulk_out_ready(dev); + if (rc <= 0) + return rc; + + xoutb(val, iobase + REG_OFFSET_SYNC_CONTROL); + rc = wait_for_bulk_out_ready(dev); + if (rc <= 0) + return rc; + + return 1; +} + +static int wait_for_bulk_in_ready(struct reader_dev *dev) +{ + int i, rc; + int iobase = dev->p_dev->resource[0]->start; + + for (i = 0; i < POLL_LOOP_COUNT; i++) { + if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS) + & BSR_BULK_IN_FULL) == BSR_BULK_IN_FULL) { + DEBUGP(3, dev, "BulkIn full (i=%d)\n", i); + return 1; + } + } + + DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n", + dev->timeout); + rc = wait_event_interruptible_timeout(dev->read_wait, + test_and_clear_bit(BS_READABLE, + &dev->buffer_status), + dev->timeout); + if (rc > 0) + DEBUGP(4, dev, "woke up: BulkIn full\n"); + else if (rc == 0) + DEBUGP(4, dev, "woke up: BulkIn not full, returning 0 :(\n"); + else if (rc < 0) + DEBUGP(4, dev, "woke up: signal arrived\n"); + + return rc; +} + +static ssize_t cm4040_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct reader_dev *dev = filp->private_data; + int iobase = dev->p_dev->resource[0]->start; + size_t bytes_to_read; + unsigned long i; + size_t min_bytes_to_read; + int rc; + unsigned char uc; + + DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid); + + if (count == 0) + return 0; + + if (count < 10) + return -EFAULT; + + if (filp->f_flags & O_NONBLOCK) { + DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); + DEBUGP(2, dev, "<- cm4040_read (failure)\n"); + return -EAGAIN; + } + + if (!pcmcia_dev_present(dev->p_dev)) + return -ENODEV; + + for (i = 0; i < 5; i++) { + rc = wait_for_bulk_in_ready(dev); + if (rc <= 0) { + DEBUGP(5, dev, "wait_for_bulk_in_ready 
rc=%.2x\n", rc); + DEBUGP(2, dev, "<- cm4040_read (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + return -EIO; + } + dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN); +#ifdef CM4040_DEBUG + pr_debug("%lu:%2x ", i, dev->r_buf[i]); + } + pr_debug("\n"); +#else + } +#endif + + bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]); + + DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read); + + min_bytes_to_read = min(count, bytes_to_read + 5); + min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); + + DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read); + + for (i = 0; i < (min_bytes_to_read-5); i++) { + rc = wait_for_bulk_in_ready(dev); + if (rc <= 0) { + DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc); + DEBUGP(2, dev, "<- cm4040_read (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + return -EIO; + } + dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN); +#ifdef CM4040_DEBUG + pr_debug("%lu:%2x ", i, dev->r_buf[i+5]); + } + pr_debug("\n"); +#else + } +#endif + + *ppos = min_bytes_to_read; + if (copy_to_user(buf, dev->r_buf, min_bytes_to_read)) + return -EFAULT; + + rc = wait_for_bulk_in_ready(dev); + if (rc <= 0) { + DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc); + DEBUGP(2, dev, "<- cm4040_read (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + return -EIO; + } + + rc = write_sync_reg(SCR_READER_TO_HOST_DONE, dev); + if (rc <= 0) { + DEBUGP(5, dev, "write_sync_reg c=%.2x\n", rc); + DEBUGP(2, dev, "<- cm4040_read (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + else + return -EIO; + } + + uc = xinb(iobase + REG_OFFSET_BULK_IN); + + DEBUGP(2, dev, "<- cm4040_read (successfully)\n"); + return min_bytes_to_read; +} + +static ssize_t cm4040_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct reader_dev *dev = filp->private_data; + int iobase = dev->p_dev->resource[0]->start; + ssize_t rc; + int i; + unsigned int bytes_to_write; + + DEBUGP(2, dev, "-> cm4040_write(%s,%d)\n", current->comm, current->pid); + + if (count == 0) { + DEBUGP(2, dev, "<- cm4040_write empty write (successfully)\n"); + return 0; + } + + if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) { + DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5 or > 512\n", count); + return -EIO; + } + + if (filp->f_flags & O_NONBLOCK) { + DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); + DEBUGP(4, dev, "<- cm4040_write (failure)\n"); + return -EAGAIN; + } + + if (!pcmcia_dev_present(dev->p_dev)) + return -ENODEV; + + bytes_to_write = count; + if (copy_from_user(dev->s_buf, buf, bytes_to_write)) + return -EFAULT; + + switch (dev->s_buf[0]) { + case CMD_PC_TO_RDR_XFRBLOCK: + case CMD_PC_TO_RDR_SECURE: + case CMD_PC_TO_RDR_TEST_SECURE: + case CMD_PC_TO_RDR_OK_SECURE: + dev->timeout = CCID_DRIVER_BULK_DEFAULT_TIMEOUT; + break; + + case CMD_PC_TO_RDR_ICCPOWERON: + dev->timeout = CCID_DRIVER_ASYNC_POWERUP_TIMEOUT; + break; + + case CMD_PC_TO_RDR_GETSLOTSTATUS: + case CMD_PC_TO_RDR_ICCPOWEROFF: + case CMD_PC_TO_RDR_GETPARAMETERS: + case CMD_PC_TO_RDR_RESETPARAMETERS: + case CMD_PC_TO_RDR_SETPARAMETERS: + case CMD_PC_TO_RDR_ESCAPE: + case CMD_PC_TO_RDR_ICCCLOCK: + default: + dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT; + break; + } + + rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); + if (rc <= 0) { + DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + else + return -EIO; + } + + DEBUGP(4, dev, "start\n"); + + for (i = 0; i < bytes_to_write; i++) { + rc =
wait_for_bulk_out_ready(dev); + if (rc <= 0) { + DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", + rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + else + return -EIO; + } + + xoutb(dev->s_buf[i],iobase + REG_OFFSET_BULK_OUT); + } + DEBUGP(4, dev, "end\n"); + + rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); + + if (rc <= 0) { + DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; + else + return -EIO; + } + + DEBUGP(2, dev, "<- cm4040_write (successfully)\n"); + return count; +} + +static unsigned int cm4040_poll(struct file *filp, poll_table *wait) +{ + struct reader_dev *dev = filp->private_data; + unsigned int mask = 0; + + poll_wait(filp, &dev->poll_wait, wait); + + if (test_and_clear_bit(BS_READABLE, &dev->buffer_status)) + mask |= POLLIN | POLLRDNORM; + if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status)) + mask |= POLLOUT | POLLWRNORM; + + DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask); + + return mask; +} + +static int cm4040_open(struct inode *inode, struct file *filp) +{ + struct reader_dev *dev; + struct pcmcia_device *link; + int minor = iminor(inode); + int ret; + + if (minor >= CM_MAX_DEV) + return -ENODEV; + + mutex_lock(&cm4040_mutex); + link = dev_table[minor]; + if (link == NULL || !pcmcia_dev_present(link)) { + ret = -ENODEV; + goto out; + } + + if (link->open) { + ret = -EBUSY; + goto out; + } + + dev = link->priv; + filp->private_data = dev; + + if (filp->f_flags & O_NONBLOCK) { + DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); + ret = -EAGAIN; + goto out; + } + + link->open = 1; + + dev->poll_timer.data = (unsigned long) dev; + mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD); + + DEBUGP(2, dev, "<- cm4040_open (successfully)\n"); + ret = nonseekable_open(inode, filp); +out: + mutex_unlock(&cm4040_mutex); + return ret; +} + +static int cm4040_close(struct inode *inode, struct file *filp) +{ + struct reader_dev *dev = filp->private_data; + struct pcmcia_device *link; + int minor = iminor(inode); + + DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode), + iminor(inode)); + + if (minor >= CM_MAX_DEV) + return -ENODEV; + + link = dev_table[minor]; + if (link == NULL) + return -ENODEV; + + cm4040_stop_poll(dev); + + link->open = 0; + wake_up(&dev->devq); + + DEBUGP(2, dev, "<- cm4040_close\n"); + return 0; +} + +static void cm4040_reader_release(struct pcmcia_device *link) +{ + struct reader_dev *dev = link->priv; + + DEBUGP(3, dev, "-> cm4040_reader_release\n"); + while (link->open) { + DEBUGP(3, dev, KERN_INFO MODULE_NAME ": delaying release " + "until process has terminated\n"); + wait_event(dev->devq, (link->open == 0)); + } + DEBUGP(3, dev, "<- cm4040_reader_release\n"); + return; +} + +static int cm4040_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + return pcmcia_request_io(p_dev); +} + + +static int reader_config(struct pcmcia_device *link, int devno) +{ + struct reader_dev *dev; + int fail_rc; + + link->config_flags |= CONF_AUTO_SET_IO; + + if (pcmcia_loop_config(link, cm4040_config_check, NULL)) + goto cs_release; + + fail_rc = pcmcia_enable_device(link); + if (fail_rc != 0) { + dev_printk(KERN_INFO, &link->dev, + "pcmcia_enable_device failed 0x%x\n", + fail_rc); + goto cs_release; + } + + dev = link->priv; + + DEBUGP(2, dev, "device " DEVICE_NAME "%d at %pR\n", devno, + link->resource[0]); + DEBUGP(2, dev, "<- reader_config (succ)\n"); + + return 0; + +cs_release: + reader_release(link); + 
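+	/*
+	 * Illustrative userland sketch, not part of the original driver:
+	 * the cmx%d node created by reader_probe() below exchanges raw CCID
+	 * bulk messages. The device path and the zeroed slot/sequence bytes
+	 * are assumptions; the 5-byte minimum write and 10-byte minimum
+	 * read match the checks in cm4040_write()/cm4040_read() above:
+	 *
+	 *	unsigned char cmd[10] = { CMD_PC_TO_RDR_GETSLOTSTATUS };
+	 *	unsigned char rsp[512];
+	 *	int fd = open("/dev/cmx0", O_RDWR);
+	 *	write(fd, cmd, sizeof(cmd));	// CCID header, dwLength = 0
+	 *	read(fd, rsp, sizeof(rsp));	// 10-byte reply header + data
+	 */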
return -ENODEV; +} + +static void reader_release(struct pcmcia_device *link) +{ + cm4040_reader_release(link); + pcmcia_disable_device(link); +} + +static int reader_probe(struct pcmcia_device *link) +{ + struct reader_dev *dev; + int i, ret; + + for (i = 0; i < CM_MAX_DEV; i++) { + if (dev_table[i] == NULL) + break; + } + + if (i == CM_MAX_DEV) + return -ENODEV; + + dev = kzalloc(sizeof(struct reader_dev), GFP_KERNEL); + if (dev == NULL) + return -ENOMEM; + + dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT; + dev->buffer_status = 0; + + link->priv = dev; + dev->p_dev = link; + + dev_table[i] = link; + + init_waitqueue_head(&dev->devq); + init_waitqueue_head(&dev->poll_wait); + init_waitqueue_head(&dev->read_wait); + init_waitqueue_head(&dev->write_wait); + setup_timer(&dev->poll_timer, cm4040_do_poll, 0); + + ret = reader_config(link, i); + if (ret) { + dev_table[i] = NULL; + kfree(dev); + return ret; + } + + device_create(cmx_class, NULL, MKDEV(major, i), NULL, "cmx%d", i); + + return 0; +} + +static void reader_detach(struct pcmcia_device *link) +{ + struct reader_dev *dev = link->priv; + int devno; + + /* find device */ + for (devno = 0; devno < CM_MAX_DEV; devno++) { + if (dev_table[devno] == link) + break; + } + if (devno == CM_MAX_DEV) + return; + + reader_release(link); + + dev_table[devno] = NULL; + kfree(dev); + + device_destroy(cmx_class, MKDEV(major, devno)); + + return; +} + +static const struct file_operations reader_fops = { + .owner = THIS_MODULE, + .read = cm4040_read, + .write = cm4040_write, + .open = cm4040_open, + .release = cm4040_close, + .poll = cm4040_poll, + .llseek = no_llseek, +}; + +static const struct pcmcia_device_id cm4040_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200), + PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040", + 0xE32CDD8C, 0x8F23318B), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, cm4040_ids); + +static struct pcmcia_driver reader_driver = { + .owner = THIS_MODULE, + .name = "cm4040_cs", + .probe = reader_probe, + .remove = reader_detach, + .id_table = cm4040_ids, +}; + +static int __init cm4040_init(void) +{ + int rc; + + cmx_class = class_create(THIS_MODULE, "cardman_4040"); + if (IS_ERR(cmx_class)) + return PTR_ERR(cmx_class); + + major = register_chrdev(0, DEVICE_NAME, &reader_fops); + if (major < 0) { + printk(KERN_WARNING MODULE_NAME + ": could not get major number\n"); + class_destroy(cmx_class); + return major; + } + + rc = pcmcia_register_driver(&reader_driver); + if (rc < 0) { + unregister_chrdev(major, DEVICE_NAME); + class_destroy(cmx_class); + return rc; + } + + return 0; +} + +static void __exit cm4040_exit(void) +{ + pcmcia_unregister_driver(&reader_driver); + unregister_chrdev(major, DEVICE_NAME); + class_destroy(cmx_class); +} + +module_init(cm4040_init); +module_exit(cm4040_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/kernel/drivers/char/pcmcia/cm4040_cs.h b/kernel/drivers/char/pcmcia/cm4040_cs.h new file mode 100644 index 000000000..9a8b805c5 --- /dev/null +++ b/kernel/drivers/char/pcmcia/cm4040_cs.h @@ -0,0 +1,47 @@ +#ifndef _CM4040_H_ +#define _CM4040_H_ + +#define CM_MAX_DEV 4 + +#define DEVICE_NAME "cmx" +#define MODULE_NAME "cm4040_cs" + +#define REG_OFFSET_BULK_OUT 0 +#define REG_OFFSET_BULK_IN 0 +#define REG_OFFSET_BUFFER_STATUS 1 +#define REG_OFFSET_SYNC_CONTROL 2 + +#define BSR_BULK_IN_FULL 0x02 +#define BSR_BULK_OUT_FULL 0x01 + +#define SCR_HOST_TO_READER_START 0x80 +#define SCR_ABORT 0x40 +#define SCR_EN_NOTIFY 0x20 +#define SCR_ACK_NOTIFY 0x10 +#define SCR_READER_TO_HOST_DONE 0x08 +#define 
SCR_HOST_TO_READER_DONE 0x04 +#define SCR_PULSE_INTERRUPT 0x02 +#define SCR_POWER_DOWN 0x01 + + +#define CMD_PC_TO_RDR_ICCPOWERON 0x62 +#define CMD_PC_TO_RDR_GETSLOTSTATUS 0x65 +#define CMD_PC_TO_RDR_ICCPOWEROFF 0x63 +#define CMD_PC_TO_RDR_SECURE 0x69 +#define CMD_PC_TO_RDR_GETPARAMETERS 0x6C +#define CMD_PC_TO_RDR_RESETPARAMETERS 0x6D +#define CMD_PC_TO_RDR_SETPARAMETERS 0x61 +#define CMD_PC_TO_RDR_XFRBLOCK 0x6F +#define CMD_PC_TO_RDR_ESCAPE 0x6B +#define CMD_PC_TO_RDR_ICCCLOCK 0x6E +#define CMD_PC_TO_RDR_TEST_SECURE 0x74 +#define CMD_PC_TO_RDR_OK_SECURE 0x89 + + +#define CMD_RDR_TO_PC_SLOTSTATUS 0x81 +#define CMD_RDR_TO_PC_DATABLOCK 0x80 +#define CMD_RDR_TO_PC_PARAMETERS 0x82 +#define CMD_RDR_TO_PC_ESCAPE 0x83 +#define CMD_RDR_TO_PC_OK_SECURE 0x89 + +#endif /* _CM4040_H_ */ diff --git a/kernel/drivers/char/pcmcia/synclink_cs.c b/kernel/drivers/char/pcmcia/synclink_cs.c new file mode 100644 index 000000000..0ea998605 --- /dev/null +++ b/kernel/drivers/char/pcmcia/synclink_cs.c @@ -0,0 +1,4340 @@ +/* + * linux/drivers/char/pcmcia/synclink_cs.c + * + * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $ + * + * Device driver for Microgate SyncLink PC Card + * multiprotocol serial adapter. + * + * written by Paul Fulghum for Microgate Corporation + * paulkf@microgate.com + * + * Microgate and SyncLink are trademarks of Microgate Corporation + * + * This code is released under the GNU General Public License (GPL) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) +#if defined(__i386__) +# define BREAKPOINT() asm(" int $3"); +#else +# define BREAKPOINT() { } +#endif + +#define MAX_DEVICE_COUNT 4 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE)) +#define SYNCLINK_GENERIC_HDLC 1 +#else +#define SYNCLINK_GENERIC_HDLC 0 +#endif + +#define GET_USER(error,value,addr) error = get_user(value,addr) +#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 +#define PUT_USER(error,value,addr) error = put_user(value,addr) +#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 + +#include + +static MGSL_PARAMS default_params = { + MGSL_MODE_HDLC, /* unsigned long mode */ + 0, /* unsigned char loopback; */ + HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ + HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ + 0, /* unsigned long clock_speed; */ + 0xff, /* unsigned char addr_filter; */ + HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ + HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ + HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ + 9600, /* unsigned long data_rate; */ + 8, /* unsigned char data_bits; */ + 1, /* unsigned char stop_bits; */ + ASYNC_PARITY_NONE /* unsigned char parity; */ +}; + +typedef struct { + int count; + unsigned char status; + char data[1]; +} RXBUF; + +/* The queue of BH actions to be performed */ + +#define BH_RECEIVE 1 +#define BH_TRANSMIT 2 +#define BH_STATUS 4 + +#define IO_PIN_SHUTDOWN_LIMIT 100 + +#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) + +struct _input_signal_events { + int ri_up; + int ri_down; + int dsr_up; + int dsr_down; + int dcd_up; + int dcd_down; + int cts_up; + int cts_down; +}; + + +/* + * Device instance data structure + */ + +typedef struct _mgslpc_info { + struct tty_port port; + void *if_ptr; /* General purpose pointer (used by SPPP) */ + int magic; + int line; + + struct mgsl_icount icount; + + int timeout; + int x_char; /* xon/xoff character */ + unsigned char read_status_mask; + unsigned char ignore_status_mask; + + unsigned char *tx_buf; + int tx_put; + int tx_get; + int tx_count; + + /* circular list of fixed length rx buffers */ + + unsigned char *rx_buf; /* memory allocated for all rx buffers */ + int rx_buf_total_size; /* size of memory allocated for rx buffers */ + int rx_put; /* index of next empty rx buffer */ + int rx_get; /* index of next full rx buffer */ + int rx_buf_size; /* size in bytes of single rx buffer */ + int rx_buf_count; /* total number of rx buffers */ + int rx_frame_count; /* number of full rx buffers */ + + wait_queue_head_t status_event_wait_q; + wait_queue_head_t event_wait_q; + struct timer_list tx_timer; /* HDLC transmit timeout timer */ + struct _mgslpc_info *next_device; /* device list link */ + + unsigned short imra_value; + unsigned short imrb_value; + unsigned char pim_value; + + spinlock_t lock; + struct work_struct task; /* task structure for scheduling bh */ + + u32 max_frame_size; + + u32 pending_bh; + + bool bh_running; + bool bh_requested; + + int dcd_chkcount; /* check counts to prevent */ + int cts_chkcount; /* too many IRQs if a signal */ + int dsr_chkcount; /* is floating */ + int ri_chkcount; + + bool rx_enabled; + bool rx_overflow; + + bool tx_enabled; + bool tx_active; + bool tx_aborting; + u32 idle_mode; + + int if_mode; /* serial interface selection (RS-232, v.35 etc) */ + + char device_name[25]; /* device instance name */ + + unsigned int io_base; /* base I/O address of adapter */ + unsigned int irq_level; + + MGSL_PARAMS params; /* communications parameters */ + + unsigned char serial_signals; /* current serial signal states */ + + bool irq_occurred; /* for diagnostics use */ + char testing_irq; + unsigned int init_error; /* startup error (DIAGS) */ + + char *flag_buf; + bool drop_rts_on_tx_done; + + struct _input_signal_events input_signal_events; + + /* PCMCIA support */ + struct pcmcia_device *p_dev; + int stop; + + /* SPPP/Cisco HDLC device parts */ + int netcount; + spinlock_t netlock; + +#if SYNCLINK_GENERIC_HDLC + struct net_device *netdev; +#endif + +} MGSLPC_INFO; + 
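+/*
+ * Illustrative sketch, not part of the original source: the rx_* fields
+ * in MGSLPC_INFO above form a ring of rx_buf_count fixed-size buffers
+ * carved out of the single rx_buf allocation. A slot is located by plain
+ * index arithmetic (hypothetical helper name; the expression matches the
+ * one used in rx_ready_hdlc() below), and the put index wraps around:
+ *
+ *	static RXBUF *rx_slot(MGSLPC_INFO *info, int idx)
+ *	{
+ *		return (RXBUF *)(info->rx_buf + idx * info->rx_buf_size);
+ *	}
+ *
+ *	// producer side, after filling a frame:
+ *	if (++info->rx_put >= info->rx_buf_count)
+ *		info->rx_put = 0;
+ */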
+#define MGSLPC_MAGIC 0x5402 + +/* + * The size of the serial xmit buffer is 1 page, or 4096 bytes + */ +#define TXBUFSIZE 4096 + + +#define CHA 0x00 /* channel A offset */ +#define CHB 0x40 /* channel B offset */ + +/* + * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it. + */ +#undef PVR + +#define RXFIFO 0 +#define TXFIFO 0 +#define STAR 0x20 +#define CMDR 0x20 +#define RSTA 0x21 +#define PRE 0x21 +#define MODE 0x22 +#define TIMR 0x23 +#define XAD1 0x24 +#define XAD2 0x25 +#define RAH1 0x26 +#define RAH2 0x27 +#define DAFO 0x27 +#define RAL1 0x28 +#define RFC 0x28 +#define RHCR 0x29 +#define RAL2 0x29 +#define RBCL 0x2a +#define XBCL 0x2a +#define RBCH 0x2b +#define XBCH 0x2b +#define CCR0 0x2c +#define CCR1 0x2d +#define CCR2 0x2e +#define CCR3 0x2f +#define VSTR 0x34 +#define BGR 0x34 +#define RLCR 0x35 +#define AML 0x36 +#define AMH 0x37 +#define GIS 0x38 +#define IVA 0x38 +#define IPC 0x39 +#define ISR 0x3a +#define IMR 0x3a +#define PVR 0x3c +#define PIS 0x3d +#define PIM 0x3d +#define PCR 0x3e +#define CCR4 0x3f + +// IMR/ISR + +#define IRQ_BREAK_ON BIT15 // rx break detected +#define IRQ_DATAOVERRUN BIT14 // receive data overflow +#define IRQ_ALLSENT BIT13 // all sent +#define IRQ_UNDERRUN BIT12 // transmit data underrun +#define IRQ_TIMER BIT11 // timer interrupt +#define IRQ_CTS BIT10 // CTS status change +#define IRQ_TXREPEAT BIT9 // tx message repeat +#define IRQ_TXFIFO BIT8 // transmit pool ready +#define IRQ_RXEOM BIT7 // receive message end +#define IRQ_EXITHUNT BIT6 // receive frame start +#define IRQ_RXTIME BIT6 // rx char timeout +#define IRQ_DCD BIT2 // carrier detect status change +#define IRQ_OVERRUN BIT1 // receive frame overflow +#define IRQ_RXFIFO BIT0 // receive pool full + +// STAR + +#define XFW BIT6 // transmit FIFO write enable +#define CEC BIT2 // command executing +#define CTS BIT1 // CTS state + +#define PVR_DTR BIT0 +#define PVR_DSR BIT1 +#define PVR_RI BIT2 +#define PVR_AUTOCTS BIT3 +#define PVR_RS232 0x20 /* 0010b */ +#define PVR_V35 0xe0 /* 1110b */ +#define PVR_RS422 0x40 /* 0100b */ + +/* Register access functions */ + +#define write_reg(info, reg, val) outb((val),(info)->io_base + (reg)) +#define read_reg(info, reg) inb((info)->io_base + (reg)) + +#define read_reg16(info, reg) inw((info)->io_base + (reg)) +#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg)) + +#define set_reg_bits(info, reg, mask) \ + write_reg(info, (reg), \ + (unsigned char) (read_reg(info, (reg)) | (mask))) +#define clear_reg_bits(info, reg, mask) \ + write_reg(info, (reg), \ + (unsigned char) (read_reg(info, (reg)) & ~(mask))) +/* + * interrupt enable/disable routines + */ +static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) +{ + if (channel == CHA) { + info->imra_value |= mask; + write_reg16(info, CHA + IMR, info->imra_value); + } else { + info->imrb_value |= mask; + write_reg16(info, CHB + IMR, info->imrb_value); + } +} +static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) +{ + if (channel == CHA) { + info->imra_value &= ~mask; + write_reg16(info, CHA + IMR, info->imra_value); + } else { + info->imrb_value &= ~mask; + write_reg16(info, CHB + IMR, info->imrb_value); + } +} + +#define port_irq_disable(info, mask) \ + { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); } + +#define port_irq_enable(info, mask) \ + { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); } + +static void rx_start(MGSLPC_INFO *info); +static void rx_stop(MGSLPC_INFO 
*info); + +static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty); +static void tx_stop(MGSLPC_INFO *info); +static void tx_set_idle(MGSLPC_INFO *info); + +static void get_signals(MGSLPC_INFO *info); +static void set_signals(MGSLPC_INFO *info); + +static void reset_device(MGSLPC_INFO *info); + +static void hdlc_mode(MGSLPC_INFO *info); +static void async_mode(MGSLPC_INFO *info); + +static void tx_timeout(unsigned long context); + +static int carrier_raised(struct tty_port *port); +static void dtr_rts(struct tty_port *port, int onoff); + +#if SYNCLINK_GENERIC_HDLC +#define dev_to_port(D) (dev_to_hdlc(D)->priv) +static void hdlcdev_tx_done(MGSLPC_INFO *info); +static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); +static int hdlcdev_init(MGSLPC_INFO *info); +static void hdlcdev_exit(MGSLPC_INFO *info); +#endif + +static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit); + +static bool register_test(MGSLPC_INFO *info); +static bool irq_test(MGSLPC_INFO *info); +static int adapter_test(MGSLPC_INFO *info); + +static int claim_resources(MGSLPC_INFO *info); +static void release_resources(MGSLPC_INFO *info); +static int mgslpc_add_device(MGSLPC_INFO *info); +static void mgslpc_remove_device(MGSLPC_INFO *info); + +static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty); +static void rx_reset_buffers(MGSLPC_INFO *info); +static int rx_alloc_buffers(MGSLPC_INFO *info); +static void rx_free_buffers(MGSLPC_INFO *info); + +static irqreturn_t mgslpc_isr(int irq, void *dev_id); + +/* + * Bottom half interrupt handlers + */ +static void bh_handler(struct work_struct *work); +static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty); +static void bh_status(MGSLPC_INFO *info); + +/* + * ioctl handlers + */ +static int tiocmget(struct tty_struct *tty); +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); +static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount); +static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params); +static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty); +static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode); +static int set_txidle(MGSLPC_INFO *info, int idle_mode); +static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty); +static int tx_abort(MGSLPC_INFO *info); +static int set_rxenable(MGSLPC_INFO *info, int enable); +static int wait_events(MGSLPC_INFO *info, int __user *mask); + +static MGSLPC_INFO *mgslpc_device_list = NULL; +static int mgslpc_device_count = 0; + +/* + * Set this param to non-zero to load eax with the + * .text section address and breakpoint on module load. + * This is useful for use with gdb and add-symbol-file command. + */ +static bool break_on_load=0; + +/* + * Driver major number, defaults to zero to get auto + * assigned major number. May be forced as module parameter. 
+ */ +static int ttymajor=0; + +static int debug_level = 0; +static int maxframe[MAX_DEVICE_COUNT] = {0,}; + +module_param(break_on_load, bool, 0); +module_param(ttymajor, int, 0); +module_param(debug_level, int, 0); +module_param_array(maxframe, int, NULL, 0); + +MODULE_LICENSE("GPL"); + +static char *driver_name = "SyncLink PC Card driver"; +static char *driver_version = "$Revision: 4.34 $"; + +static struct tty_driver *serial_driver; + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + +static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty); +static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout); + +/* PCMCIA prototypes */ + +static int mgslpc_config(struct pcmcia_device *link); +static void mgslpc_release(u_long arg); +static void mgslpc_detach(struct pcmcia_device *p_dev); + +/* + * 1st function defined in .text section. Calling this function in + * init_module() followed by a breakpoint allows a remote debugger + * (gdb) to get the .text address for the add-symbol-file command. + * This allows remote debugging of dynamically loadable modules. + */ +static void* mgslpc_get_text_ptr(void) +{ + return mgslpc_get_text_ptr; +} + +/** + * line discipline callback wrappers + * + * The wrappers maintain line discipline references + * while calling into the line discipline. + * + * ldisc_receive_buf - pass receive data to line discipline + */ + +static void ldisc_receive_buf(struct tty_struct *tty, + const __u8 *data, char *flags, int count) +{ + struct tty_ldisc *ld; + if (!tty) + return; + ld = tty_ldisc_ref(tty); + if (ld) { + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, data, flags, count); + tty_ldisc_deref(ld); + } +} + +static const struct tty_port_operations mgslpc_port_ops = { + .carrier_raised = carrier_raised, + .dtr_rts = dtr_rts +}; + +static int mgslpc_probe(struct pcmcia_device *link) +{ + MGSLPC_INFO *info; + int ret; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("mgslpc_attach\n"); + + info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); + if (!info) { + printk("Error can't allocate device instance data\n"); + return -ENOMEM; + } + + info->magic = MGSLPC_MAGIC; + tty_port_init(&info->port); + info->port.ops = &mgslpc_port_ops; + INIT_WORK(&info->task, bh_handler); + info->max_frame_size = 4096; + info->port.close_delay = 5*HZ/10; + info->port.closing_wait = 30*HZ; + init_waitqueue_head(&info->status_event_wait_q); + init_waitqueue_head(&info->event_wait_q); + spin_lock_init(&info->lock); + spin_lock_init(&info->netlock); + memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); + info->idle_mode = HDLC_TXIDLE_FLAGS; + info->imra_value = 0xffff; + info->imrb_value = 0xffff; + info->pim_value = 0xff; + + info->p_dev = link; + link->priv = info; + + /* Initialize the struct pcmcia_device structure */ + + ret = mgslpc_config(link); + if (ret != 0) + goto failed; + + ret = mgslpc_add_device(info); + if (ret != 0) + goto failed_release; + + return 0; + +failed_release: + mgslpc_release((u_long)link); +failed: + tty_port_destroy(&info->port); + kfree(info); + return ret; +} + +/* Card has been inserted. 
+ */ + +static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data) +{ + return pcmcia_request_io(p_dev); +} + +static int mgslpc_config(struct pcmcia_device *link) +{ + MGSLPC_INFO *info = link->priv; + int ret; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("mgslpc_config(0x%p)\n", link); + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL); + if (ret != 0) + goto failed; + + link->config_index = 8; + link->config_regs = PRESENT_OPTION; + + ret = pcmcia_request_irq(link, mgslpc_isr); + if (ret) + goto failed; + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + info->io_base = link->resource[0]->start; + info->irq_level = link->irq; + return 0; + +failed: + mgslpc_release((u_long)link); + return -ENODEV; +} + +/* Card has been removed. + * Unregister device and release PCMCIA configuration. + * If device is open, postpone until it is closed. + */ +static void mgslpc_release(u_long arg) +{ + struct pcmcia_device *link = (struct pcmcia_device *)arg; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("mgslpc_release(0x%p)\n", link); + + pcmcia_disable_device(link); +} + +static void mgslpc_detach(struct pcmcia_device *link) +{ + if (debug_level >= DEBUG_LEVEL_INFO) + printk("mgslpc_detach(0x%p)\n", link); + + ((MGSLPC_INFO *)link->priv)->stop = 1; + mgslpc_release((u_long)link); + + mgslpc_remove_device((MGSLPC_INFO *)link->priv); +} + +static int mgslpc_suspend(struct pcmcia_device *link) +{ + MGSLPC_INFO *info = link->priv; + + info->stop = 1; + + return 0; +} + +static int mgslpc_resume(struct pcmcia_device *link) +{ + MGSLPC_INFO *info = link->priv; + + info->stop = 0; + + return 0; +} + + +static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info, + char *name, const char *routine) +{ +#ifdef MGSLPC_PARANOIA_CHECK + static const char *badmagic = + "Warning: bad magic number for mgsl struct (%s) in %s\n"; + static const char *badinfo = + "Warning: null mgslpc_info for (%s) in %s\n"; + + if (!info) { + printk(badinfo, name, routine); + return true; + } + if (info->magic != MGSLPC_MAGIC) { + printk(badmagic, name, routine); + return true; + } +#else + if (!info) + return true; +#endif + return false; +} + + +#define CMD_RXFIFO BIT7 // release current rx FIFO +#define CMD_RXRESET BIT6 // receiver reset +#define CMD_RXFIFO_READ BIT5 +#define CMD_START_TIMER BIT4 +#define CMD_TXFIFO BIT3 // release current tx FIFO +#define CMD_TXEOM BIT1 // transmit end message +#define CMD_TXRESET BIT0 // transmit reset + +static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel) +{ + int i = 0; + /* wait for command completion */ + while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) { + udelay(1); + if (i++ == 1000) + return false; + } + return true; +} + +static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd) +{ + wait_command_complete(info, channel); + write_reg(info, (unsigned char) (channel + CMDR), cmd); +} + +static void tx_pause(struct tty_struct *tty) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (mgslpc_paranoia_check(info, tty->name, "tx_pause")) + return; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("tx_pause(%s)\n", info->device_name); + + spin_lock_irqsave(&info->lock, flags); + if (info->tx_enabled) + tx_stop(info); + spin_unlock_irqrestore(&info->lock, flags); +} + +static void tx_release(struct tty_struct *tty) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + 
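+	/*
+	 * Illustrative note, not part of the original driver: the channel
+	 * command register (CMDR) must only be written while the CEC
+	 * (command executing) bit in STAR is clear; issue_command() above
+	 * spins on that for up to ~1ms (1000 x udelay(1)) before writing
+	 * CMDR, so callers can simply do, e.g.:
+	 *
+	 *	issue_command(info, CHA, CMD_TXRESET);
+	 */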
if (mgslpc_paranoia_check(info, tty->name, "tx_release")) + return; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("tx_release(%s)\n", info->device_name); + + spin_lock_irqsave(&info->lock, flags); + if (!info->tx_enabled) + tx_start(info, tty); + spin_unlock_irqrestore(&info->lock, flags); +} + +/* Return next bottom half action to perform. + * or 0 if nothing to do. + */ +static int bh_action(MGSLPC_INFO *info) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&info->lock, flags); + + if (info->pending_bh & BH_RECEIVE) { + info->pending_bh &= ~BH_RECEIVE; + rc = BH_RECEIVE; + } else if (info->pending_bh & BH_TRANSMIT) { + info->pending_bh &= ~BH_TRANSMIT; + rc = BH_TRANSMIT; + } else if (info->pending_bh & BH_STATUS) { + info->pending_bh &= ~BH_STATUS; + rc = BH_STATUS; + } + + if (!rc) { + /* Mark BH routine as complete */ + info->bh_running = false; + info->bh_requested = false; + } + + spin_unlock_irqrestore(&info->lock, flags); + + return rc; +} + +static void bh_handler(struct work_struct *work) +{ + MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); + struct tty_struct *tty; + int action; + + if (debug_level >= DEBUG_LEVEL_BH) + printk("%s(%d):bh_handler(%s) entry\n", + __FILE__,__LINE__,info->device_name); + + info->bh_running = true; + tty = tty_port_tty_get(&info->port); + + while((action = bh_action(info)) != 0) { + + /* Process work item */ + if (debug_level >= DEBUG_LEVEL_BH) + printk("%s(%d):bh_handler() work item action=%d\n", + __FILE__,__LINE__,action); + + switch (action) { + + case BH_RECEIVE: + while(rx_get_frame(info, tty)); + break; + case BH_TRANSMIT: + bh_transmit(info, tty); + break; + case BH_STATUS: + bh_status(info); + break; + default: + /* unknown work item ID */ + printk("Unknown work item ID=%08X!\n", action); + break; + } + } + + tty_kref_put(tty); + if (debug_level >= DEBUG_LEVEL_BH) + printk("%s(%d):bh_handler(%s) exit\n", + __FILE__,__LINE__,info->device_name); +} + +static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty) +{ + if (debug_level >= DEBUG_LEVEL_BH) + printk("bh_transmit() entry on %s\n", info->device_name); + + if (tty) + tty_wakeup(tty); +} + +static void bh_status(MGSLPC_INFO *info) +{ + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + info->dcd_chkcount = 0; + info->cts_chkcount = 0; +} + +/* eom: non-zero = end of frame */ +static void rx_ready_hdlc(MGSLPC_INFO *info, int eom) +{ + unsigned char data[2]; + unsigned char fifo_count, read_count, i; + RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size)); + + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom); + + if (!info->rx_enabled) + return; + + if (info->rx_frame_count >= info->rx_buf_count) { + /* no more free buffers */ + issue_command(info, CHA, CMD_RXRESET); + info->pending_bh |= BH_RECEIVE; + info->rx_overflow = true; + info->icount.buf_overrun++; + return; + } + + if (eom) { + /* end of frame, get FIFO count from RBCL register */ + fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f); + if (fifo_count == 0) + fifo_count = 32; + } else + fifo_count = 32; + + do { + if (fifo_count == 1) { + read_count = 1; + data[0] = read_reg(info, CHA + RXFIFO); + } else { + read_count = 2; + *((unsigned short *) data) = read_reg16(info, CHA + RXFIFO); + } + fifo_count -= read_count; + if (!fifo_count && eom) + buf->status = data[--read_count]; + + for (i = 0; i < read_count; i++) { + if (buf->count >= info->max_frame_size) { + /* frame too large, reset receiver and reset current 
buffer */
+ issue_command(info, CHA, CMD_RXRESET);
+ buf->count = 0;
+ return;
+ }
+ *(buf->data + buf->count) = data[i];
+ buf->count++;
+ }
+ } while (fifo_count);
+
+ if (eom) {
+ info->pending_bh |= BH_RECEIVE;
+ info->rx_frame_count++;
+ info->rx_put++;
+ if (info->rx_put >= info->rx_buf_count)
+ info->rx_put = 0;
+ }
+ issue_command(info, CHA, CMD_RXFIFO);
+}
+
+static void rx_ready_async(MGSLPC_INFO *info, int tcd)
+{
+ struct tty_port *port = &info->port;
+ unsigned char data, status, flag;
+ int fifo_count;
+ int work = 0;
+ struct mgsl_icount *icount = &info->icount;
+
+ if (tcd) {
+ /* early termination, get FIFO count from RBCL register */
+ fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+
+ /* Zero fifo count could mean 0 or 32 bytes available.
+ * If BIT5 of STAR is set then at least 1 byte is available.
+ */
+ if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5))
+ fifo_count = 32;
+ } else
+ fifo_count = 32;
+
+ tty_buffer_request_room(port, fifo_count);
+ /* Flush received async data to receive data buffer. */
+ while (fifo_count) {
+ data = read_reg(info, CHA + RXFIFO);
+ status = read_reg(info, CHA + RXFIFO);
+ fifo_count -= 2;
+
+ icount->rx++;
+ flag = TTY_NORMAL;
+
+ // if no framing/crc error then save data
+ // BIT7:parity error
+ // BIT6:framing error
+
+ if (status & (BIT7 + BIT6)) {
+ if (status & BIT7)
+ icount->parity++;
+ else
+ icount->frame++;
+
+ /* discard char if tty control flags say so */
+ if (status & info->ignore_status_mask)
+ continue;
+
+ status &= info->read_status_mask;
+
+ if (status & BIT7)
+ flag = TTY_PARITY;
+ else if (status & BIT6)
+ flag = TTY_FRAME;
+ }
+ work += tty_insert_flip_char(port, data, flag);
+ }
+ issue_command(info, CHA, CMD_RXFIFO);
+
+ if (debug_level >= DEBUG_LEVEL_ISR) {
+ printk("%s(%d):rx_ready_async",
+ __FILE__,__LINE__);
+ printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
+ __FILE__,__LINE__,icount->rx,icount->brk,
+ icount->parity,icount->frame,icount->overrun);
+ }
+
+ if (work)
+ tty_flip_buffer_push(port);
+}
+
+
+static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ if (!info->tx_active)
+ return;
+
+ info->tx_active = false;
+ info->tx_aborting = false;
+
+ if (info->params.mode == MGSL_MODE_ASYNC)
+ return;
+
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ del_timer(&info->tx_timer);
+
+ if (info->drop_rts_on_tx_done) {
+ get_signals(info);
+ if (info->serial_signals & SerialSignal_RTS) {
+ info->serial_signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ }
+ info->drop_rts_on_tx_done = false;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ if (tty && (tty->stopped || tty->hw_stopped)) {
+ tx_stop(info);
+ return;
+ }
+ info->pending_bh |= BH_TRANSMIT;
+ }
+}
+
+static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ unsigned char fifo_count = 32;
+ int c;
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ if (!info->tx_active)
+ return;
+ } else {
+ if (tty && (tty->stopped || tty->hw_stopped)) {
+ tx_stop(info);
+ return;
+ }
+ if (!info->tx_count)
+ info->tx_active = false;
+ }
+
+ if (!info->tx_count)
+ return;
+
+ while (info->tx_count && fifo_count) {
+ c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get)));
+
+ if (c == 1) {
+ write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get));
+ } else {
+ write_reg16(info, CHA + TXFIFO,
+ *((unsigned
short*)(info->tx_buf + info->tx_get))); + } + info->tx_count -= c; + info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1); + fifo_count -= c; + } + + if (info->params.mode == MGSL_MODE_ASYNC) { + if (info->tx_count < WAKEUP_CHARS) + info->pending_bh |= BH_TRANSMIT; + issue_command(info, CHA, CMD_TXFIFO); + } else { + if (info->tx_count) + issue_command(info, CHA, CMD_TXFIFO); + else + issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM); + } +} + +static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty) +{ + get_signals(info); + if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + irq_disable(info, CHB, IRQ_CTS); + info->icount.cts++; + if (info->serial_signals & SerialSignal_CTS) + info->input_signal_events.cts_up++; + else + info->input_signal_events.cts_down++; + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + if (tty && tty_port_cts_enabled(&info->port)) { + if (tty->hw_stopped) { + if (info->serial_signals & SerialSignal_CTS) { + if (debug_level >= DEBUG_LEVEL_ISR) + printk("CTS tx start..."); + tty->hw_stopped = 0; + tx_start(info, tty); + info->pending_bh |= BH_TRANSMIT; + return; + } + } else { + if (!(info->serial_signals & SerialSignal_CTS)) { + if (debug_level >= DEBUG_LEVEL_ISR) + printk("CTS tx stop..."); + tty->hw_stopped = 1; + tx_stop(info); + } + } + } + info->pending_bh |= BH_STATUS; +} + +static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty) +{ + get_signals(info); + if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + irq_disable(info, CHB, IRQ_DCD); + info->icount.dcd++; + if (info->serial_signals & SerialSignal_DCD) { + info->input_signal_events.dcd_up++; + } + else + info->input_signal_events.dcd_down++; +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) { + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } +#endif + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + if (info->port.flags & ASYNC_CHECK_CD) { + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s CD now %s...", info->device_name, + (info->serial_signals & SerialSignal_DCD) ? "on" : "off"); + if (info->serial_signals & SerialSignal_DCD) + wake_up_interruptible(&info->port.open_wait); + else { + if (debug_level >= DEBUG_LEVEL_ISR) + printk("doing serial hangup..."); + if (tty) + tty_hangup(tty); + } + } + info->pending_bh |= BH_STATUS; +} + +static void dsr_change(MGSLPC_INFO *info) +{ + get_signals(info); + if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + port_irq_disable(info, PVR_DSR); + info->icount.dsr++; + if (info->serial_signals & SerialSignal_DSR) + info->input_signal_events.dsr_up++; + else + info->input_signal_events.dsr_down++; + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + info->pending_bh |= BH_STATUS; +} + +static void ri_change(MGSLPC_INFO *info) +{ + get_signals(info); + if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + port_irq_disable(info, PVR_RI); + info->icount.rng++; + if (info->serial_signals & SerialSignal_RI) + info->input_signal_events.ri_up++; + else + info->input_signal_events.ri_down++; + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + info->pending_bh |= BH_STATUS; +} + +/* Interrupt service routine entry point. 
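+ *
+ * The handler loops reading the global interrupt status (GIS) register,
+ * dispatches to the rx/tx and modem-signal routines above, and defers
+ * heavier work to bh_handler() through schedule_work().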
+ * + * Arguments: + * + * irq interrupt number that caused interrupt + * dev_id device ID supplied during interrupt registration + */ +static irqreturn_t mgslpc_isr(int dummy, void *dev_id) +{ + MGSLPC_INFO *info = dev_id; + struct tty_struct *tty; + unsigned short isr; + unsigned char gis, pis; + int count=0; + + if (debug_level >= DEBUG_LEVEL_ISR) + printk("mgslpc_isr(%d) entry.\n", info->irq_level); + + if (!(info->p_dev->_locked)) + return IRQ_HANDLED; + + tty = tty_port_tty_get(&info->port); + + spin_lock(&info->lock); + + while ((gis = read_reg(info, CHA + GIS))) { + if (debug_level >= DEBUG_LEVEL_ISR) + printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis); + + if ((gis & 0x70) || count > 1000) { + printk("synclink_cs:hardware failed or ejected\n"); + break; + } + count++; + + if (gis & (BIT1 | BIT0)) { + isr = read_reg16(info, CHB + ISR); + if (isr & IRQ_DCD) + dcd_change(info, tty); + if (isr & IRQ_CTS) + cts_change(info, tty); + } + if (gis & (BIT3 | BIT2)) + { + isr = read_reg16(info, CHA + ISR); + if (isr & IRQ_TIMER) { + info->irq_occurred = true; + irq_disable(info, CHA, IRQ_TIMER); + } + + /* receive IRQs */ + if (isr & IRQ_EXITHUNT) { + info->icount.exithunt++; + wake_up_interruptible(&info->event_wait_q); + } + if (isr & IRQ_BREAK_ON) { + info->icount.brk++; + if (info->port.flags & ASYNC_SAK) + do_SAK(tty); + } + if (isr & IRQ_RXTIME) { + issue_command(info, CHA, CMD_RXFIFO_READ); + } + if (isr & (IRQ_RXEOM | IRQ_RXFIFO)) { + if (info->params.mode == MGSL_MODE_HDLC) + rx_ready_hdlc(info, isr & IRQ_RXEOM); + else + rx_ready_async(info, isr & IRQ_RXEOM); + } + + /* transmit IRQs */ + if (isr & IRQ_UNDERRUN) { + if (info->tx_aborting) + info->icount.txabort++; + else + info->icount.txunder++; + tx_done(info, tty); + } + else if (isr & IRQ_ALLSENT) { + info->icount.txok++; + tx_done(info, tty); + } + else if (isr & IRQ_TXFIFO) + tx_ready(info, tty); + } + if (gis & BIT7) { + pis = read_reg(info, CHA + PIS); + if (pis & BIT1) + dsr_change(info); + if (pis & BIT2) + ri_change(info); + } + } + + /* Request bottom half processing if there's something + * for it to do and the bh is not already running + */ + + if (info->pending_bh && !info->bh_running && !info->bh_requested) { + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):%s queueing bh task.\n", + __FILE__,__LINE__,info->device_name); + schedule_work(&info->task); + info->bh_requested = true; + } + + spin_unlock(&info->lock); + tty_kref_put(tty); + + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):mgslpc_isr(%d)exit.\n", + __FILE__, __LINE__, info->irq_level); + + return IRQ_HANDLED; +} + +/* Initialize and start device. 
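+ * Runs on first open of a port: allocates a page for the transmit
+ * buffer, claims adapter resources, runs the adapter_test() diagnostics,
+ * and programs the hardware through mgslpc_change_params().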
+ */ +static int startup(MGSLPC_INFO * info, struct tty_struct *tty) +{ + int retval = 0; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name); + + if (info->port.flags & ASYNC_INITIALIZED) + return 0; + + if (!info->tx_buf) { + /* allocate a page of memory for a transmit buffer */ + info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); + if (!info->tx_buf) { + printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", + __FILE__, __LINE__, info->device_name); + return -ENOMEM; + } + } + + info->pending_bh = 0; + + memset(&info->icount, 0, sizeof(info->icount)); + + setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info); + + /* Allocate and claim adapter resources */ + retval = claim_resources(info); + + /* perform existence check and diagnostics */ + if (!retval) + retval = adapter_test(info); + + if (retval) { + if (capable(CAP_SYS_ADMIN) && tty) + set_bit(TTY_IO_ERROR, &tty->flags); + release_resources(info); + return retval; + } + + /* program hardware for current parameters */ + mgslpc_change_params(info, tty); + + if (tty) + clear_bit(TTY_IO_ERROR, &tty->flags); + + info->port.flags |= ASYNC_INITIALIZED; + + return 0; +} + +/* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware + */ +static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty) +{ + unsigned long flags; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_shutdown(%s)\n", + __FILE__, __LINE__, info->device_name); + + /* clear status wait queue because status changes */ + /* can't happen after shutting down the hardware */ + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + del_timer_sync(&info->tx_timer); + + if (info->tx_buf) { + free_page((unsigned long) info->tx_buf); + info->tx_buf = NULL; + } + + spin_lock_irqsave(&info->lock, flags); + + rx_stop(info); + tx_stop(info); + + /* TODO:disable interrupts instead of reset to preserve signal states */ + reset_device(info); + + if (!tty || tty->termios.c_cflag & HUPCL) { + info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); + set_signals(info); + } + + spin_unlock_irqrestore(&info->lock, flags); + + release_resources(info); + + if (tty) + set_bit(TTY_IO_ERROR, &tty->flags); + + info->port.flags &= ~ASYNC_INITIALIZED; +} + +static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + + rx_stop(info); + tx_stop(info); + info->tx_count = info->tx_put = info->tx_get = 0; + + if (info->params.mode == MGSL_MODE_HDLC || info->netcount) + hdlc_mode(info); + else + async_mode(info); + + set_signals(info); + + info->dcd_chkcount = 0; + info->cts_chkcount = 0; + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + + irq_enable(info, CHB, IRQ_DCD | IRQ_CTS); + port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI); + get_signals(info); + + if (info->netcount || (tty && (tty->termios.c_cflag & CREAD))) + rx_start(info); + + spin_unlock_irqrestore(&info->lock, flags); +} + +/* Reconfigure adapter based on new parameters + */ +static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty) +{ + unsigned cflag; + int bits_per_char; + + if (!tty) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_change_params(%s)\n", + __FILE__, __LINE__, info->device_name); + + cflag = tty->termios.c_cflag; + + /* if B0 rate (hangup) specified then negate RTS 
and DTR */ + /* otherwise assert RTS and DTR */ + if (cflag & CBAUD) + info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; + else + info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); + + /* byte size and parity */ + + switch (cflag & CSIZE) { + case CS5: info->params.data_bits = 5; break; + case CS6: info->params.data_bits = 6; break; + case CS7: info->params.data_bits = 7; break; + case CS8: info->params.data_bits = 8; break; + default: info->params.data_bits = 7; break; + } + + if (cflag & CSTOPB) + info->params.stop_bits = 2; + else + info->params.stop_bits = 1; + + info->params.parity = ASYNC_PARITY_NONE; + if (cflag & PARENB) { + if (cflag & PARODD) + info->params.parity = ASYNC_PARITY_ODD; + else + info->params.parity = ASYNC_PARITY_EVEN; +#ifdef CMSPAR + if (cflag & CMSPAR) + info->params.parity = ASYNC_PARITY_SPACE; +#endif + } + + /* calculate number of jiffies to transmit a full + * FIFO (32 bytes) at specified data rate + */ + bits_per_char = info->params.data_bits + + info->params.stop_bits + 1; + + /* if port data rate is set to 460800 or less then + * allow tty settings to override, otherwise keep the + * current data rate. + */ + if (info->params.data_rate <= 460800) { + info->params.data_rate = tty_get_baud_rate(tty); + } + + if (info->params.data_rate) { + info->timeout = (32*HZ*bits_per_char) / + info->params.data_rate; + } + info->timeout += HZ/50; /* Add .02 seconds of slop */ + + if (cflag & CRTSCTS) + info->port.flags |= ASYNC_CTS_FLOW; + else + info->port.flags &= ~ASYNC_CTS_FLOW; + + if (cflag & CLOCAL) + info->port.flags &= ~ASYNC_CHECK_CD; + else + info->port.flags |= ASYNC_CHECK_CD; + + /* process tty input control flags */ + + info->read_status_mask = 0; + if (I_INPCK(tty)) + info->read_status_mask |= BIT7 | BIT6; + if (I_IGNPAR(tty)) + info->ignore_status_mask |= BIT7 | BIT6; + + mgslpc_program_hw(info, tty); +} + +/* Add a character to the transmit buffer + */ +static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) { + printk("%s(%d):mgslpc_put_char(%d) on %s\n", + __FILE__, __LINE__, ch, info->device_name); + } + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) + return 0; + + if (!info->tx_buf) + return 0; + + spin_lock_irqsave(&info->lock, flags); + + if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) { + if (info->tx_count < TXBUFSIZE - 1) { + info->tx_buf[info->tx_put++] = ch; + info->tx_put &= TXBUFSIZE-1; + info->tx_count++; + } + } + + spin_unlock_irqrestore(&info->lock, flags); + return 1; +} + +/* Enable transmitter so remaining characters in the + * transmit buffer are sent. 
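+ * Called by the tty core after characters have been queued with
+ * mgslpc_put_char(); it only needs to kick tx_start() when no transmit
+ * is already active.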
+ */ +static void mgslpc_flush_chars(struct tty_struct *tty) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n", + __FILE__, __LINE__, info->device_name, info->tx_count); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars")) + return; + + if (info->tx_count <= 0 || tty->stopped || + tty->hw_stopped || !info->tx_buf) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n", + __FILE__, __LINE__, info->device_name); + + spin_lock_irqsave(&info->lock, flags); + if (!info->tx_active) + tx_start(info, tty); + spin_unlock_irqrestore(&info->lock, flags); +} + +/* Send a block of data + * + * Arguments: + * + * tty pointer to tty information structure + * buf pointer to buffer containing send data + * count size of send data in bytes + * + * Returns: number of characters written + */ +static int mgslpc_write(struct tty_struct * tty, + const unsigned char *buf, int count) +{ + int c, ret = 0; + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_write(%s) count=%d\n", + __FILE__, __LINE__, info->device_name, count); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") || + !info->tx_buf) + goto cleanup; + + if (info->params.mode == MGSL_MODE_HDLC) { + if (count > TXBUFSIZE) { + ret = -EIO; + goto cleanup; + } + if (info->tx_active) + goto cleanup; + else if (info->tx_count) + goto start; + } + + for (;;) { + c = min(count, + min(TXBUFSIZE - info->tx_count - 1, + TXBUFSIZE - info->tx_put)); + if (c <= 0) + break; + + memcpy(info->tx_buf + info->tx_put, buf, c); + + spin_lock_irqsave(&info->lock, flags); + info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1); + info->tx_count += c; + spin_unlock_irqrestore(&info->lock, flags); + + buf += c; + count -= c; + ret += c; + } +start: + if (info->tx_count && !tty->stopped && !tty->hw_stopped) { + spin_lock_irqsave(&info->lock, flags); + if (!info->tx_active) + tx_start(info, tty); + spin_unlock_irqrestore(&info->lock, flags); + } +cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_write(%s) returning=%d\n", + __FILE__, __LINE__, info->device_name, ret); + return ret; +} + +/* Return the count of free bytes in transmit buffer + */ +static int mgslpc_write_room(struct tty_struct *tty) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + int ret; + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room")) + return 0; + + if (info->params.mode == MGSL_MODE_HDLC) { + /* HDLC (frame oriented) mode */ + if (info->tx_active) + return 0; + else + return HDLC_MAX_FRAME_SIZE; + } else { + ret = TXBUFSIZE - info->tx_count - 1; + if (ret < 0) + ret = 0; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_write_room(%s)=%d\n", + __FILE__, __LINE__, info->device_name, ret); + return ret; +} + +/* Return the count of bytes in transmit buffer + */ +static int mgslpc_chars_in_buffer(struct tty_struct *tty) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + int rc; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_chars_in_buffer(%s)\n", + __FILE__, __LINE__, info->device_name); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer")) + return 0; + + if (info->params.mode == MGSL_MODE_HDLC) + rc = info->tx_active ? 
info->max_frame_size : 0;
+ else
+ rc = info->tx_count;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
+ __FILE__, __LINE__, info->device_name, rc);
+
+ return rc;
+}
+
+/* Discard all data in the send buffer
+ */
+static void mgslpc_flush_buffer(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
+ __FILE__, __LINE__, info->device_name);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
+ return;
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ del_timer(&info->tx_timer);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+}
+
+/* Send a high-priority XON/XOFF character
+ */
+static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
+ __FILE__, __LINE__, info->device_name, ch);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
+ return;
+
+ info->x_char = ch;
+ if (ch) {
+ spin_lock_irqsave(&info->lock, flags);
+ if (!info->tx_enabled)
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
+ }
+}
+
+/* Signal remote device to throttle send data (our receive data)
+ */
+static void mgslpc_throttle(struct tty_struct * tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_throttle(%s) entry\n",
+ __FILE__, __LINE__, info->device_name);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ mgslpc_send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
+ info->serial_signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ }
+}
+
+/* Signal remote device to stop throttling send data (our receive data)
+ */
+static void mgslpc_unthrottle(struct tty_struct * tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
+ __FILE__, __LINE__, info->device_name);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ mgslpc_send_xchar(tty, START_CHAR(tty));
+ }
+
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
+ info->serial_signals |= SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ }
+}
+
+/* get the current serial statistics
+ */
+static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("get_stats(%s)\n", info->device_name);
+ if (!user_icount) {
+ memset(&info->icount, 0, sizeof(info->icount));
+ } else {
+ COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+ if (err)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/* get the current serial parameters
+ */
+static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("get_params(%s)\n", info->device_name);
+ COPY_TO_USER(err,user_params,
&info->params, sizeof(MGSL_PARAMS)); + if (err) + return -EFAULT; + return 0; +} + +/* set the serial parameters + * + * Arguments: + * + * info pointer to device instance data + * new_params user buffer containing new serial params + * + * Returns: 0 if success, otherwise error code + */ +static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty) +{ + unsigned long flags; + MGSL_PARAMS tmp_params; + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):set_params %s\n", __FILE__,__LINE__, + info->device_name); + COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); + if (err) { + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):set_params(%s) user buffer copy failed\n", + __FILE__, __LINE__, info->device_name); + return -EFAULT; + } + + spin_lock_irqsave(&info->lock, flags); + memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); + spin_unlock_irqrestore(&info->lock, flags); + + mgslpc_change_params(info, tty); + + return 0; +} + +static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode) +{ + int err; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode); + COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); + if (err) + return -EFAULT; + return 0; +} + +static int set_txidle(MGSLPC_INFO * info, int idle_mode) +{ + unsigned long flags; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("set_txidle(%s,%d)\n", info->device_name, idle_mode); + spin_lock_irqsave(&info->lock, flags); + info->idle_mode = idle_mode; + tx_set_idle(info); + spin_unlock_irqrestore(&info->lock, flags); + return 0; +} + +static int get_interface(MGSLPC_INFO * info, int __user *if_mode) +{ + int err; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("get_interface(%s)=%d\n", info->device_name, info->if_mode); + COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int)); + if (err) + return -EFAULT; + return 0; +} + +static int set_interface(MGSLPC_INFO * info, int if_mode) +{ + unsigned long flags; + unsigned char val; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("set_interface(%s,%d)\n", info->device_name, if_mode); + spin_lock_irqsave(&info->lock, flags); + info->if_mode = if_mode; + + val = read_reg(info, PVR) & 0x0f; + switch (info->if_mode) + { + case MGSL_INTERFACE_RS232: val |= PVR_RS232; break; + case MGSL_INTERFACE_V35: val |= PVR_V35; break; + case MGSL_INTERFACE_RS422: val |= PVR_RS422; break; + } + write_reg(info, PVR, val); + + spin_unlock_irqrestore(&info->lock, flags); + return 0; +} + +static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("set_txenable(%s,%d)\n", info->device_name, enable); + + spin_lock_irqsave(&info->lock, flags); + if (enable) { + if (!info->tx_enabled) + tx_start(info, tty); + } else { + if (info->tx_enabled) + tx_stop(info); + } + spin_unlock_irqrestore(&info->lock, flags); + return 0; +} + +static int tx_abort(MGSLPC_INFO * info) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("tx_abort(%s)\n", info->device_name); + + spin_lock_irqsave(&info->lock, flags); + if (info->tx_active && info->tx_count && + info->params.mode == MGSL_MODE_HDLC) { + /* clear data count so FIFO is not filled on next IRQ. + * This results in underrun and abort transmission. 
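+ * (The resulting IRQ_UNDERRUN is counted as txabort by mgslpc_isr()
+ * because tx_aborting is set below.)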
+ */
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ info->tx_aborting = true;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+static int set_rxenable(MGSLPC_INFO * info, int enable)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("set_rxenable(%s,%d)\n", info->device_name, enable);
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (enable) {
+ if (!info->rx_enabled)
+ rx_start(info);
+ } else {
+ if (info->rx_enabled)
+ rx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+/* wait for specified event to occur
+ *
+ * Arguments: info pointer to device instance data
+ * mask pointer to bitmask of events to wait for
+ * Return Value: 0 if successful, with the bit mask updated to
+ * reflect the events that occurred,
+ * otherwise error code
+ */
+static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
+{
+ unsigned long flags;
+ int s;
+ int rc=0;
+ struct mgsl_icount cprev, cnow;
+ int events;
+ int mask;
+ struct _input_signal_events oldsigs, newsigs;
+ DECLARE_WAITQUEUE(wait, current);
+
+ COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
+ if (rc)
+ return -EFAULT;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("wait_events(%s,%d)\n", info->device_name, mask);
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ /* return immediately if state matches requested events */
+ get_signals(info);
+ s = info->serial_signals;
+ events = mask &
+ ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+ ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+ if (events) {
+ spin_unlock_irqrestore(&info->lock, flags);
+ goto exit;
+ }
+
+ /* save current irq counts */
+ cprev = info->icount;
+ oldsigs = info->input_signal_events;
+
+ if ((info->params.mode == MGSL_MODE_HDLC) &&
+ (mask & MgslEvent_ExitHuntMode))
+ irq_enable(info, CHA, IRQ_EXITHUNT);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&info->event_wait_q, &wait);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get current irq counts */
+ spin_lock_irqsave(&info->lock, flags);
+ cnow = info->icount;
+ newsigs = info->input_signal_events;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* if no change, wait aborted for some reason */
+ if (newsigs.dsr_up == oldsigs.dsr_up &&
+ newsigs.dsr_down == oldsigs.dsr_down &&
+ newsigs.dcd_up == oldsigs.dcd_up &&
+ newsigs.dcd_down == oldsigs.dcd_down &&
+ newsigs.cts_up == oldsigs.cts_up &&
+ newsigs.cts_down == oldsigs.cts_down &&
+ newsigs.ri_up == oldsigs.ri_up &&
+ newsigs.ri_down == oldsigs.ri_down &&
+ cnow.exithunt == cprev.exithunt &&
+ cnow.rxidle == cprev.rxidle) {
+ rc = -EIO;
+ break;
+ }
+
+ events = mask &
+ ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
+ (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+ (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
+ (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+ (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
+ (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+ (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
+ (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
+ (cnow.exithunt != cprev.exithunt ?
MgslEvent_ExitHuntMode:0) + + (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) ); + if (events) + break; + + cprev = cnow; + oldsigs = newsigs; + } + + remove_wait_queue(&info->event_wait_q, &wait); + set_current_state(TASK_RUNNING); + + if (mask & MgslEvent_ExitHuntMode) { + spin_lock_irqsave(&info->lock, flags); + if (!waitqueue_active(&info->event_wait_q)) + irq_disable(info, CHA, IRQ_EXITHUNT); + spin_unlock_irqrestore(&info->lock, flags); + } +exit: + if (rc == 0) + PUT_USER(rc, events, mask_ptr); + return rc; +} + +static int modem_input_wait(MGSLPC_INFO *info,int arg) +{ + unsigned long flags; + int rc; + struct mgsl_icount cprev, cnow; + DECLARE_WAITQUEUE(wait, current); + + /* save current irq counts */ + spin_lock_irqsave(&info->lock, flags); + cprev = info->icount; + add_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock, flags); + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get new irq counts */ + spin_lock_irqsave(&info->lock, flags); + cnow = info->icount; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock, flags); + + /* if no change, wait aborted for some reason */ + if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && + cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { + rc = -EIO; + break; + } + + /* check for change in caller specified modem input */ + if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || + (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || + (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || + (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { + rc = 0; + break; + } + + cprev = cnow; + } + remove_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_RUNNING); + return rc; +} + +/* return the state of the serial control and status signals + */ +static int tiocmget(struct tty_struct *tty) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned int result; + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + + result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + + ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + + ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + + ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + + ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + + ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tiocmget() value=%08X\n", + __FILE__, __LINE__, info->device_name, result); + return result; +} + +/* set modem control signals (DTR/RTS) + */ +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tiocmset(%x,%x)\n", + __FILE__, __LINE__, info->device_name, set, clear); + + if (set & TIOCM_RTS) + info->serial_signals |= SerialSignal_RTS; + if (set & TIOCM_DTR) + info->serial_signals |= SerialSignal_DTR; + if (clear & TIOCM_RTS) + info->serial_signals &= ~SerialSignal_RTS; + if (clear & TIOCM_DTR) + info->serial_signals &= ~SerialSignal_DTR; + + spin_lock_irqsave(&info->lock, flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + + return 0; +} + +/* Set or clear transmit break condition + * + * Arguments: tty pointer to tty instance data + * break_state -1=set break condition, 0=clear + */ +static int mgslpc_break(struct tty_struct *tty, int break_state) +{ + MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_break(%s,%d)\n", + __FILE__, __LINE__, info->device_name, break_state); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break")) + return -EINVAL; + + spin_lock_irqsave(&info->lock, flags); + if (break_state == -1) + set_reg_bits(info, CHA+DAFO, BIT6); + else + clear_reg_bits(info, CHA+DAFO, BIT6); + spin_unlock_irqrestore(&info->lock, flags); + return 0; +} + +static int mgslpc_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) +{ + MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; + struct mgsl_icount cnow; /* kernel counter temps */ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + cnow = info->icount; + spin_unlock_irqrestore(&info->lock, flags); + + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->frame = cnow.frame; + icount->overrun = cnow.overrun; + icount->parity = cnow.parity; + icount->brk = cnow.brk; + icount->buf_overrun = cnow.buf_overrun; + + return 0; +} + +/* Service an IOCTL request + * + * Arguments: + * + * tty pointer to tty instance data + * cmd IOCTL command code + * arg command argument/context + * + * Return Value: 0 if success, otherwise error code + */ +static int mgslpc_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; + void __user *argp = (void __user *)arg; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__, + info->device_name, cmd); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl")) + return -ENODEV; + + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && + (cmd != TIOCMIWAIT)) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + switch (cmd) { + case MGSL_IOCGPARAMS: + return get_params(info, argp); + case MGSL_IOCSPARAMS: + return set_params(info, argp, tty); + case MGSL_IOCGTXIDLE: + return get_txidle(info, argp); + case MGSL_IOCSTXIDLE: + return set_txidle(info, (int)arg); + case MGSL_IOCGIF: + return get_interface(info, argp); + case MGSL_IOCSIF: + return set_interface(info,(int)arg); + case MGSL_IOCTXENABLE: + return set_txenable(info,(int)arg, tty); + case MGSL_IOCRXENABLE: 
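+ /* A minimal, hypothetical user-space sketch of this ioctl interface
+ * (added illustration, not part of the original source):
+ *
+ * int fd = open("/dev/ttySLP0", O_RDWR);
+ * MGSL_PARAMS params;
+ * ioctl(fd, MGSL_IOCGPARAMS, &params); // get_params()
+ * params.mode = MGSL_MODE_HDLC;
+ * ioctl(fd, MGSL_IOCSPARAMS, &params); // set_params()
+ * ioctl(fd, MGSL_IOCRXENABLE, 1); // set_rxenable() below
+ */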
+ return set_rxenable(info,(int)arg); + case MGSL_IOCTXABORT: + return tx_abort(info); + case MGSL_IOCGSTATS: + return get_stats(info, argp); + case MGSL_IOCWAITEVENT: + return wait_events(info, argp); + case TIOCMIWAIT: + return modem_input_wait(info,(int)arg); + default: + return -ENOIOCTLCMD; + } + return 0; +} + +/* Set new termios settings + * + * Arguments: + * + * tty pointer to tty structure + * termios pointer to buffer to hold returned old termios + */ +static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__, + tty->driver->name); + + /* just return if nothing has changed */ + if ((tty->termios.c_cflag == old_termios->c_cflag) + && (RELEVANT_IFLAG(tty->termios.c_iflag) + == RELEVANT_IFLAG(old_termios->c_iflag))) + return; + + mgslpc_change_params(info, tty); + + /* Handle transition to B0 status */ + if (old_termios->c_cflag & CBAUD && + !(tty->termios.c_cflag & CBAUD)) { + info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); + spin_lock_irqsave(&info->lock, flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + } + + /* Handle transition away from B0 status */ + if (!(old_termios->c_cflag & CBAUD) && + tty->termios.c_cflag & CBAUD) { + info->serial_signals |= SerialSignal_DTR; + if (!(tty->termios.c_cflag & CRTSCTS) || + !test_bit(TTY_THROTTLED, &tty->flags)) { + info->serial_signals |= SerialSignal_RTS; + } + spin_lock_irqsave(&info->lock, flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + } + + /* Handle turning off CRTSCTS */ + if (old_termios->c_cflag & CRTSCTS && + !(tty->termios.c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + tx_release(tty); + } +} + +static void mgslpc_close(struct tty_struct *tty, struct file * filp) +{ + MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; + struct tty_port *port = &info->port; + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close")) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", + __FILE__, __LINE__, info->device_name, port->count); + + if (tty_port_close_start(port, tty, filp) == 0) + goto cleanup; + + if (port->flags & ASYNC_INITIALIZED) + mgslpc_wait_until_sent(tty, info->timeout); + + mgslpc_flush_buffer(tty); + + tty_ldisc_flush(tty); + shutdown(info, tty); + + tty_port_close_end(port, tty); + tty_port_tty_set(port, NULL); +cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__, + tty->driver->name, port->count); +} + +/* Wait until the transmitter is empty. + */ +static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout) +{ + MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; + unsigned long orig_jiffies, char_time; + + if (!info) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n", + __FILE__, __LINE__, info->device_name); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) + return; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + goto exit; + + orig_jiffies = jiffies; + + /* Set check interval to 1/5 of estimated time to + * send a character, and make it at least 1. The check + * interval should also be less than the timeout. + * Note: use tight timings here to satisfy the NIST-PCTS. 
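+ *
+ * Worked example (an added illustration, using the timeout math in
+ * mgslpc_change_params(): 32 FIFO bytes, 10 bits per character at 9600
+ * bps): info->timeout is roughly 32 character times plus slop, so
+ * char_time = info->timeout/(32 * 5) is about one fifth of a character
+ * time, clamped to a minimum of one jiffy below.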
+ */ + + if (info->params.data_rate) { + char_time = info->timeout/(32 * 5); + if (!char_time) + char_time++; + } else + char_time = 1; + + if (timeout) + char_time = min_t(unsigned long, char_time, timeout); + + if (info->params.mode == MGSL_MODE_HDLC) { + while (info->tx_active) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + } else { + while ((info->tx_count || info->tx_active) && + info->tx_enabled) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + } + +exit: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n", + __FILE__, __LINE__, info->device_name); +} + +/* Called by tty_hangup() when a hangup is signaled. + * This is the same as closing all open files for the port. + */ +static void mgslpc_hangup(struct tty_struct *tty) +{ + MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_hangup(%s)\n", + __FILE__, __LINE__, info->device_name); + + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup")) + return; + + mgslpc_flush_buffer(tty); + shutdown(info, tty); + tty_port_hangup(&info->port); +} + +static int carrier_raised(struct tty_port *port) +{ + MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + + if (info->serial_signals & SerialSignal_DCD) + return 1; + return 0; +} + +static void dtr_rts(struct tty_port *port, int onoff) +{ + MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + if (onoff) + info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; + else + info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); + set_signals(info); + spin_unlock_irqrestore(&info->lock, flags); +} + + +static int mgslpc_open(struct tty_struct *tty, struct file * filp) +{ + MGSLPC_INFO *info; + struct tty_port *port; + int retval, line; + unsigned long flags; + + /* verify range of specified line number */ + line = tty->index; + if (line >= mgslpc_device_count) { + printk("%s(%d):mgslpc_open with invalid line #%d.\n", + __FILE__, __LINE__, line); + return -ENODEV; + } + + /* find the info structure for the specified line */ + info = mgslpc_device_list; + while(info && info->line != line) + info = info->next_device; + if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open")) + return -ENODEV; + + port = &info->port; + tty->driver_data = info; + tty_port_tty_set(port, tty); + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", + __FILE__, __LINE__, tty->driver->name, port->count); + + /* If port is closing, signal caller to try again */ + if (port->flags & ASYNC_CLOSING){ + wait_event_interruptible_tty(tty, port->close_wait, + !(port->flags & ASYNC_CLOSING)); + retval = ((port->flags & ASYNC_HUP_NOTIFY) ? + -EAGAIN : -ERESTARTSYS); + goto cleanup; + } + + port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 
1 : 0; + + spin_lock_irqsave(&info->netlock, flags); + if (info->netcount) { + retval = -EBUSY; + spin_unlock_irqrestore(&info->netlock, flags); + goto cleanup; + } + spin_lock(&port->lock); + port->count++; + spin_unlock(&port->lock); + spin_unlock_irqrestore(&info->netlock, flags); + + if (port->count == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info, tty); + if (retval < 0) + goto cleanup; + } + + retval = tty_port_block_til_ready(&info->port, tty, filp); + if (retval) { + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):block_til_ready(%s) returned %d\n", + __FILE__, __LINE__, info->device_name, retval); + goto cleanup; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_open(%s) success\n", + __FILE__, __LINE__, info->device_name); + retval = 0; + +cleanup: + return retval; +} + +/* + * /proc fs routines.... + */ + +static inline void line_info(struct seq_file *m, MGSLPC_INFO *info) +{ + char stat_buf[30]; + unsigned long flags; + + seq_printf(m, "%s:io:%04X irq:%d", + info->device_name, info->io_base, info->irq_level); + + /* output current serial signal states */ + spin_lock_irqsave(&info->lock, flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + + stat_buf[0] = 0; + stat_buf[1] = 0; + if (info->serial_signals & SerialSignal_RTS) + strcat(stat_buf, "|RTS"); + if (info->serial_signals & SerialSignal_CTS) + strcat(stat_buf, "|CTS"); + if (info->serial_signals & SerialSignal_DTR) + strcat(stat_buf, "|DTR"); + if (info->serial_signals & SerialSignal_DSR) + strcat(stat_buf, "|DSR"); + if (info->serial_signals & SerialSignal_DCD) + strcat(stat_buf, "|CD"); + if (info->serial_signals & SerialSignal_RI) + strcat(stat_buf, "|RI"); + + if (info->params.mode == MGSL_MODE_HDLC) { + seq_printf(m, " HDLC txok:%d rxok:%d", + info->icount.txok, info->icount.rxok); + if (info->icount.txunder) + seq_printf(m, " txunder:%d", info->icount.txunder); + if (info->icount.txabort) + seq_printf(m, " txabort:%d", info->icount.txabort); + if (info->icount.rxshort) + seq_printf(m, " rxshort:%d", info->icount.rxshort); + if (info->icount.rxlong) + seq_printf(m, " rxlong:%d", info->icount.rxlong); + if (info->icount.rxover) + seq_printf(m, " rxover:%d", info->icount.rxover); + if (info->icount.rxcrc) + seq_printf(m, " rxcrc:%d", info->icount.rxcrc); + } else { + seq_printf(m, " ASYNC tx:%d rx:%d", + info->icount.tx, info->icount.rx); + if (info->icount.frame) + seq_printf(m, " fe:%d", info->icount.frame); + if (info->icount.parity) + seq_printf(m, " pe:%d", info->icount.parity); + if (info->icount.brk) + seq_printf(m, " brk:%d", info->icount.brk); + if (info->icount.overrun) + seq_printf(m, " oe:%d", info->icount.overrun); + } + + /* Append serial signal status to end */ + seq_printf(m, " %s\n", stat_buf+1); + + seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", + info->tx_active,info->bh_requested,info->bh_running, + info->pending_bh); +} + +/* Called to print information about devices + */ +static int mgslpc_proc_show(struct seq_file *m, void *v) +{ + MGSLPC_INFO *info; + + seq_printf(m, "synclink driver:%s\n", driver_version); + + info = mgslpc_device_list; + while (info) { + line_info(m, info); + info = info->next_device; + } + return 0; +} + +static int mgslpc_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, mgslpc_proc_show, NULL); +} + +static const struct file_operations mgslpc_proc_fops = { + .owner = THIS_MODULE, + .open = mgslpc_proc_open, + .read = seq_read, + .llseek = seq_lseek, + 
.release = single_release, +}; + +static int rx_alloc_buffers(MGSLPC_INFO *info) +{ + /* each buffer has header and data */ + info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size; + + /* calculate total allocation size for 8 buffers */ + info->rx_buf_total_size = info->rx_buf_size * 8; + + /* limit total allocated memory */ + if (info->rx_buf_total_size > 0x10000) + info->rx_buf_total_size = 0x10000; + + /* calculate number of buffers */ + info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size; + + info->rx_buf = kmalloc(info->rx_buf_total_size, GFP_KERNEL); + if (info->rx_buf == NULL) + return -ENOMEM; + + /* unused flag buffer to satisfy receive_buf calling interface */ + info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL); + if (!info->flag_buf) { + kfree(info->rx_buf); + info->rx_buf = NULL; + return -ENOMEM; + } + + rx_reset_buffers(info); + return 0; +} + +static void rx_free_buffers(MGSLPC_INFO *info) +{ + kfree(info->rx_buf); + info->rx_buf = NULL; + kfree(info->flag_buf); + info->flag_buf = NULL; +} + +static int claim_resources(MGSLPC_INFO *info) +{ + if (rx_alloc_buffers(info) < 0) { + printk("Can't allocate rx buffer %s\n", info->device_name); + release_resources(info); + return -ENODEV; + } + return 0; +} + +static void release_resources(MGSLPC_INFO *info) +{ + if (debug_level >= DEBUG_LEVEL_INFO) + printk("release_resources(%s)\n", info->device_name); + rx_free_buffers(info); +} + +/* Add the specified device instance data structure to the + * global linked list of devices and increment the device count. + * + * Arguments: info pointer to device instance data + */ +static int mgslpc_add_device(MGSLPC_INFO *info) +{ + MGSLPC_INFO *current_dev = NULL; + struct device *tty_dev; + int ret; + + info->next_device = NULL; + info->line = mgslpc_device_count; + sprintf(info->device_name,"ttySLP%d",info->line); + + if (info->line < MAX_DEVICE_COUNT) { + if (maxframe[info->line]) + info->max_frame_size = maxframe[info->line]; + } + + mgslpc_device_count++; + + if (!mgslpc_device_list) + mgslpc_device_list = info; + else { + current_dev = mgslpc_device_list; + while (current_dev->next_device) + current_dev = current_dev->next_device; + current_dev->next_device = info; + } + + if (info->max_frame_size < 4096) + info->max_frame_size = 4096; + else if (info->max_frame_size > 65535) + info->max_frame_size = 65535; + + printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n", + info->device_name, info->io_base, info->irq_level); + +#if SYNCLINK_GENERIC_HDLC + ret = hdlcdev_init(info); + if (ret != 0) + goto failed; +#endif + + tty_dev = tty_port_register_device(&info->port, serial_driver, info->line, + &info->p_dev->dev); + if (IS_ERR(tty_dev)) { + ret = PTR_ERR(tty_dev); +#if SYNCLINK_GENERIC_HDLC + hdlcdev_exit(info); +#endif + goto failed; + } + + return 0; + +failed: + if (current_dev) + current_dev->next_device = NULL; + else + mgslpc_device_list = NULL; + mgslpc_device_count--; + return ret; +} + +static void mgslpc_remove_device(MGSLPC_INFO *remove_info) +{ + MGSLPC_INFO *info = mgslpc_device_list; + MGSLPC_INFO *last = NULL; + + while(info) { + if (info == remove_info) { + if (last) + last->next_device = info->next_device; + else + mgslpc_device_list = info->next_device; + tty_unregister_device(serial_driver, info->line); +#if SYNCLINK_GENERIC_HDLC + hdlcdev_exit(info); +#endif + release_resources(info); + tty_port_destroy(&info->port); + kfree(info); + mgslpc_device_count--; + return; + } + last = info; + info = info->next_device; + } +} + +static const struct 
pcmcia_device_id mgslpc_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids);
+
+static struct pcmcia_driver mgslpc_driver = {
+ .owner = THIS_MODULE,
+ .name = "synclink_cs",
+ .probe = mgslpc_probe,
+ .remove = mgslpc_detach,
+ .id_table = mgslpc_ids,
+ .suspend = mgslpc_suspend,
+ .resume = mgslpc_resume,
+};
+
+static const struct tty_operations mgslpc_ops = {
+ .open = mgslpc_open,
+ .close = mgslpc_close,
+ .write = mgslpc_write,
+ .put_char = mgslpc_put_char,
+ .flush_chars = mgslpc_flush_chars,
+ .write_room = mgslpc_write_room,
+ .chars_in_buffer = mgslpc_chars_in_buffer,
+ .flush_buffer = mgslpc_flush_buffer,
+ .ioctl = mgslpc_ioctl,
+ .throttle = mgslpc_throttle,
+ .unthrottle = mgslpc_unthrottle,
+ .send_xchar = mgslpc_send_xchar,
+ .break_ctl = mgslpc_break,
+ .wait_until_sent = mgslpc_wait_until_sent,
+ .set_termios = mgslpc_set_termios,
+ .stop = tx_pause,
+ .start = tx_release,
+ .hangup = mgslpc_hangup,
+ .tiocmget = tiocmget,
+ .tiocmset = tiocmset,
+ .get_icount = mgslpc_get_icount,
+ .proc_fops = &mgslpc_proc_fops,
+};
+
+static int __init synclink_cs_init(void)
+{
+ int rc;
+
+ if (break_on_load) {
+ mgslpc_get_text_ptr();
+ BREAKPOINT();
+ }
+
+ serial_driver = tty_alloc_driver(MAX_DEVICE_COUNT,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(serial_driver)) {
+ rc = PTR_ERR(serial_driver);
+ goto err;
+ }
+
+ /* Initialize the tty_driver structure */
+ serial_driver->driver_name = "synclink_cs";
+ serial_driver->name = "ttySLP";
+ serial_driver->major = ttymajor;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ tty_set_operations(serial_driver, &mgslpc_ops);
+
+ rc = tty_register_driver(serial_driver);
+ if (rc < 0) {
+ printk(KERN_ERR "%s(%d):Couldn't register serial driver\n",
+ __FILE__, __LINE__);
+ goto err_put_tty;
+ }
+
+ rc = pcmcia_register_driver(&mgslpc_driver);
+ if (rc < 0)
+ goto err_unreg_tty;
+
+ printk(KERN_INFO "%s %s, tty major#%d\n", driver_name, driver_version,
+ serial_driver->major);
+
+ return 0;
+err_unreg_tty:
+ tty_unregister_driver(serial_driver);
+err_put_tty:
+ put_tty_driver(serial_driver);
+err:
+ return rc;
+}
+
+static void __exit synclink_cs_exit(void)
+{
+ pcmcia_unregister_driver(&mgslpc_driver);
+ tty_unregister_driver(serial_driver);
+ put_tty_driver(serial_driver);
+}
+
+module_init(synclink_cs_init);
+module_exit(synclink_cs_exit);
+
+static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate)
+{
+ unsigned int M, N;
+ unsigned char val;
+
+ /* note: standard BRG mode is broken in V3.2 chip
+ * so enhanced mode is always used
+ */
+
+ if (rate) {
+ N = 3686400 / rate;
+ if (!N)
+ N = 1;
+ N >>= 1;
+ for (M = 1; N > 64 && M < 16; M++)
+ N >>= 1;
+ N--;
+
+ /* BGR[5..0] = N
+ * BGR[9..6] = M
+ * BGR[7..0] contained in BGR register
+ * BGR[9..8] contained in CCR2[7..6]
+ * divisor = (N+1)*2^M
+ *
+ * Note: M *must* not be zero (causes asymmetric duty cycle)
+ */
+ write_reg(info, (unsigned char) (channel + BGR),
+ (unsigned char) ((M << 6) + N));
+ val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f;
+ val |= ((M << 4) & 0xc0);
+ write_reg(info, (unsigned char) (channel + CCR2), val);
+ }
+}
+
+/* Enable the AUX clock output at the specified frequency.
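+ *
+ * Rough worked example of the divisor = (N+1)*2^M arithmetic in
+ * mgslpc_set_rate() above (an added illustration, not from the original
+ * source): for rate = 921600, N = 3686400/921600 = 4; N >>= 1 leaves 2;
+ * the for loop exits immediately (N <= 64) with M = 1; N-- leaves 1;
+ * so the programmed divisor is (1+1)*2^1 = 4 and 3686400/4 = 921600.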
+ */
+static void enable_auxclk(MGSLPC_INFO *info)
+{
+ unsigned char val;
+
+ /* MODE
+ *
+ * 07..06 MDS[1..0] 10 = transparent HDLC mode
+ * 05 ADM Address Mode, 0 = no addr recognition
+ * 04 TMD Timer Mode, 0 = external
+ * 03 RAC Receiver Active, 0 = inactive
+ * 02 RTS 0=RTS active during xmit, 1=RTS always active
+ * 01 TRS Timer Resolution, 1=512
+ * 00 TLP Test Loop, 0 = no loop
+ *
+ * 1000 0010
+ */
+ val = 0x82;
+
+ /* channel B RTS is used to enable AUXCLK driver on SP505 */
+ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+ val |= BIT2;
+ write_reg(info, CHB + MODE, val);
+
+ /* CCR0
+ *
+ * 07 PU Power Up, 1=active, 0=power down
+ * 06 MCE Master Clock Enable, 1=enabled
+ * 05 Reserved, 0
+ * 04..02 SC[2..0] Encoding
+ * 01..00 SM[1..0] Serial Mode, 00=HDLC
+ *
+ * 11000000
+ */
+ write_reg(info, CHB + CCR0, 0xc0);
+
+ /* CCR1
+ *
+ * 07 SFLG Shared Flag, 0 = disable shared flags
+ * 06 GALP Go Active On Loop, 0 = not used
+ * 05 GLP Go On Loop, 0 = not used
+ * 04 ODS Output Driver Select, 1=TxD is push-pull output
+ * 03 ITF Interframe Time Fill, 0=mark, 1=flag
+ * 02..00 CM[2..0] Clock Mode
+ *
+ * 0001 0111
+ */
+ write_reg(info, CHB + CCR1, 0x17);
+
+ /* CCR2 (Channel B)
+ *
+ * 07..06 BGR[9..8] Baud rate bits 9..8
+ * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
+ * 04 SSEL Clock source select, 1=submode b
+ * 03 TOE 0=TxCLK is input, 1=TxCLK is output
+ * 02 RWX Read/Write Exchange 0=disabled
+ * 01 C32, CRC select, 0=CRC-16, 1=CRC-32
+ * 00 DIV, data inversion 0=disabled, 1=enabled
+ *
+ * 0011 1000
+ */
+ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+ write_reg(info, CHB + CCR2, 0x38);
+ else
+ write_reg(info, CHB + CCR2, 0x30);
+
+ /* CCR4
+ *
+ * 07 MCK4 Master Clock Divide by 4, 1=enabled
+ * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+ * 05 TST1 Test Pin, 0=normal operation
+ * 04 ICD Invert Carrier Detect, 1=enabled (active low)
+ * 03..02 Reserved, must be 0
+ * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes
+ *
+ * 0101 0000
+ */
+ write_reg(info, CHB + CCR4, 0x50);
+
+ /* if auxclk not enabled, set internal BRG so
+ * CTS transitions can be detected (requires TxC)
+ */
+ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+ mgslpc_set_rate(info, CHB, info->params.clock_speed);
+ else
+ mgslpc_set_rate(info, CHB, 921600);
+}
+
+static void loopback_enable(MGSLPC_INFO *info)
+{
+ unsigned char val;
+
+ /* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */
+ val = read_reg(info, CHA + CCR1) | (BIT2 | BIT1 | BIT0);
+ write_reg(info, CHA + CCR1, val);
+
+ /* CCR2:04 SSEL Clock source select, 1=submode b */
+ val = read_reg(info, CHA + CCR2) | (BIT4 | BIT5);
+ write_reg(info, CHA + CCR2, val);
+
+ /* set LinkSpeed if available, otherwise default to 1.8432 Mbps */
+ if (info->params.clock_speed)
+ mgslpc_set_rate(info, CHA, info->params.clock_speed);
+ else
+ mgslpc_set_rate(info, CHA, 1843200);
+
+ /* MODE:00 TLP Test Loop, 1=loopback enabled */
+ val = read_reg(info, CHA + MODE) | BIT0;
+ write_reg(info, CHA + MODE, val);
+}
+
+static void hdlc_mode(MGSLPC_INFO *info)
+{
+ unsigned char val;
+ unsigned char clkmode, clksubmode;
+
+ /* disable all interrupts */
+ irq_disable(info, CHA, 0xffff);
+ irq_disable(info, CHB, 0xffff);
+ port_irq_disable(info, 0xff);
+
+ /* assume clock mode 0a, rcv=RxC xmt=TxC */
+ clkmode = clksubmode = 0;
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL
+ && info->params.flags & HDLC_FLAG_TXC_DPLL) {
+ /* clock mode 7a, rcv = DPLL, xmt = DPLL */
+ clkmode = 7;
+ }
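/* a summary added for clarity: the branches below decode the remaining
+ * clock combinations (7b BRG/BRG, 6b DPLL/BRG16, 6a DPLL/TxC, 0b RxC/BRG);
+ * mode 0a (RxC/TxC) is the default set above */
+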
else if (info->params.flags & HDLC_FLAG_RXC_BRG + && info->params.flags & HDLC_FLAG_TXC_BRG) { + /* clock mode 7b, rcv = BRG, xmt = BRG */ + clkmode = 7; + clksubmode = 1; + } else if (info->params.flags & HDLC_FLAG_RXC_DPLL) { + if (info->params.flags & HDLC_FLAG_TXC_BRG) { + /* clock mode 6b, rcv = DPLL, xmt = BRG/16 */ + clkmode = 6; + clksubmode = 1; + } else { + /* clock mode 6a, rcv = DPLL, xmt = TxC */ + clkmode = 6; + } + } else if (info->params.flags & HDLC_FLAG_TXC_BRG) { + /* clock mode 0b, rcv = RxC, xmt = BRG */ + clksubmode = 1; + } + + /* MODE + * + * 07..06 MDS[1..0] 10 = transparent HDLC mode + * 05 ADM Address Mode, 0 = no addr recognition + * 04 TMD Timer Mode, 0 = external + * 03 RAC Receiver Active, 0 = inactive + * 02 RTS 0=RTS active during xmit, 1=RTS always active + * 01 TRS Timer Resolution, 1=512 + * 00 TLP Test Loop, 0 = no loop + * + * 1000 0010 + */ + val = 0x82; + if (info->params.loopback) + val |= BIT0; + + /* preserve RTS state */ + if (info->serial_signals & SerialSignal_RTS) + val |= BIT2; + write_reg(info, CHA + MODE, val); + + /* CCR0 + * + * 07 PU Power Up, 1=active, 0=power down + * 06 MCE Master Clock Enable, 1=enabled + * 05 Reserved, 0 + * 04..02 SC[2..0] Encoding + * 01..00 SM[1..0] Serial Mode, 00=HDLC + * + * 11000000 + */ + val = 0xc0; + switch (info->params.encoding) + { + case HDLC_ENCODING_NRZI: + val |= BIT3; + break; + case HDLC_ENCODING_BIPHASE_SPACE: + val |= BIT4; + break; // FM0 + case HDLC_ENCODING_BIPHASE_MARK: + val |= BIT4 | BIT2; + break; // FM1 + case HDLC_ENCODING_BIPHASE_LEVEL: + val |= BIT4 | BIT3; + break; // Manchester + } + write_reg(info, CHA + CCR0, val); + + /* CCR1 + * + * 07 SFLG Shared Flag, 0 = disable shared flags + * 06 GALP Go Active On Loop, 0 = not used + * 05 GLP Go On Loop, 0 = not used + * 04 ODS Output Driver Select, 1=TxD is push-pull output + * 03 ITF Interframe Time Fill, 0=mark, 1=flag + * 02..00 CM[2..0] Clock Mode + * + * 0001 0000 + */ + val = 0x10 + clkmode; + write_reg(info, CHA + CCR1, val); + + /* CCR2 + * + * 07..06 BGR[9..8] Baud rate bits 9..8 + * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value + * 04 SSEL Clock source select, 1=submode b + * 03 TOE 0=TxCLK is input, 1=TxCLK is output + * 02 RWX Read/Write Exchange 0=disabled + * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 + * 00 DIV, data inversion 0=disabled, 1=enabled + * + * 0000 0000 + */ + val = 0x00; + if (clkmode == 2 || clkmode == 3 || clkmode == 6 + || clkmode == 7 || (clkmode == 0 && clksubmode == 1)) + val |= BIT5; + if (clksubmode) + val |= BIT4; + if (info->params.crc_type == HDLC_CRC_32_CCITT) + val |= BIT1; + if (info->params.encoding == HDLC_ENCODING_NRZB) + val |= BIT0; + write_reg(info, CHA + CCR2, val); + + /* CCR3 + * + * 07..06 PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8 + * 05 EPT Enable preamble transmission, 1=enabled + * 04 RADD Receive address pushed to FIFO, 0=disabled + * 03 CRL CRC Reset Level, 0=FFFF + * 02 RCRC Rx CRC 0=On 1=Off + * 01 TCRC Tx CRC 0=On 1=Off + * 00 PSD DPLL Phase Shift Disable + * + * 0000 0000 + */ + val = 0x00; + if (info->params.crc_type == HDLC_CRC_NONE) + val |= BIT2 | BIT1; + if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE) + val |= BIT5; + switch (info->params.preamble_length) + { + case HDLC_PREAMBLE_LENGTH_16BITS: + val |= BIT6; + break; + case HDLC_PREAMBLE_LENGTH_32BITS: + val |= BIT7; + break; + case HDLC_PREAMBLE_LENGTH_64BITS: + val |= BIT7 | BIT6; + break; + } + write_reg(info, CHA + CCR3, val); + + /* PRE - Preamble pattern */ + val = 0; + switch (info->params.preamble) +
{ + case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break; + case HDLC_PREAMBLE_PATTERN_10: val = 0xaa; break; + case HDLC_PREAMBLE_PATTERN_01: val = 0x55; break; + case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break; + } + write_reg(info, CHA + PRE, val); + + /* CCR4 + * + * 07 MCK4 Master Clock Divide by 4, 1=enabled + * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled + * 05 TST1 Test Pin, 0=normal operation + * 04 ICD Invert Carrier Detect, 1=enabled (active low) + * 03..02 Reserved, must be 0 + * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes + * + * 0101 0000 + */ + val = 0x50; + write_reg(info, CHA + CCR4, val); + if (info->params.flags & HDLC_FLAG_RXC_DPLL) + mgslpc_set_rate(info, CHA, info->params.clock_speed * 16); + else + mgslpc_set_rate(info, CHA, info->params.clock_speed); + + /* RLCR Receive length check register + * + * 7 1=enable receive length check + * 6..0 Max frame length = (RL + 1) * 32 + */ + write_reg(info, CHA + RLCR, 0); + + /* XBCH Transmit Byte Count High + * + * 07 DMA mode, 0 = interrupt driven + * 06 NRM, 0=ABM (ignored) + * 05 CAS Carrier Auto Start + * 04 XC Transmit Continuously (ignored) + * 03..00 XBC[10..8] Transmit byte count bits 10..8 + * + * 0000 0000 + */ + val = 0x00; + if (info->params.flags & HDLC_FLAG_AUTO_DCD) + val |= BIT5; + write_reg(info, CHA + XBCH, val); + enable_auxclk(info); + if (info->params.loopback || info->testing_irq) + loopback_enable(info); + if (info->params.flags & HDLC_FLAG_AUTO_CTS) + { + irq_enable(info, CHB, IRQ_CTS); + /* PVR[3] 1=AUTO CTS active */ + set_reg_bits(info, CHA + PVR, BIT3); + } else + clear_reg_bits(info, CHA + PVR, BIT3); + + irq_enable(info, CHA, + IRQ_RXEOM | IRQ_RXFIFO | IRQ_ALLSENT | + IRQ_UNDERRUN | IRQ_TXFIFO); + issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); + wait_command_complete(info, CHA); + read_reg16(info, CHA + ISR); /* clear pending IRQs */ + + /* Master clock mode enabled above to allow reset commands + * to complete even if no data clocks are present. + * + * Disable master clock mode for normal communications because + * V3.2 of the ESCC2 has a bug that prevents the transmit all sent + * IRQ when in master clock mode. + * + * Leave master clock mode enabled for IRQ test because the + * timer IRQ used by the test can only happen in master clock mode. + */ + if (!info->testing_irq) + clear_reg_bits(info, CHA + CCR0, BIT6); + + tx_set_idle(info); + + tx_stop(info); + rx_stop(info); +} + +static void rx_stop(MGSLPC_INFO *info) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):rx_stop(%s)\n", + __FILE__, __LINE__, info->device_name); + + /* MODE:03 RAC Receiver Active, 0=inactive */ + clear_reg_bits(info, CHA + MODE, BIT3); + + info->rx_enabled = false; + info->rx_overflow = false; +} + +static void rx_start(MGSLPC_INFO *info) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):rx_start(%s)\n", + __FILE__, __LINE__, info->device_name); + + rx_reset_buffers(info); + info->rx_enabled = false; + info->rx_overflow = false; + + /* MODE:03 RAC Receiver Active, 1=active */ + set_reg_bits(info, CHA + MODE, BIT3); + + info->rx_enabled = true; +} + +static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):tx_start(%s)\n", + __FILE__, __LINE__, info->device_name); + + if (info->tx_count) { + /* If auto RTS enabled and RTS is inactive, then assert */ + /* RTS and set a flag indicating that the driver should */ + /* negate RTS when the transmission completes.
*/ + info->drop_rts_on_tx_done = false; + + if (info->params.flags & HDLC_FLAG_AUTO_RTS) { + get_signals(info); + if (!(info->serial_signals & SerialSignal_RTS)) { + info->serial_signals |= SerialSignal_RTS; + set_signals(info); + info->drop_rts_on_tx_done = true; + } + } + + if (info->params.mode == MGSL_MODE_ASYNC) { + if (!info->tx_active) { + info->tx_active = true; + tx_ready(info, tty); + } + } else { + info->tx_active = true; + tx_ready(info, tty); + mod_timer(&info->tx_timer, jiffies + + msecs_to_jiffies(5000)); + } + } + + if (!info->tx_enabled) + info->tx_enabled = true; +} + +static void tx_stop(MGSLPC_INFO *info) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):tx_stop(%s)\n", + __FILE__, __LINE__, info->device_name); + + del_timer(&info->tx_timer); + + info->tx_enabled = false; + info->tx_active = false; +} + +/* Reset the adapter to a known state and prepare it for further use. + */ +static void reset_device(MGSLPC_INFO *info) +{ + /* power up both channels (set BIT7) */ + write_reg(info, CHA + CCR0, 0x80); + write_reg(info, CHB + CCR0, 0x80); + write_reg(info, CHA + MODE, 0); + write_reg(info, CHB + MODE, 0); + + /* disable all interrupts */ + irq_disable(info, CHA, 0xffff); + irq_disable(info, CHB, 0xffff); + port_irq_disable(info, 0xff); + + /* PCR Port Configuration Register + * + * 07..04 DEC[3..0] Serial I/F select outputs + * 03 output, 1=AUTO CTS control enabled + * 02 RI Ring Indicator input 0=active + * 01 DSR input 0=active + * 00 DTR output 0=active + * + * 0000 0110 + */ + write_reg(info, PCR, 0x06); + + /* PVR Port Value Register + * + * 07..04 DEC[3..0] Serial I/F select (0000=disabled) + * 03 AUTO CTS output 1=enabled + * 02 RI Ring Indicator input + * 01 DSR input + * 00 DTR output (1=inactive) + * + * 0000 0001 + */ +// write_reg(info, PVR, PVR_DTR); + + /* IPC Interrupt Port Configuration + * + * 07 VIS 1=Masked interrupts visible + * 06..05 Reserved, 0 + * 04..03 SLA Slave address, 00 ignored + * 02 CASM Cascading Mode, 1=daisy chain + * 01..00 IC[1..0] Interrupt Config, 01=push-pull output, active low + * + * 0000 0101 + */ + write_reg(info, IPC, 0x05); +} + +static void async_mode(MGSLPC_INFO *info) +{ + unsigned char val; + + /* disable all interrupts */ + irq_disable(info, CHA, 0xffff); + irq_disable(info, CHB, 0xffff); + port_irq_disable(info, 0xff); + + /* MODE + * + * 07 Reserved, 0 + * 06 FRTS RTS State, 0=active + * 05 FCTS Flow Control on CTS + * 04 FLON Flow Control Enable + * 03 RAC Receiver Active, 0 = inactive + * 02 RTS 0=Auto RTS, 1=manual RTS + * 01 TRS Timer Resolution, 1=512 + * 00 TLP Test Loop, 0 = no loop + * + * 0000 0110 + */ + val = 0x06; + if (info->params.loopback) + val |= BIT0; + + /* preserve RTS state */ + if (!(info->serial_signals & SerialSignal_RTS)) + val |= BIT6; + write_reg(info, CHA + MODE, val); + + /* CCR0 + * + * 07 PU Power Up, 1=active, 0=power down + * 06 MCE Master Clock Enable, 1=enabled + * 05 Reserved, 0 + * 04..02 SC[2..0] Encoding, 000=NRZ + * 01..00 SM[1..0] Serial Mode, 11=Async + * + * 1000 0011 + */ + write_reg(info, CHA + CCR0, 0x83); + + /* CCR1 + * + * 07..05 Reserved, 0 + * 04 ODS Output Driver Select, 1=TxD is push-pull output + * 03 BCR Bit Clock Rate, 1=16x + * 02..00 CM[2..0] Clock Mode, 111=BRG + * + * 0001 1111 + */ + write_reg(info, CHA + CCR1, 0x1f); + + /* CCR2 (channel A) + * + * 07..06 BGR[9..8] Baud rate bits 9..8 + * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value + * 04 SSEL Clock source select, 1=submode b + * 03 TOE 0=TxCLK is input, 1=TxCLK is output + * 02 RWX Read/Write Exchange 0=disabled
+ * 01 Reserved, 0 + * 00 DIV, data inversion 0=disabled, 1=enabled + * + * 0001 0000 + */ + write_reg(info, CHA + CCR2, 0x10); + + /* CCR3 + * + * 07..01 Reserved, 0 + * 00 PSD DPLL Phase Shift Disable + * + * 0000 0000 + */ + write_reg(info, CHA + CCR3, 0); + + /* CCR4 + * + * 07 MCK4 Master Clock Divide by 4, 1=enabled + * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled + * 05 TST1 Test Pin, 0=normal operation + * 04 ICD Invert Carrier Detect, 1=enabled (active low) + * 03..00 Reserved, must be 0 + * + * 0101 0000 + */ + write_reg(info, CHA + CCR4, 0x50); + mgslpc_set_rate(info, CHA, info->params.data_rate * 16); + + /* DAFO Data Format + * + * 07 Reserved, 0 + * 06 XBRK transmit break, 0=normal operation + * 05 Stop bits (0=1, 1=2) + * 04..03 PAR[1..0] Parity (01=odd, 10=even) + * 02 PAREN Parity Enable + * 01..00 CHL[1..0] Character Length (00=8, 01=7) + * + */ + val = 0x00; + if (info->params.data_bits != 8) + val |= BIT0; /* 7 bits */ + if (info->params.stop_bits != 1) + val |= BIT5; + if (info->params.parity != ASYNC_PARITY_NONE) + { + val |= BIT2; /* Parity enable */ + if (info->params.parity == ASYNC_PARITY_ODD) + val |= BIT3; + else + val |= BIT4; + } + write_reg(info, CHA + DAFO, val); + + /* RFC Rx FIFO Control + * + * 07 Reserved, 0 + * 06 DPS, 1=parity bit not stored in data byte + * 05 DXS, 0=all data stored in FIFO (including XON/XOFF) + * 04 RFDF Rx FIFO Data Format, 1=status byte stored in FIFO + * 03..02 RFTH[1..0], rx threshold, 11=16 status + 16 data byte + * 01 Reserved, 0 + * 00 TCDE Terminate Char Detect Enable, 0=disabled + * + * 0101 1100 + */ + write_reg(info, CHA + RFC, 0x5c); + + /* RLCR Receive length check register + * + * Max frame length = (RL + 1) * 32 + */ + write_reg(info, CHA + RLCR, 0); + + /* XBCH Transmit Byte Count High + * + * 07 DMA mode, 0 = interrupt driven + * 06 NRM, 0=ABM (ignored) + * 05 CAS Carrier Auto Start + * 04 XC Transmit Continuously (ignored) + * 03..00 XBC[10..8] Transmit byte count bits 10..8 + * + * 0000 0000 + */ + val = 0x00; + if (info->params.flags & HDLC_FLAG_AUTO_DCD) + val |= BIT5; + write_reg(info, CHA + XBCH, val); + if (info->params.flags & HDLC_FLAG_AUTO_CTS) + irq_enable(info, CHA, IRQ_CTS); + + /* MODE:03 RAC Receiver Active, 1=active */ + set_reg_bits(info, CHA + MODE, BIT3); + enable_auxclk(info); + if (info->params.flags & HDLC_FLAG_AUTO_CTS) { + irq_enable(info, CHB, IRQ_CTS); + /* PVR[3] 1=AUTO CTS active */ + set_reg_bits(info, CHA + PVR, BIT3); + } else + clear_reg_bits(info, CHA + PVR, BIT3); + irq_enable(info, CHA, + IRQ_RXEOM | IRQ_RXFIFO | IRQ_BREAK_ON | IRQ_RXTIME | + IRQ_ALLSENT | IRQ_TXFIFO); + issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); + wait_command_complete(info, CHA); + read_reg16(info, CHA + ISR); /* clear pending IRQs */ +} + +/* Set the HDLC idle mode for the transmitter. + */ +static void tx_set_idle(MGSLPC_INFO *info) +{ + /* Note: the ESCC2 only supports flag and ones (mark) idle modes */ + if (info->idle_mode == HDLC_TXIDLE_FLAGS) + set_reg_bits(info, CHA + CCR1, BIT3); + else + clear_reg_bits(info, CHA + CCR1, BIT3); +} + +/* get state of the V24 status (input) signals.
+ */ +static void get_signals(MGSLPC_INFO *info) +{ + unsigned char status = 0; + + /* preserve RTS and DTR */ + info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR; + + if (read_reg(info, CHB + VSTR) & BIT7) + info->serial_signals |= SerialSignal_DCD; + if (read_reg(info, CHB + STAR) & BIT1) + info->serial_signals |= SerialSignal_CTS; + + status = read_reg(info, CHA + PVR); + if (!(status & PVR_RI)) + info->serial_signals |= SerialSignal_RI; + if (!(status & PVR_DSR)) + info->serial_signals |= SerialSignal_DSR; +} + +/* Set the state of RTS and DTR based on contents of + * serial_signals member of device extension. + */ +static void set_signals(MGSLPC_INFO *info) +{ + unsigned char val; + + val = read_reg(info, CHA + MODE); + if (info->params.mode == MGSL_MODE_ASYNC) { + if (info->serial_signals & SerialSignal_RTS) + val &= ~BIT6; + else + val |= BIT6; + } else { + if (info->serial_signals & SerialSignal_RTS) + val |= BIT2; + else + val &= ~BIT2; + } + write_reg(info, CHA + MODE, val); + + if (info->serial_signals & SerialSignal_DTR) + clear_reg_bits(info, CHA + PVR, PVR_DTR); + else + set_reg_bits(info, CHA + PVR, PVR_DTR); +} + +static void rx_reset_buffers(MGSLPC_INFO *info) +{ + RXBUF *buf; + int i; + + info->rx_put = 0; + info->rx_get = 0; + info->rx_frame_count = 0; + for (i=0 ; i < info->rx_buf_count ; i++) { + buf = (RXBUF*)(info->rx_buf + (i * info->rx_buf_size)); + buf->status = buf->count = 0; + } +} + +/* Attempt to return a received HDLC frame + * Only frames received without errors are returned. + * + * Returns true if frame returned, otherwise false + */ +static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty) +{ + unsigned short status; + RXBUF *buf; + unsigned int framesize = 0; + unsigned long flags; + bool return_frame = false; + + if (info->rx_frame_count == 0) + return false; + + buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size)); + + status = buf->status; + + /* 07 VFR 1=valid frame + * 06 RDO 1=data overrun + * 05 CRC 1=OK, 0=error + * 04 RAB 1=frame aborted + */ + if ((status & 0xf0) != 0xA0) { + if (!(status & BIT7) || (status & BIT4)) + info->icount.rxabort++; + else if (status & BIT6) + info->icount.rxover++; + else if (!(status & BIT5)) { + info->icount.rxcrc++; + if (info->params.crc_type & HDLC_CRC_RETURN_EX) + return_frame = true; + } + framesize = 0; +#if SYNCLINK_GENERIC_HDLC + { + info->netdev->stats.rx_errors++; + info->netdev->stats.rx_frame_errors++; + } +#endif + } else + return_frame = true; + + if (return_frame) + framesize = buf->count; + + if (debug_level >= DEBUG_LEVEL_BH) + printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n", + __FILE__, __LINE__, info->device_name, status, framesize); + + if (debug_level >= DEBUG_LEVEL_DATA) + trace_block(info, buf->data, framesize, 0); + + if (framesize) { + if ((info->params.crc_type & HDLC_CRC_RETURN_EX && + framesize+1 > info->max_frame_size) || + framesize > info->max_frame_size) + info->icount.rxlong++; + else { + if (status & BIT5) + info->icount.rxok++; + + if (info->params.crc_type & HDLC_CRC_RETURN_EX) { + *(buf->data + framesize) = status & BIT5 ? 
RX_OK:RX_CRC_ERROR; + ++framesize; + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_rx(info, buf->data, framesize); + else +#endif + ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize); + } + } + + spin_lock_irqsave(&info->lock, flags); + buf->status = buf->count = 0; + info->rx_frame_count--; + info->rx_get++; + if (info->rx_get >= info->rx_buf_count) + info->rx_get = 0; + spin_unlock_irqrestore(&info->lock, flags); + + return true; +} + +static bool register_test(MGSLPC_INFO *info) +{ + static unsigned char patterns[] = + { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f }; + static unsigned int count = ARRAY_SIZE(patterns); + unsigned int i; + bool rc = true; + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + reset_device(info); + + for (i = 0; i < count; i++) { + write_reg(info, XAD1, patterns[i]); + write_reg(info, XAD2, patterns[(i + 1) % count]); + if ((read_reg(info, XAD1) != patterns[i]) || + (read_reg(info, XAD2) != patterns[(i + 1) % count])) { + rc = false; + break; + } + } + + spin_unlock_irqrestore(&info->lock, flags); + return rc; +} + +static bool irq_test(MGSLPC_INFO *info) +{ + unsigned long end_time; + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + reset_device(info); + + info->testing_irq = true; + hdlc_mode(info); + + info->irq_occurred = false; + + /* init hdlc mode */ + + irq_enable(info, CHA, IRQ_TIMER); + write_reg(info, CHA + TIMR, 0); /* 512 cycles */ + issue_command(info, CHA, CMD_START_TIMER); + + spin_unlock_irqrestore(&info->lock, flags); + + end_time=100; + while(end_time-- && !info->irq_occurred) { + msleep_interruptible(10); + } + + info->testing_irq = false; + + spin_lock_irqsave(&info->lock, flags); + reset_device(info); + spin_unlock_irqrestore(&info->lock, flags); + + return info->irq_occurred; +} + +static int adapter_test(MGSLPC_INFO *info) +{ + if (!register_test(info)) { + info->init_error = DiagStatus_AddressFailure; + printk("%s(%d):Register test failure for device %s Addr=%04X\n", + __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base)); + return -ENODEV; + } + + if (!irq_test(info)) { + info->init_error = DiagStatus_IrqFailure; + printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n", + __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level)); + return -ENODEV; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):device %s passed diagnostics\n", + __FILE__, __LINE__, info->device_name); + return 0; +} + +static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit) +{ + int i; + int linecount; + if (xmit) + printk("%s tx data:\n", info->device_name); + else + printk("%s rx data:\n", info->device_name); + + while(count) { + if (count > 16) + linecount = 16; + else + linecount = count; + + for(i=0;i<linecount;i++) + printk("%02X ", (unsigned char)data[i]); + for(;i<17;i++) + printk("   "); + for(i=0;i<linecount;i++) { + if (data[i]>=040 && data[i]<=0176) + printk("%c", data[i]); + else + printk("."); + } + printk("\n"); + + data += linecount; + count -= linecount; + } +} + +/* HDLC frame time out + * update stats and do tx completion processing + */ +static void tx_timeout(unsigned long context) +{ + MGSLPC_INFO *info = (MGSLPC_INFO*)context; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):tx_timeout(%s)\n", + __FILE__, __LINE__, info->device_name); + if (info->tx_active && + info->params.mode == MGSL_MODE_HDLC) { + info->icount.txtimeout++; + } + spin_lock_irqsave(&info->lock, flags); + info->tx_active = false; + info->tx_count = info->tx_put = info->tx_get = 0; + + spin_unlock_irqrestore(&info->lock, flags); + +#if
SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_tx_done(info); + else +#endif + { + struct tty_struct *tty = tty_port_tty_get(&info->port); + bh_transmit(info, tty); + tty_kref_put(tty); + } +} + +#if SYNCLINK_GENERIC_HDLC + +/** + * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) + * set encoding and frame check sequence (FCS) options + * + * dev pointer to network device structure + * encoding serial encoding setting + * parity FCS setting + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + MGSLPC_INFO *info = dev_to_port(dev); + struct tty_struct *tty; + unsigned char new_encoding; + unsigned short new_crctype; + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + switch (encoding) + { + case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; + case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; + case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; + case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; + case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; + default: return -EINVAL; + } + + switch (parity) + { + case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; + case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; + case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; + default: return -EINVAL; + } + + info->params.encoding = new_encoding; + info->params.crc_type = new_crctype; + + /* if network interface up, reprogram hardware */ + if (info->netcount) { + tty = tty_port_tty_get(&info->port); + mgslpc_program_hw(info, tty); + tty_kref_put(tty); + } + + return 0; +} + +/** + * called by generic HDLC layer to send frame + * + * skb socket buffer containing HDLC frame + * dev pointer to network device structure + */ +static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + MGSLPC_INFO *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name); + + /* stop sending until this frame completes */ + netif_stop_queue(dev); + + /* copy data to device buffers */ + skb_copy_from_linear_data(skb, info->tx_buf, skb->len); + info->tx_get = 0; + info->tx_put = info->tx_count = skb->len; + + /* update network statistics */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* done with socket buffer, so free it */ + dev_kfree_skb(skb); + + /* save start time for transmit timeout detection */ + dev->trans_start = jiffies; + + /* start hardware transmitter if necessary */ + spin_lock_irqsave(&info->lock, flags); + if (!info->tx_active) { + struct tty_struct *tty = tty_port_tty_get(&info->port); + tx_start(info, tty); + tty_kref_put(tty); + } + spin_unlock_irqrestore(&info->lock, flags); + + return NETDEV_TX_OK; +} + +/** + * called by network layer when interface enabled + * claim resources and initialize hardware + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_open(struct net_device *dev) +{ + MGSLPC_INFO *info = dev_to_port(dev); + struct tty_struct *tty; + int rc; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name); + + /* generic HDLC layer open processing */ + rc = hdlc_open(dev); + if (rc != 0) + return rc; + + /* arbitrate 
between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); + if (info->port.count != 0 || info->netcount != 0) { + printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; + } + info->netcount=1; + spin_unlock_irqrestore(&info->netlock, flags); + + tty = tty_port_tty_get(&info->port); + /* claim resources and init adapter */ + rc = startup(info, tty); + if (rc != 0) { + tty_kref_put(tty); + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + return rc; + } + /* assert RTS and DTR, apply hardware settings */ + info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; + mgslpc_program_hw(info, tty); + tty_kref_put(tty); + + /* enable network layer transmit */ + dev->trans_start = jiffies; + netif_start_queue(dev); + + /* inform generic HDLC layer of current DCD status */ + spin_lock_irqsave(&info->lock, flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} + +/** + * called by network layer when interface is disabled + * shutdown hardware and release resources + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_close(struct net_device *dev) +{ + MGSLPC_INFO *info = dev_to_port(dev); + struct tty_struct *tty = tty_port_tty_get(&info->port); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name); + + netif_stop_queue(dev); + + /* shutdown adapter and release resources */ + shutdown(info, tty); + tty_kref_put(tty); + hdlc_close(dev); + + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + + return 0; +} + +/** + * called by network layer to process IOCTL call to network device + * + * dev pointer to network device structure + * ifr pointer to network interface request structure + * cmd IOCTL command code + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; + MGSLPC_INFO *info = dev_to_port(dev); + unsigned int flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name); + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + if (cmd != SIOCWANDEV) + return hdlc_ioctl(dev, ifr, cmd); + + memset(&new_line, 0, size); + + switch(ifr->ifr_settings.type) { + case IF_GET_IFACE: /* return current sync_serial_settings */ + + ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + + flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + + switch (flags){ + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; + case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; + case 
(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; + default: new_line.clock_type = CLOCK_DEFAULT; + } + + new_line.clock_rate = info->params.clock_speed; + new_line.loopback = info->params.loopback ? 1:0; + + if (copy_to_user(line, &new_line, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ + + if(!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + switch (new_line.clock_type) + { + case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; + case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; + case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; + case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; + case CLOCK_DEFAULT: flags = info->params.flags & + (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; + default: return -EINVAL; + } + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + info->params.flags |= flags; + + info->params.loopback = new_line.loopback; + + if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) + info->params.clock_speed = new_line.clock_rate; + else + info->params.clock_speed = 0; + + /* if network interface up, reprogram hardware */ + if (info->netcount) { + struct tty_struct *tty = tty_port_tty_get(&info->port); + mgslpc_program_hw(info, tty); + tty_kref_put(tty); + } + return 0; + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +/** + * called by network layer when transmit timeout is detected + * + * dev pointer to network device structure + */ +static void hdlcdev_tx_timeout(struct net_device *dev) +{ + MGSLPC_INFO *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("hdlcdev_tx_timeout(%s)\n", dev->name); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + spin_lock_irqsave(&info->lock, flags); + tx_stop(info); + spin_unlock_irqrestore(&info->lock, flags); + + netif_wake_queue(dev); +} + +/** + * called by device driver when transmit completes + * reenable network layer transmit if stopped + * + * info pointer to device instance information + */ +static void hdlcdev_tx_done(MGSLPC_INFO *info) +{ + if (netif_queue_stopped(info->netdev)) + netif_wake_queue(info->netdev); +} + +/** + * called by device driver when frame received + * pass frame to network layer + * + * info pointer to device instance information + * buf pointer to buffer containing frame data + * size count of data bytes in buf + */ +static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size) +{ + struct sk_buff *skb = dev_alloc_skb(size); + struct net_device *dev = info->netdev; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("hdlcdev_rx(%s)\n", dev->name); + + if (skb == NULL) { + printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); + dev->stats.rx_dropped++; + return; + } + + memcpy(skb_put(skb, size), buf, size); + + skb->protocol = hdlc_type_trans(skb, dev); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; + + netif_rx(skb); +} + +static const struct net_device_ops hdlcdev_ops = { + .ndo_open =
hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + +/** + * called by device driver when adding device instance + * do generic HDLC initialization + * + * info pointer to device instance information + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_init(MGSLPC_INFO *info) +{ + int rc; + struct net_device *dev; + hdlc_device *hdlc; + + /* allocate and initialize network and HDLC layer objects */ + + dev = alloc_hdlcdev(info); + if (dev == NULL) { + printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__); + return -ENOMEM; + } + + /* for network layer reporting purposes only */ + dev->base_addr = info->io_base; + dev->irq = info->irq_level; + + /* network layer callbacks and settings */ + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; + dev->tx_queue_len = 50; + + /* generic HDLC layer callbacks and settings */ + hdlc = dev_to_hdlc(dev); + hdlc->attach = hdlcdev_attach; + hdlc->xmit = hdlcdev_xmit; + + /* register objects with HDLC layer */ + rc = register_hdlc_device(dev); + if (rc) { + printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__); + free_netdev(dev); + return rc; + } + + info->netdev = dev; + return 0; +} + +/** + * called by device driver when removing device instance + * do generic HDLC cleanup + * + * info pointer to device instance information + */ +static void hdlcdev_exit(MGSLPC_INFO *info) +{ + unregister_hdlc_device(info->netdev); + free_netdev(info->netdev); + info->netdev = NULL; +} + +#endif /* CONFIG_HDLC */ + diff --git a/kernel/drivers/char/ppdev.c b/kernel/drivers/char/ppdev.c new file mode 100644 index 000000000..ae0b42b66 --- /dev/null +++ b/kernel/drivers/char/ppdev.c @@ -0,0 +1,811 @@ +/* + * linux/drivers/char/ppdev.c + * + * This is the code behind /dev/parport* -- it allows a user-space + * application to use the parport subsystem. + * + * Copyright (C) 1998-2000, 2002 Tim Waugh + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * A /dev/parportx device node represents an arbitrary device + * on port 'x'. The following operations are possible: + * + * open do nothing, set up default IEEE 1284 protocol to be COMPAT + * close release port and unregister device (if necessary) + * ioctl + * EXCL register device exclusively (may fail) + * CLAIM (register device first time) parport_claim_or_block + * RELEASE parport_release + * SETMODE set the IEEE 1284 protocol to use for read/write + * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be + * confused with ioctl(fd, SETPHASER, &stun). 
;-) + * DATADIR data_forward / data_reverse + * WDATA write_data + * RDATA read_data + * WCONTROL write_control + * RCONTROL read_control + * FCONTROL frob_control + * RSTATUS read_status + * NEGOT parport_negotiate + * YIELD parport_yield_blocking + * WCTLONIRQ on interrupt, set control lines + * CLRIRQ clear (and return) interrupt count + * SETTIME sets device timeout (struct timeval) + * GETTIME gets device timeout (struct timeval) + * GETMODES gets hardware supported modes (unsigned int) + * GETMODE gets the current IEEE1284 mode + * GETPHASE gets the current IEEE1284 phase + * GETFLAGS gets current (user-visible) flags + * SETFLAGS sets current (user-visible) flags + * read/write read or write in current IEEE 1284 protocol + * select wait for interrupt (in readfds) + * + * Changes: + * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. + * + * Arnaldo Carvalho de Melo 2000/08/25 + * - On error, copy_from_user and copy_to_user do not return -EFAULT, + * They return the positive number of bytes *not* copied due to address + * space errors. + * + * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes, 03/01/2001. + * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/device.h> +#include <linux/ioctl.h> +#include <linux/parport.h> +#include <linux/ctype.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/major.h> +#include <linux/ppdev.h> +#include <linux/mutex.h> +#include <linux/uaccess.h> + +#define PP_VERSION "ppdev: user-space parallel port driver" +#define CHRDEV "ppdev" + +struct pp_struct { + struct pardevice * pdev; + wait_queue_head_t irq_wait; + atomic_t irqc; + unsigned int flags; + int irqresponse; + unsigned char irqctl; + struct ieee1284_info state; + struct ieee1284_info saved_state; + long default_inactivity; +}; + +/* pp_struct.flags bitfields */ +#define PP_CLAIMED (1<<0) +#define PP_EXCL (1<<1) + +/* Other constants */ +#define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ +#define PP_BUFFER_SIZE 1024 +#define PARDEVICE_MAX 8 + +/* ROUND_UP macro from fs/select.c */ +#define ROUND_UP(x,y) (((x)+(y)-1)/(y)) + +static DEFINE_MUTEX(pp_do_mutex); +static inline void pp_enable_irq (struct pp_struct *pp) +{ + struct parport *port = pp->pdev->port; + port->ops->enable_irq (port); +} + +static ssize_t pp_read (struct file * file, char __user * buf, size_t count, + loff_t * ppos) +{ + unsigned int minor = iminor(file_inode(file)); + struct pp_struct *pp = file->private_data; + char * kbuffer; + ssize_t bytes_read = 0; + struct parport *pport; + int mode; + + if (!(pp->flags & PP_CLAIMED)) { + /* Don't have the port claimed */ + pr_debug(CHRDEV "%x: claim the port first\n", minor); + return -EINVAL; + } + + /* Trivial case. */ + if (count == 0) + return 0; + + kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); + if (!kbuffer) { + return -ENOMEM; + } + pport = pp->pdev->port; + mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); + + parport_set_timeout (pp->pdev, + (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK : + pp->default_inactivity); + + while (bytes_read == 0) { + ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); + + if (mode == IEEE1284_MODE_EPP) { + /* various specials for EPP mode */ + int flags = 0; + size_t (*fn)(struct parport *, void *, size_t, int); + + if (pp->flags & PP_W91284PIC) { + flags |= PARPORT_W91284PIC; + } + if (pp->flags & PP_FASTREAD) { + flags |= PARPORT_EPP_FAST; + } + if (pport->ieee1284.mode & IEEE1284_ADDR) { + fn = pport->ops->epp_read_addr; + } else { + fn = pport->ops->epp_read_data; + } + bytes_read = (*fn)(pport, kbuffer, need, flags); + } else { + bytes_read = parport_read (pport, kbuffer, need); + } + + if (bytes_read != 0) + break; + + if (file->f_flags & O_NONBLOCK) { + bytes_read = -EAGAIN; + break; + } + + if (signal_pending (current)) { + bytes_read = -ERESTARTSYS; + break; + } + + cond_resched(); + } + + parport_set_timeout (pp->pdev, pp->default_inactivity); + + if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) + bytes_read = -EFAULT; + + kfree (kbuffer); + pp_enable_irq (pp); + return bytes_read; +} + +static ssize_t pp_write (struct file * file, const char __user * buf, + size_t count, loff_t * ppos) +{ + unsigned int minor = iminor(file_inode(file)); + struct pp_struct *pp = file->private_data; + char * kbuffer; + ssize_t bytes_written = 0; + ssize_t wrote; + int mode; + struct parport *pport; + + if (!(pp->flags & PP_CLAIMED)) { + /* Don't have the port claimed */ + pr_debug(CHRDEV "%x: claim the port first\n", minor); + return -EINVAL; + } + + kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); + if (!kbuffer) { + return -ENOMEM; + } + pport = pp->pdev->port; + mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); + + parport_set_timeout (pp->pdev, + (file->f_flags & O_NONBLOCK) ? 
+ PARPORT_INACTIVITY_O_NONBLOCK : + pp->default_inactivity); + + while (bytes_written < count) { + ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); + + if (copy_from_user (kbuffer, buf + bytes_written, n)) { + bytes_written = -EFAULT; + break; + } + + if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { + /* do a fast EPP write */ + if (pport->ieee1284.mode & IEEE1284_ADDR) { + wrote = pport->ops->epp_write_addr (pport, + kbuffer, n, PARPORT_EPP_FAST); + } else { + wrote = pport->ops->epp_write_data (pport, + kbuffer, n, PARPORT_EPP_FAST); + } + } else { + wrote = parport_write (pp->pdev->port, kbuffer, n); + } + + if (wrote <= 0) { + if (!bytes_written) { + bytes_written = wrote; + } + break; + } + + bytes_written += wrote; + + if (file->f_flags & O_NONBLOCK) { + if (!bytes_written) + bytes_written = -EAGAIN; + break; + } + + if (signal_pending (current)) + break; + + cond_resched(); + } + + parport_set_timeout (pp->pdev, pp->default_inactivity); + + kfree (kbuffer); + pp_enable_irq (pp); + return bytes_written; +} + +static void pp_irq (void *private) +{ + struct pp_struct *pp = private; + + if (pp->irqresponse) { + parport_write_control (pp->pdev->port, pp->irqctl); + pp->irqresponse = 0; + } + + atomic_inc (&pp->irqc); + wake_up_interruptible (&pp->irq_wait); +} + +static int register_device (int minor, struct pp_struct *pp) +{ + struct parport *port; + struct pardevice * pdev = NULL; + char *name; + int fl; + + name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); + if (name == NULL) + return -ENOMEM; + + port = parport_find_number (minor); + if (!port) { + printk (KERN_WARNING "%s: no associated port!\n", name); + kfree (name); + return -ENXIO; + } + + fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; + pdev = parport_register_device (port, name, NULL, + NULL, pp_irq, fl, pp); + parport_put_port (port); + + if (!pdev) { + printk (KERN_WARNING "%s: failed to register device!\n", name); + kfree (name); + return -ENXIO; + } + + pp->pdev = pdev; + pr_debug("%s: registered pardevice\n", name); + return 0; +} + +static enum ieee1284_phase init_phase (int mode) +{ + switch (mode & ~(IEEE1284_DEVICEID + | IEEE1284_ADDR)) { + case IEEE1284_MODE_NIBBLE: + case IEEE1284_MODE_BYTE: + return IEEE1284_PH_REV_IDLE; + } + return IEEE1284_PH_FWD_IDLE; +} + +static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned int minor = iminor(file_inode(file)); + struct pp_struct *pp = file->private_data; + struct parport * port; + void __user *argp = (void __user *)arg; + + /* First handle the cases that don't take arguments. */ + switch (cmd) { + case PPCLAIM: + { + struct ieee1284_info *info; + int ret; + + if (pp->flags & PP_CLAIMED) { + pr_debug(CHRDEV "%x: you've already got it!\n", minor); + return -EINVAL; + } + + /* Deferred device registration. */ + if (!pp->pdev) { + int err = register_device (minor, pp); + if (err) { + return err; + } + } + + ret = parport_claim_or_block (pp->pdev); + if (ret < 0) + return ret; + + pp->flags |= PP_CLAIMED; + + /* For interrupt-reporting to work, we need to be + * informed of each interrupt. */ + pp_enable_irq (pp); + + /* We may need to fix up the state machine. 
*/ + info = &pp->pdev->port->ieee1284; + pp->saved_state.mode = info->mode; + pp->saved_state.phase = info->phase; + info->mode = pp->state.mode; + info->phase = pp->state.phase; + pp->default_inactivity = parport_set_timeout (pp->pdev, 0); + parport_set_timeout (pp->pdev, pp->default_inactivity); + + return 0; + } + case PPEXCL: + if (pp->pdev) { + pr_debug(CHRDEV "%x: too late for PPEXCL; " + "already registered\n", minor); + if (pp->flags & PP_EXCL) + /* But it's not really an error. */ + return 0; + /* There's no chance of making the driver happy. */ + return -EINVAL; + } + + /* Just remember to register the device exclusively + * when we finally do the registration. */ + pp->flags |= PP_EXCL; + return 0; + case PPSETMODE: + { + int mode; + if (copy_from_user (&mode, argp, sizeof (mode))) + return -EFAULT; + /* FIXME: validate mode */ + pp->state.mode = mode; + pp->state.phase = init_phase (mode); + + if (pp->flags & PP_CLAIMED) { + pp->pdev->port->ieee1284.mode = mode; + pp->pdev->port->ieee1284.phase = pp->state.phase; + } + + return 0; + } + case PPGETMODE: + { + int mode; + + if (pp->flags & PP_CLAIMED) { + mode = pp->pdev->port->ieee1284.mode; + } else { + mode = pp->state.mode; + } + if (copy_to_user (argp, &mode, sizeof (mode))) { + return -EFAULT; + } + return 0; + } + case PPSETPHASE: + { + int phase; + if (copy_from_user (&phase, argp, sizeof (phase))) { + return -EFAULT; + } + /* FIXME: validate phase */ + pp->state.phase = phase; + + if (pp->flags & PP_CLAIMED) { + pp->pdev->port->ieee1284.phase = phase; + } + + return 0; + } + case PPGETPHASE: + { + int phase; + + if (pp->flags & PP_CLAIMED) { + phase = pp->pdev->port->ieee1284.phase; + } else { + phase = pp->state.phase; + } + if (copy_to_user (argp, &phase, sizeof (phase))) { + return -EFAULT; + } + return 0; + } + case PPGETMODES: + { + unsigned int modes; + + port = parport_find_number (minor); + if (!port) + return -ENODEV; + + modes = port->modes; + parport_put_port(port); + if (copy_to_user (argp, &modes, sizeof (modes))) { + return -EFAULT; + } + return 0; + } + case PPSETFLAGS: + { + int uflags; + + if (copy_from_user (&uflags, argp, sizeof (uflags))) { + return -EFAULT; + } + pp->flags &= ~PP_FLAGMASK; + pp->flags |= (uflags & PP_FLAGMASK); + return 0; + } + case PPGETFLAGS: + { + int uflags; + + uflags = pp->flags & PP_FLAGMASK; + if (copy_to_user (argp, &uflags, sizeof (uflags))) { + return -EFAULT; + } + return 0; + } + } /* end switch() */ + + /* Everything else requires the port to be claimed, so check + * that now. */ + if ((pp->flags & PP_CLAIMED) == 0) { + pr_debug(CHRDEV "%x: claim the port first\n", minor); + return -EINVAL; + } + + port = pp->pdev->port; + switch (cmd) { + struct ieee1284_info *info; + unsigned char reg; + unsigned char mask; + int mode; + int ret; + struct timeval par_timeout; + long to_jiffies; + + case PPRSTATUS: + reg = parport_read_status (port); + if (copy_to_user (argp, &reg, sizeof (reg))) + return -EFAULT; + return 0; + case PPRDATA: + reg = parport_read_data (port); + if (copy_to_user (argp, &reg, sizeof (reg))) + return -EFAULT; + return 0; + case PPRCONTROL: + reg = parport_read_control (port); + if (copy_to_user (argp, &reg, sizeof (reg))) + return -EFAULT; + return 0; + case PPYIELD: + parport_yield_blocking (pp->pdev); + return 0; + + case PPRELEASE: + /* Save the state machine's state.
*/ + info = &pp->pdev->port->ieee1284; + pp->state.mode = info->mode; + pp->state.phase = info->phase; + info->mode = pp->saved_state.mode; + info->phase = pp->saved_state.phase; + parport_release (pp->pdev); + pp->flags &= ~PP_CLAIMED; + return 0; + + case PPWCONTROL: + if (copy_from_user (&reg, argp, sizeof (reg))) + return -EFAULT; + parport_write_control (port, reg); + return 0; + + case PPWDATA: + if (copy_from_user (&reg, argp, sizeof (reg))) + return -EFAULT; + parport_write_data (port, reg); + return 0; + + case PPFCONTROL: + if (copy_from_user (&mask, argp, + sizeof (mask))) + return -EFAULT; + if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, + sizeof (reg))) + return -EFAULT; + parport_frob_control (port, mask, reg); + return 0; + + case PPDATADIR: + if (copy_from_user (&mode, argp, sizeof (mode))) + return -EFAULT; + if (mode) + port->ops->data_reverse (port); + else + port->ops->data_forward (port); + return 0; + + case PPNEGOT: + if (copy_from_user (&mode, argp, sizeof (mode))) + return -EFAULT; + switch ((ret = parport_negotiate (port, mode))) { + case 0: break; + case -1: /* handshake failed, peripheral not IEEE 1284 */ + ret = -EIO; + break; + case 1: /* handshake succeeded, peripheral rejected mode */ + ret = -ENXIO; + break; + } + pp_enable_irq (pp); + return ret; + + case PPWCTLONIRQ: + if (copy_from_user (&reg, argp, sizeof (reg))) + return -EFAULT; + + /* Remember what to set the control lines to, for next + * time we get an interrupt. */ + pp->irqctl = reg; + pp->irqresponse = 1; + return 0; + + case PPCLRIRQ: + ret = atomic_read (&pp->irqc); + if (copy_to_user (argp, &ret, sizeof (ret))) + return -EFAULT; + atomic_sub (ret, &pp->irqc); + return 0; + + case PPSETTIME: + if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { + return -EFAULT; + } + /* Convert to jiffies, place in pp->pdev->timeout */ + if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { + return -EINVAL; + } + to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); + to_jiffies += par_timeout.tv_sec * (long)HZ; + if (to_jiffies <= 0) { + return -EINVAL; + } + pp->pdev->timeout = to_jiffies; + return 0; + + case PPGETTIME: + to_jiffies = pp->pdev->timeout; + memset(&par_timeout, 0, sizeof(par_timeout)); + par_timeout.tv_sec = to_jiffies / HZ; + par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); + if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) + return -EFAULT; + return 0; + + default: + pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); + return -EINVAL; + } + + /* Keep the compiler happy */ + return 0; +} + +static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + mutex_lock(&pp_do_mutex); + ret = pp_do_ioctl(file, cmd, arg); + mutex_unlock(&pp_do_mutex); + return ret; +} + +static int pp_open (struct inode * inode, struct file * file) +{ + unsigned int minor = iminor(inode); + struct pp_struct *pp; + + if (minor >= PARPORT_MAX) + return -ENXIO; + + pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + pp->state.mode = IEEE1284_MODE_COMPAT; + pp->state.phase = init_phase (pp->state.mode); + pp->flags = 0; + pp->irqresponse = 0; + atomic_set (&pp->irqc, 0); + init_waitqueue_head (&pp->irq_wait); + + /* Defer the actual device registration until the first claim. + * That way, we know whether or not the driver wants to have + * exclusive access to the port (PPEXCL).
+ */ + pp->pdev = NULL; + file->private_data = pp; + + return 0; +} + +static int pp_release (struct inode * inode, struct file * file) +{ + unsigned int minor = iminor(inode); + struct pp_struct *pp = file->private_data; + int compat_negot; + + compat_negot = 0; + if (!(pp->flags & PP_CLAIMED) && pp->pdev && + (pp->state.mode != IEEE1284_MODE_COMPAT)) { + struct ieee1284_info *info; + + /* parport released, but not in compatibility mode */ + parport_claim_or_block (pp->pdev); + pp->flags |= PP_CLAIMED; + info = &pp->pdev->port->ieee1284; + pp->saved_state.mode = info->mode; + pp->saved_state.phase = info->phase; + info->mode = pp->state.mode; + info->phase = pp->state.phase; + compat_negot = 1; + } else if ((pp->flags & PP_CLAIMED) && pp->pdev && + (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { + compat_negot = 2; + } + if (compat_negot) { + parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); + pr_debug(CHRDEV "%x: negotiated back to compatibility " + "mode because user-space forgot\n", minor); + } + + if (pp->flags & PP_CLAIMED) { + struct ieee1284_info *info; + + info = &pp->pdev->port->ieee1284; + pp->state.mode = info->mode; + pp->state.phase = info->phase; + info->mode = pp->saved_state.mode; + info->phase = pp->saved_state.phase; + parport_release (pp->pdev); + if (compat_negot != 1) { + pr_debug(CHRDEV "%x: released pardevice " + "because user-space forgot\n", minor); + } + } + + if (pp->pdev) { + const char *name = pp->pdev->name; + parport_unregister_device (pp->pdev); + kfree (name); + pp->pdev = NULL; + pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); + } + + kfree (pp); + + return 0; +} + +/* No kernel lock held - fine */ +static unsigned int pp_poll (struct file * file, poll_table * wait) +{ + struct pp_struct *pp = file->private_data; + unsigned int mask = 0; + + poll_wait (file, &pp->irq_wait, wait); + if (atomic_read (&pp->irqc)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +static struct class *ppdev_class; + +static const struct file_operations pp_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = pp_read, + .write = pp_write, + .poll = pp_poll, + .unlocked_ioctl = pp_ioctl, + .open = pp_open, + .release = pp_release, +}; + +static void pp_attach(struct parport *port) +{ + device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), + NULL, "parport%d", port->number); +} + +static void pp_detach(struct parport *port) +{ + device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); +} + +static struct parport_driver pp_driver = { + .name = CHRDEV, + .attach = pp_attach, + .detach = pp_detach, +}; + +static int __init ppdev_init (void) +{ + int err = 0; + + if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { + printk (KERN_WARNING CHRDEV ": unable to get major %d\n", + PP_MAJOR); + return -EIO; + } + ppdev_class = class_create(THIS_MODULE, CHRDEV); + if (IS_ERR(ppdev_class)) { + err = PTR_ERR(ppdev_class); + goto out_chrdev; + } + err = parport_register_driver(&pp_driver); + if (err < 0) { + printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); + goto out_class; + } + + printk (KERN_INFO PP_VERSION "\n"); + goto out; + +out_class: + class_destroy(ppdev_class); +out_chrdev: + unregister_chrdev(PP_MAJOR, CHRDEV); +out: + return err; +} + +static void __exit ppdev_cleanup (void) +{ + /* Clean up all parport stuff */ + parport_unregister_driver(&pp_driver); + class_destroy(ppdev_class); + unregister_chrdev (PP_MAJOR, CHRDEV); +} + +module_init(ppdev_init); +module_exit(ppdev_cleanup); + 
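For reference, the ioctl interface documented in ppdev's header comment above is driven from user space through the /dev/parportN nodes that pp_attach() creates. A minimal sketch, not part of the import: the device path /dev/parport0 and the 0x55 test byte are illustrative only, while PPCLAIM, PPWDATA, PPRSTATUS and PPRELEASE are the real ioctls declared in <linux/ppdev.h>.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ppdev.h>

	int main(void)
	{
		int fd = open("/dev/parport0", O_RDWR); /* illustrative node */
		unsigned char data = 0x55, status;      /* arbitrary test byte */

		if (fd < 0)
			return 1;
		/* claim before any transfer; pp_do_ioctl() above rejects
		 * data-transfer ioctls with -EINVAL until this succeeds */
		if (ioctl(fd, PPCLAIM) < 0)
			return 1;
		ioctl(fd, PPWDATA, &data);	/* drive the data lines */
		ioctl(fd, PPRSTATUS, &status);	/* sample the status lines */
		printf("status = 0x%02x\n", status);
		ioctl(fd, PPRELEASE);		/* hand the port back */
		close(fd);
		return 0;
	}

Note how this mirrors the deferred-registration design in pp_open()/PPCLAIM above: the pardevice is only registered with parport on the first claim, so PPEXCL can still be requested between open() and PPCLAIM.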
+MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR); diff --git a/kernel/drivers/char/ps3flash.c b/kernel/drivers/char/ps3flash.c new file mode 100644 index 000000000..0b311fa27 --- /dev/null +++ b/kernel/drivers/char/ps3flash.c @@ -0,0 +1,458 @@ +/* + * PS3 FLASH ROM Storage Driver + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2007 Sony Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/module.h> + +#include <asm/lv1call.h> +#include <asm/ps3stor.h> + + +#define DEVICE_NAME "ps3flash" + +#define FLASH_BLOCK_SIZE (256*1024) + + +struct ps3flash_private { + struct mutex mutex; /* Bounce buffer mutex */ + u64 chunk_sectors; + int tag; /* Start sector of buffer, -1 if invalid */ + bool dirty; +}; + +static struct ps3_storage_device *ps3flash_dev; + +static int ps3flash_read_write_sectors(struct ps3_storage_device *dev, + u64 start_sector, int write) +{ + struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); + u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, + start_sector, priv->chunk_sectors, + write); + if (res) { + dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, + __LINE__, write ?
"write" : "read", res); + return -EIO; + } + return 0; +} + +static int ps3flash_writeback(struct ps3_storage_device *dev) +{ + struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); + int res; + + if (!priv->dirty || priv->tag < 0) + return 0; + + res = ps3flash_read_write_sectors(dev, priv->tag, 1); + if (res) + return res; + + priv->dirty = false; + return 0; +} + +static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector) +{ + struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); + int res; + + if (start_sector == priv->tag) + return 0; + + res = ps3flash_writeback(dev); + if (res) + return res; + + priv->tag = -1; + + res = ps3flash_read_write_sectors(dev, start_sector, 0); + if (res) + return res; + + priv->tag = start_sector; + return 0; +} + +static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin) +{ + struct ps3_storage_device *dev = ps3flash_dev; + return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, + dev->regions[dev->region_idx].size*dev->blk_size); +} + +static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf, + size_t count, loff_t *pos) +{ + struct ps3_storage_device *dev = ps3flash_dev; + struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); + u64 size, sector, offset; + int res; + size_t remaining, n; + const void *src; + + dev_dbg(&dev->sbd.core, + "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n", + __func__, __LINE__, count, *pos, userbuf, kernelbuf); + + size = dev->regions[dev->region_idx].size*dev->blk_size; + if (*pos >= size || !count) + return 0; + + if (*pos + count > size) { + dev_dbg(&dev->sbd.core, + "%s:%u Truncating count from %zu to %llu\n", __func__, + __LINE__, count, size - *pos); + count = size - *pos; + } + + sector = *pos / dev->bounce_size * priv->chunk_sectors; + offset = *pos % dev->bounce_size; + + remaining = count; + do { + n = min_t(u64, remaining, dev->bounce_size - offset); + src = dev->bounce_buf + offset; + + mutex_lock(&priv->mutex); + + res = ps3flash_fetch(dev, sector); + if (res) + goto fail; + + dev_dbg(&dev->sbd.core, + "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n", + __func__, __LINE__, n, src, userbuf, kernelbuf); + if (userbuf) { + if (copy_to_user(userbuf, src, n)) { + res = -EFAULT; + goto fail; + } + userbuf += n; + } + if (kernelbuf) { + memcpy(kernelbuf, src, n); + kernelbuf += n; + } + + mutex_unlock(&priv->mutex); + + *pos += n; + remaining -= n; + sector += priv->chunk_sectors; + offset = 0; + } while (remaining > 0); + + return count; + +fail: + mutex_unlock(&priv->mutex); + return res; +} + +static ssize_t ps3flash_write(const char __user *userbuf, + const void *kernelbuf, size_t count, loff_t *pos) +{ + struct ps3_storage_device *dev = ps3flash_dev; + struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); + u64 size, sector, offset; + int res = 0; + size_t remaining, n; + void *dst; + + dev_dbg(&dev->sbd.core, + "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n", + __func__, __LINE__, count, *pos, userbuf, kernelbuf); + + size = dev->regions[dev->region_idx].size*dev->blk_size; + if (*pos >= size || !count) + return 0; + + if (*pos + count > size) { + dev_dbg(&dev->sbd.core, + "%s:%u Truncating count from %zu to %llu\n", __func__, + __LINE__, count, size - *pos); + count = size - *pos; + } + + sector = *pos / dev->bounce_size * priv->chunk_sectors; + offset = *pos % dev->bounce_size; + + remaining = count; + do { + n = min_t(u64, remaining, dev->bounce_size - 
offset); + dst = dev->bounce_buf + offset; + + mutex_lock(&priv->mutex); + + if (n != dev->bounce_size) + res = ps3flash_fetch(dev, sector); + else if (sector != priv->tag) + res = ps3flash_writeback(dev); + if (res) + goto fail; + + dev_dbg(&dev->sbd.core, + "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n", + __func__, __LINE__, n, userbuf, kernelbuf, dst); + if (userbuf) { + if (copy_from_user(dst, userbuf, n)) { + res = -EFAULT; + goto fail; + } + userbuf += n; + } + if (kernelbuf) { + memcpy(dst, kernelbuf, n); + kernelbuf += n; + } + + priv->tag = sector; + priv->dirty = true; + + mutex_unlock(&priv->mutex); + + *pos += n; + remaining -= n; + sector += priv->chunk_sectors; + offset = 0; + } while (remaining > 0); + + return count; + +fail: + mutex_unlock(&priv->mutex); + return res; +} + +static ssize_t ps3flash_user_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + return ps3flash_read(buf, NULL, count, pos); +} + +static ssize_t ps3flash_user_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + return ps3flash_write(buf, NULL, count, pos); +} + +static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos) +{ + return ps3flash_read(NULL, buf, count, &pos); +} + +static ssize_t ps3flash_kernel_write(const void *buf, size_t count, + loff_t pos) +{ + ssize_t res; + int wb; + + res = ps3flash_write(NULL, buf, count, &pos); + if (res < 0) + return res; + + /* Make kernel writes synchronous */ + wb = ps3flash_writeback(ps3flash_dev); + if (wb) + return wb; + + return res; +} + +static int ps3flash_flush(struct file *file, fl_owner_t id) +{ + return ps3flash_writeback(ps3flash_dev); +} + +static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + struct inode *inode = file_inode(file); + int err; + mutex_lock(&inode->i_mutex); + err = ps3flash_writeback(ps3flash_dev); + mutex_unlock(&inode->i_mutex); + return err; +} + +static irqreturn_t ps3flash_interrupt(int irq, void *data) +{ + struct ps3_storage_device *dev = data; + int res; + u64 tag, status; + + res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); + + if (tag != dev->tag) + dev_err(&dev->sbd.core, + "%s:%u: tag mismatch, got %llx, expected %llx\n", + __func__, __LINE__, tag, dev->tag); + + if (res) { + dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", + __func__, __LINE__, res, status); + } else { + dev->lv1_status = status; + complete(&dev->done); + } + return IRQ_HANDLED; +} + +static const struct file_operations ps3flash_fops = { + .owner = THIS_MODULE, + .llseek = ps3flash_llseek, + .read = ps3flash_user_read, + .write = ps3flash_user_write, + .flush = ps3flash_flush, + .fsync = ps3flash_fsync, +}; + +static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = { + .read = ps3flash_kernel_read, + .write = ps3flash_kernel_write, +}; + +static struct miscdevice ps3flash_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = DEVICE_NAME, + .fops = &ps3flash_fops, +}; + +static int ps3flash_probe(struct ps3_system_bus_device *_dev) +{ + struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); + struct ps3flash_private *priv; + int error; + unsigned long tmp; + + tmp = dev->regions[dev->region_idx].start*dev->blk_size; + if (tmp % FLASH_BLOCK_SIZE) { + dev_err(&dev->sbd.core, + "%s:%u region start %lu is not aligned\n", __func__, + __LINE__, tmp); + return -EINVAL; + } + tmp = dev->regions[dev->region_idx].size*dev->blk_size; + if (tmp % FLASH_BLOCK_SIZE) { + dev_err(&dev->sbd.core, + "%s:%u region 
size %lu is not aligned\n", __func__, + __LINE__, tmp); + return -EINVAL; + } + + /* use static buffer, kmalloc cannot allocate 256 KiB */ + if (!ps3flash_bounce_buffer.address) + return -ENODEV; + + if (ps3flash_dev) { + dev_err(&dev->sbd.core, + "Only one FLASH device is supported\n"); + return -EBUSY; + } + + ps3flash_dev = dev; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + error = -ENOMEM; + goto fail; + } + + ps3_system_bus_set_drvdata(&dev->sbd, priv); + mutex_init(&priv->mutex); + priv->tag = -1; + + dev->bounce_size = ps3flash_bounce_buffer.size; + dev->bounce_buf = ps3flash_bounce_buffer.address; + priv->chunk_sectors = dev->bounce_size / dev->blk_size; + + error = ps3stor_setup(dev, ps3flash_interrupt); + if (error) + goto fail_free_priv; + + ps3flash_misc.parent = &dev->sbd.core; + error = misc_register(&ps3flash_misc); + if (error) { + dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n", + __func__, __LINE__, error); + goto fail_teardown; + } + + dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n", + __func__, __LINE__, ps3flash_misc.minor); + + ps3_os_area_flash_register(&ps3flash_kernel_ops); + return 0; + +fail_teardown: + ps3stor_teardown(dev); +fail_free_priv: + kfree(priv); + ps3_system_bus_set_drvdata(&dev->sbd, NULL); +fail: + ps3flash_dev = NULL; + return error; +} + +static int ps3flash_remove(struct ps3_system_bus_device *_dev) +{ + struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); + + ps3_os_area_flash_register(NULL); + misc_deregister(&ps3flash_misc); + ps3stor_teardown(dev); + kfree(ps3_system_bus_get_drvdata(&dev->sbd)); + ps3_system_bus_set_drvdata(&dev->sbd, NULL); + ps3flash_dev = NULL; + return 0; +} + + +static struct ps3_system_bus_driver ps3flash = { + .match_id = PS3_MATCH_ID_STOR_FLASH, + .core.name = DEVICE_NAME, + .core.owner = THIS_MODULE, + .probe = ps3flash_probe, + .remove = ps3flash_remove, + .shutdown = ps3flash_remove, +}; + + +static int __init ps3flash_init(void) +{ + return ps3_system_bus_driver_register(&ps3flash); +} + +static void __exit ps3flash_exit(void) +{ + ps3_system_bus_driver_unregister(&ps3flash); +} + +module_init(ps3flash_init); +module_exit(ps3flash_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver"); +MODULE_AUTHOR("Sony Corporation"); +MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH); diff --git a/kernel/drivers/char/random.c b/kernel/drivers/char/random.c new file mode 100644 index 000000000..eb47efec2 --- /dev/null +++ b/kernel/drivers/char/random.c @@ -0,0 +1,1776 @@ +/* + * random.c -- A strong random number generator + * + * Copyright Matt Mackall , 2003, 2004, 2005 + * + * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All + * rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. 
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * (now, with legal B.S. out of the way.....)
+ *
+ * This routine gathers environmental noise from device drivers, etc.,
+ * and returns good random numbers, suitable for cryptographic use.
+ * Besides the obvious cryptographic uses, these numbers are also good
+ * for seeding TCP sequence numbers, and other places where it is
+ * desirable to have numbers which are not only random, but hard to
+ * predict by an attacker.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * Computers are very predictable devices. Hence it is extremely hard
+ * to produce truly random numbers on a computer --- as opposed to
+ * pseudo-random numbers, which can easily be generated by using an
+ * algorithm. Unfortunately, it is very easy for attackers to guess
+ * the sequence of pseudo-random number generators, and for some
+ * applications this is not acceptable. So instead, we must try to
+ * gather "environmental noise" from the computer's environment, which
+ * must be hard for outside attackers to observe, and use that to
+ * generate random numbers. In a Unix environment, this is best done
+ * from inside the kernel.
+ *
+ * Sources of randomness from the environment include inter-keyboard
+ * timings, inter-interrupt timings from some interrupts, and other
+ * events which are both (a) non-deterministic and (b) hard for an
+ * outside observer to measure. Randomness from these sources is
+ * added to an "entropy pool", which is mixed using a CRC-like function.
+ * This is not cryptographically strong, but it is adequate assuming
+ * the randomness is not chosen maliciously, and it is fast enough that
+ * the overhead of doing it on every interrupt is very reasonable.
+ * As random bytes are mixed into the entropy pool, the routines keep
+ * an *estimate* of how many bits of randomness have been stored into
+ * the random number generator's internal state.
+ *
+ * When random bytes are desired, they are obtained by taking the SHA
+ * hash of the contents of the "entropy pool". The SHA hash avoids
+ * exposing the internal state of the entropy pool. It is believed to
+ * be computationally infeasible to derive any useful information
+ * about the input of SHA from its output. Even if it is possible to
+ * analyze SHA in some clever way, as long as the amount of data
+ * returned from the generator is less than the inherent entropy in
+ * the pool, the output data is totally unpredictable.
+ * For this reason, the routine decreases its internal estimate of how many
+ * bits of "true randomness" are contained in the entropy pool as it
+ * outputs random numbers.
+ *
+ * If this estimate goes to zero, the routine can still generate
+ * random numbers; however, an attacker may (at least in theory) be
+ * able to infer the future output of the generator from prior
+ * outputs. This requires successful cryptanalysis of SHA, which is
+ * not believed to be feasible, but there is a remote possibility.
+ * Nonetheless, these numbers should be useful for the vast majority
+ * of purposes.
+ *
+ * Exported interfaces ---- output
+ * ===============================
+ *
+ * There are three exported interfaces; the first is one designed to
+ * be used from within the kernel:
+ *
+ *	void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place them in the requested buffer.
+ *
+ * The two other interfaces are two character devices /dev/random and
+ * /dev/urandom. /dev/random is suitable for use when very high
+ * quality randomness is desired (for example, for key generation or
+ * one-time pads), as it will only return a maximum of the number of
+ * bits of randomness (as estimated by the random number generator)
+ * contained in the entropy pool.
+ *
+ * The /dev/urandom device does not have this limit, and will return
+ * as many bytes as are requested. As more and more random bytes are
+ * requested without giving time for the entropy pool to recharge,
+ * this will result in random numbers that are merely cryptographically
+ * strong. For many applications, however, this is acceptable.
+ *
+ * Exported interfaces ---- input
+ * ==============================
+ *
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
+ *	void add_device_randomness(const void *buf, unsigned int size);
+ *	void add_input_randomness(unsigned int type, unsigned int code,
+ *				unsigned int value);
+ *	void add_interrupt_randomness(int irq, int irq_flags);
+ *	void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * All of these routines try to estimate how many bits of randomness a
+ * particular randomness source has. They do this by keeping track of the
+ * first and second order deltas of the event timings.
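The delta scheme just described is compact enough to sketch on its own. The following standalone C fragment mirrors the arithmetic that add_timer_randomness() in this file applies to jiffies and cycle counts; the userspace setting and the function name are illustrative, not part of this patch:

    #include <stdlib.h>

    static long last_time, last_delta, last_delta2;

    /* Return a conservative bit estimate for one event timestamp. */
    static int estimate_bits(long now)
    {
        long delta  = now - last_time;
        long delta2 = delta - last_delta;
        long delta3 = delta2 - last_delta2;
        int bits = 0;

        last_time = now;
        last_delta = delta;
        last_delta2 = delta2;

        delta = labs(delta);
        delta2 = labs(delta2);
        delta3 = labs(delta3);
        /* use the smallest of the three absolute deltas */
        if (delta > delta2)
            delta = delta2;
        if (delta > delta3)
            delta = delta3;

        /* fls(delta >> 1), capped at 11 bits, as in the driver */
        for (delta >>= 1; delta; delta >>= 1)
            bits++;
        return bits < 11 ? bits : 11;
    }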
+ *
+ * Ensuring unpredictability at system startup
+ * ============================================
+ *
+ * When any operating system starts up, it will go through a sequence
+ * of actions that are fairly predictable by an adversary, especially
+ * if the start-up does not involve interaction with a human operator.
+ * This reduces the actual number of bits of unpredictability in the
+ * entropy pool below the value in entropy_count. In order to
+ * counteract this effect, it helps to carry information in the
+ * entropy pool across shut-downs and start-ups. To do this, put the
+ * following lines in an appropriate script which is run during the boot
+ * sequence:
+ *
+ *	echo "Initializing random number generator..."
+ *	random_seed=/var/run/random-seed
+ *	# Carry a random seed from start-up to start-up
+ *	# Load and then save the whole entropy pool
+ *	if [ -f $random_seed ]; then
+ *		cat $random_seed >/dev/urandom
+ *	else
+ *		touch $random_seed
+ *	fi
+ *	chmod 600 $random_seed
+ *	dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * and the following lines in an appropriate script which is run as
+ * the system is shut down:
+ *
+ *	# Carry a random seed from shut-down to start-up
+ *	# Save the whole entropy pool
+ *	echo "Saving random seed..."
+ *	random_seed=/var/run/random-seed
+ *	touch $random_seed
+ *	chmod 600 $random_seed
+ *	dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * For example, on most modern systems using the System V init
+ * scripts, such code fragments would be found in
+ * /etc/rc.d/init.d/random. On older Linux systems, the correct script
+ * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
+ *
+ * Effectively, these commands cause the contents of the entropy pool
+ * to be saved at shut-down time and reloaded into the entropy pool at
+ * start-up. (The 'dd' in the addition to the bootup script is to
+ * make sure that /etc/random-seed is different for every start-up,
+ * even if the system crashes without executing rc.0.) Even with
+ * complete knowledge of the start-up activities, predicting the state
+ * of the entropy pool requires knowledge of the previous history of
+ * the system.
+ *
+ * Configuring the /dev/random driver under Linux
+ * ==============================================
+ *
+ * The /dev/random driver under Linux uses minor numbers 8 and 9 of
+ * the /dev/mem major number (#1). So if your system does not have
+ * /dev/random and /dev/urandom created already, they can be created
+ * by using the commands:
+ *
+ *	mknod /dev/random c 1 8
+ *	mknod /dev/urandom c 1 9
+ *
+ * Acknowledgements:
+ * =================
+ *
+ * Ideas for constructing this random number generator were derived
+ * from Pretty Good Privacy's random number generator, and from private
+ * discussions with Phil Karn. Colin Plumb provided a faster random
+ * number generator, which sped up the mixing function of the entropy
+ * pool, taken from PGPfone. Dale Worley has also contributed many
+ * useful ideas and suggestions to improve this driver.
+ *
+ * Any flaws in the design are solely my responsibility, and should
+ * not be attributed to Phil, Colin, or any of the authors of PGP.
+ *
+ * Further background information on this topic may be obtained from
+ * RFC 1750, "Randomness Recommendations for Security", by Donald
+ * Eastlake, Steve Crocker, and Jeff Schiller.
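The seed carry shown in the scripts above can equally be done from C; a minimal sketch equivalent to the cat/dd pair, with error handling elided and the path taken from the scripts:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[512];
        int seed = open("/var/run/random-seed", O_RDWR | O_CREAT, 0600);
        int urnd = open("/dev/urandom", O_RDWR);
        ssize_t n;

        n = read(seed, buf, sizeof(buf));
        if (n > 0)
            write(urnd, buf, n);          /* load old seed into the pool */

        n = read(urnd, buf, sizeof(buf)); /* draw a fresh seed */
        lseek(seed, 0, SEEK_SET);
        if (n > 0)
            write(seed, buf, n);          /* save it for the next boot */
        return 0;
    }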
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +/* #define ADD_INTERRUPT_BENCH */ + +/* + * Configuration information + */ +#define INPUT_POOL_SHIFT 12 +#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5)) +#define OUTPUT_POOL_SHIFT 10 +#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5)) +#define SEC_XFER_SIZE 512 +#define EXTRACT_SIZE 10 + +#define DEBUG_RANDOM_BOOT 0 + +#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) + +/* + * To allow fractional bits to be tracked, the entropy_count field is + * denominated in units of 1/8th bits. + * + * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in + * credit_entropy_bits() needs to be 64 bits wide. + */ +#define ENTROPY_SHIFT 3 +#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) + +/* + * The minimum number of bits of entropy before we wake up a read on + * /dev/random. Should be enough to do a significant reseed. + */ +static int random_read_wakeup_bits = 64; + +/* + * If the entropy count falls under this number of bits, then we + * should wake up processes which are selecting or polling on write + * access to /dev/random. + */ +static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; + +/* + * The minimum number of seconds between urandom pool reseeding. We + * do this to limit the amount of entropy that can be drained from the + * input pool even if there are heavy demands on /dev/urandom. + */ +static int random_min_urandom_seed = 60; + +/* + * Originally, we used a primitive polynomial of degree .poolwords + * over GF(2). The taps for various sizes are defined below. They + * were chosen to be evenly spaced except for the last tap, which is 1 + * to get the twisting happening as fast as possible. + * + * For the purposes of better mixing, we use the CRC-32 polynomial as + * well to make a (modified) twisted Generalized Feedback Shift + * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR + * generators. ACM Transactions on Modeling and Computer Simulation + * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted + * GFSR generators II. ACM Transactions on Modeling and Computer + * Simulation 4:254-266) + * + * Thanks to Colin Plumb for suggesting this. + * + * The mixing operation is much less sensitive than the output hash, + * where we use SHA-1. All that we want of mixing operation is that + * it be a good non-cryptographic hash; i.e. it not produce collisions + * when fed "random" data of the sort we expect to see. As long as + * the pool state differs for different inputs, we have preserved the + * input entropy and done a good job. The fact that an intelligent + * attacker can construct inputs that will produce controlled + * alterations to the pool's state is not important because we don't + * consider such inputs to contribute any randomness. The only + * property we need with respect to them is that the attacker can't + * increase his/her knowledge of the pool's state. 
Since all + * additions are reversible (knowing the final state and the input, + * you can reconstruct the initial state), if an attacker has any + * uncertainty about the initial state, he/she can only shuffle that + * uncertainty about, but never cause any collisions (which would + * decrease the uncertainty). + * + * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and + * Videau in their paper, "The Linux Pseudorandom Number Generator + * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their + * paper, they point out that we are not using a true Twisted GFSR, + * since Matsumoto & Kurita used a trinomial feedback polynomial (that + * is, with only three taps, instead of the six that we are using). + * As a result, the resulting polynomial is neither primitive nor + * irreducible, and hence does not have a maximal period over + * GF(2**32). They suggest a slight change to the generator + * polynomial which improves the resulting TGFSR polynomial to be + * irreducible, which we have made here. + */ +static struct poolinfo { + int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits; +#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5) + int tap1, tap2, tap3, tap4, tap5; +} poolinfo_table[] = { + /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */ + /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ + { S(128), 104, 76, 51, 25, 1 }, + /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */ + /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */ + { S(32), 26, 19, 14, 7, 1 }, +#if 0 + /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ + { S(2048), 1638, 1231, 819, 411, 1 }, + + /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */ + { S(1024), 817, 615, 412, 204, 1 }, + + /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */ + { S(1024), 819, 616, 410, 207, 2 }, + + /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */ + { S(512), 411, 308, 208, 104, 1 }, + + /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */ + { S(512), 409, 307, 206, 102, 2 }, + /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */ + { S(512), 409, 309, 205, 103, 2 }, + + /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */ + { S(256), 205, 155, 101, 52, 1 }, + + /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */ + { S(128), 103, 78, 51, 27, 2 }, + + /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */ + { S(64), 52, 39, 26, 14, 1 }, +#endif +}; + +/* + * Static global variables + */ +static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); +static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); +static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); +static struct fasync_struct *fasync; + +/********************************************************************** + * + * OS independent entropy store. Here are the functions which handle + * storing entropy in an entropy pool. 
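The twist_table[] defined by the mixing code further below can be reproduced from the CRC-32 polynomial discussed above. A small standalone generator, assuming the usual reflected polynomial 0xEDB88320 and three shift steps per entry (matching the three bits twisted per mixing step):

    #include <stdio.h>

    int main(void)
    {
        unsigned int i, v;
        int step;

        for (i = 0; i < 8; i++) {
            v = i;
            /* three reflected CRC-32 reduction steps */
            for (step = 0; step < 3; step++)
                v = (v >> 1) ^ ((v & 1) ? 0xEDB88320u : 0);
            printf("0x%08x,%c", v, i % 4 == 3 ? '\n' : ' ');
        }
        return 0;
    }

Index 1 yields 0x3b6e20c8 and index 3 yields 0x4db26158, matching the table in the driver.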
+ * + **********************************************************************/ + +struct entropy_store; +struct entropy_store { + /* read-only data: */ + const struct poolinfo *poolinfo; + __u32 *pool; + const char *name; + struct entropy_store *pull; + struct work_struct push_work; + + /* read-write data: */ + unsigned long last_pulled; + spinlock_t lock; + unsigned short add_ptr; + unsigned short input_rotate; + int entropy_count; + int entropy_total; + unsigned int initialized:1; + unsigned int limit:1; + unsigned int last_data_init:1; + __u8 last_data[EXTRACT_SIZE]; +}; + +static void push_to_pool(struct work_struct *work); +static __u32 input_pool_data[INPUT_POOL_WORDS]; +static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; +static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS]; + +static struct entropy_store input_pool = { + .poolinfo = &poolinfo_table[0], + .name = "input", + .limit = 1, + .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), + .pool = input_pool_data +}; + +static struct entropy_store blocking_pool = { + .poolinfo = &poolinfo_table[1], + .name = "blocking", + .limit = 1, + .pull = &input_pool, + .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), + .pool = blocking_pool_data, + .push_work = __WORK_INITIALIZER(blocking_pool.push_work, + push_to_pool), +}; + +static struct entropy_store nonblocking_pool = { + .poolinfo = &poolinfo_table[1], + .name = "nonblocking", + .pull = &input_pool, + .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock), + .pool = nonblocking_pool_data, + .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work, + push_to_pool), +}; + +static __u32 const twist_table[8] = { + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; + +/* + * This function adds bytes into the entropy "pool". It does not + * update the entropy estimate. The caller should call + * credit_entropy_bits if this is appropriate. + * + * The pool is stirred with a primitive polynomial of the appropriate + * degree, and then twisted. We twist by three bits at a time because + * it's cheap to do so and helps slightly in the expected case where + * the entropy is concentrated in the low-order bits. + */ +static void _mix_pool_bytes(struct entropy_store *r, const void *in, + int nbytes) +{ + unsigned long i, tap1, tap2, tap3, tap4, tap5; + int input_rotate; + int wordmask = r->poolinfo->poolwords - 1; + const char *bytes = in; + __u32 w; + + tap1 = r->poolinfo->tap1; + tap2 = r->poolinfo->tap2; + tap3 = r->poolinfo->tap3; + tap4 = r->poolinfo->tap4; + tap5 = r->poolinfo->tap5; + + input_rotate = r->input_rotate; + i = r->add_ptr; + + /* mix one byte at a time to simplify size handling and churn faster */ + while (nbytes--) { + w = rol32(*bytes++, input_rotate); + i = (i - 1) & wordmask; + + /* XOR in the various taps */ + w ^= r->pool[i]; + w ^= r->pool[(i + tap1) & wordmask]; + w ^= r->pool[(i + tap2) & wordmask]; + w ^= r->pool[(i + tap3) & wordmask]; + w ^= r->pool[(i + tap4) & wordmask]; + w ^= r->pool[(i + tap5) & wordmask]; + + /* Mix the result back in with a twist */ + r->pool[i] = (w >> 3) ^ twist_table[w & 7]; + + /* + * Normally, we add 7 bits of rotation to the pool. + * At the beginning of the pool, add an extra 7 bits + * rotation, so that successive passes spread the + * input bits across the pool evenly. + */ + input_rotate = (input_rotate + (i ? 
7 : 14)) & 31; + } + + r->input_rotate = input_rotate; + r->add_ptr = i; +} + +static void __mix_pool_bytes(struct entropy_store *r, const void *in, + int nbytes) +{ + trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); + _mix_pool_bytes(r, in, nbytes); +} + +static void mix_pool_bytes(struct entropy_store *r, const void *in, + int nbytes) +{ + unsigned long flags; + + trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); + spin_lock_irqsave(&r->lock, flags); + _mix_pool_bytes(r, in, nbytes); + spin_unlock_irqrestore(&r->lock, flags); +} + +struct fast_pool { + __u32 pool[4]; + unsigned long last; + unsigned short reg_idx; + unsigned char count; +}; + +/* + * This is a fast mixing routine used by the interrupt randomness + * collector. It's hardcoded for an 128 bit pool and assumes that any + * locks that might be needed are taken by the caller. + */ +static void fast_mix(struct fast_pool *f) +{ + __u32 a = f->pool[0], b = f->pool[1]; + __u32 c = f->pool[2], d = f->pool[3]; + + a += b; c += d; + b = rol32(b, 6); d = rol32(d, 27); + d ^= a; b ^= c; + + a += b; c += d; + b = rol32(b, 16); d = rol32(d, 14); + d ^= a; b ^= c; + + a += b; c += d; + b = rol32(b, 6); d = rol32(d, 27); + d ^= a; b ^= c; + + a += b; c += d; + b = rol32(b, 16); d = rol32(d, 14); + d ^= a; b ^= c; + + f->pool[0] = a; f->pool[1] = b; + f->pool[2] = c; f->pool[3] = d; + f->count++; +} + +/* + * Credit (or debit) the entropy store with n bits of entropy. + * Use credit_entropy_bits_safe() if the value comes from userspace + * or otherwise should be checked for extreme values. + */ +static void credit_entropy_bits(struct entropy_store *r, int nbits) +{ + int entropy_count, orig; + const int pool_size = r->poolinfo->poolfracbits; + int nfrac = nbits << ENTROPY_SHIFT; + + if (!nbits) + return; + +retry: + entropy_count = orig = ACCESS_ONCE(r->entropy_count); + if (nfrac < 0) { + /* Debit */ + entropy_count += nfrac; + } else { + /* + * Credit: we have to account for the possibility of + * overwriting already present entropy. Even in the + * ideal case of pure Shannon entropy, new contributions + * approach the full value asymptotically: + * + * entropy <- entropy + (pool_size - entropy) * + * (1 - exp(-add_entropy/pool_size)) + * + * For add_entropy <= pool_size/2 then + * (1 - exp(-add_entropy/pool_size)) >= + * (add_entropy/pool_size)*0.7869... + * so we can approximate the exponential with + * 3/4*add_entropy/pool_size and still be on the + * safe side by adding at most pool_size/2 at a time. + * + * The use of pool_size-2 in the while statement is to + * prevent rounding artifacts from making the loop + * arbitrarily long; this limits the loop to log2(pool_size)*2 + * turns no matter how large nbits is. 
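The asymptotic credit is easiest to see numerically. A standalone sketch comparing the 3/4 * add/pool step used here against the ideal exponential curve (the pool size is illustrative):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double pool = 4096.0, approx = 0.0, exact = 0.0;
        int round;

        for (round = 1; round <= 6; round++) {
            double add = pool / 2;  /* at most pool_size/2 per step */

            approx += (pool - approx) * (add / pool) * 0.75;
            exact += (pool - exact) * (1 - exp(-add / pool));
            printf("round %d: approx %6.1f exact %6.1f\n",
                   round, approx, exact);
        }
        return 0;
    }

The approximation stays below the exponential at every round, so the pool is never over-credited.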
+ */ + int pnfrac = nfrac; + const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; + /* The +2 corresponds to the /4 in the denominator */ + + do { + unsigned int anfrac = min(pnfrac, pool_size/2); + unsigned int add = + ((pool_size - entropy_count)*anfrac*3) >> s; + + entropy_count += add; + pnfrac -= anfrac; + } while (unlikely(entropy_count < pool_size-2 && pnfrac)); + } + + if (unlikely(entropy_count < 0)) { + pr_warn("random: negative entropy/overflow: pool %s count %d\n", + r->name, entropy_count); + WARN_ON(1); + entropy_count = 0; + } else if (entropy_count > pool_size) + entropy_count = pool_size; + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; + + r->entropy_total += nbits; + if (!r->initialized && r->entropy_total > 128) { + r->initialized = 1; + r->entropy_total = 0; + if (r == &nonblocking_pool) { + prandom_reseed_late(); + wake_up_interruptible(&urandom_init_wait); + pr_notice("random: %s pool is initialized\n", r->name); + } + } + + trace_credit_entropy_bits(r->name, nbits, + entropy_count >> ENTROPY_SHIFT, + r->entropy_total, _RET_IP_); + + if (r == &input_pool) { + int entropy_bits = entropy_count >> ENTROPY_SHIFT; + + /* should we wake readers? */ + if (entropy_bits >= random_read_wakeup_bits) { + wake_up_interruptible(&random_read_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); + } + /* If the input pool is getting full, send some + * entropy to the two output pools, flipping back and + * forth between them, until the output pools are 75% + * full. + */ + if (entropy_bits > random_write_wakeup_bits && + r->initialized && + r->entropy_total >= 2*random_read_wakeup_bits) { + static struct entropy_store *last = &blocking_pool; + struct entropy_store *other = &blocking_pool; + + if (last == &blocking_pool) + other = &nonblocking_pool; + if (other->entropy_count <= + 3 * other->poolinfo->poolfracbits / 4) + last = other; + if (last->entropy_count <= + 3 * last->poolinfo->poolfracbits / 4) { + schedule_work(&last->push_work); + r->entropy_total = 0; + } + } + } +} + +static void credit_entropy_bits_safe(struct entropy_store *r, int nbits) +{ + const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); + + /* Cap the value to avoid overflows */ + nbits = min(nbits, nbits_max); + nbits = max(nbits, -nbits_max); + + credit_entropy_bits(r, nbits); +} + +/********************************************************************* + * + * Entropy input management + * + *********************************************************************/ + +/* There is one of these per entropy source */ +struct timer_rand_state { + cycles_t last_time; + long last_delta, last_delta2; + unsigned dont_count_entropy:1; +}; + +#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; + +/* + * Add device- or boot-specific data to the input and nonblocking + * pools to help initialize them to unique values. + * + * None of this adds any entropy, it is meant to avoid the + * problem of the nonblocking pool having similar initial state + * across largely identical devices. 
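As a usage illustration, a hypothetical network driver would feed its hardware address in at probe time as sketched below; the call mixes the bytes but credits no entropy. The function name is illustrative, not part of this patch:

    #include <linux/etherdevice.h>
    #include <linux/random.h>

    /* illustrative only -- not part of this patch */
    static void example_seed_from_mac(struct net_device *ndev)
    {
        add_device_randomness(ndev->dev_addr, ETH_ALEN);
    }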
+ */ +void add_device_randomness(const void *buf, unsigned int size) +{ + unsigned long time = random_get_entropy() ^ jiffies; + unsigned long flags; + + trace_add_device_randomness(size, _RET_IP_); + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&input_pool, buf, size); + _mix_pool_bytes(&input_pool, &time, sizeof(time)); + spin_unlock_irqrestore(&input_pool.lock, flags); + + spin_lock_irqsave(&nonblocking_pool.lock, flags); + _mix_pool_bytes(&nonblocking_pool, buf, size); + _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time)); + spin_unlock_irqrestore(&nonblocking_pool.lock, flags); +} +EXPORT_SYMBOL(add_device_randomness); + +static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; + +/* + * This function adds entropy to the entropy "pool" by using timing + * delays. It uses the timer_rand_state structure to make an estimate + * of how many bits of entropy this call has added to the pool. + * + * The number "num" is also added to the pool - it should somehow describe + * the type of event which just happened. This is currently 0-255 for + * keyboard scan codes, and 256 upwards for interrupts. + * + */ +static void add_timer_randomness(struct timer_rand_state *state, unsigned num) +{ + struct entropy_store *r; + struct { + long jiffies; + unsigned cycles; + unsigned num; + } sample; + long delta, delta2, delta3; + + sample.jiffies = jiffies; + sample.cycles = random_get_entropy(); + sample.num = num; + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; + mix_pool_bytes(r, &sample, sizeof(sample)); + + /* + * Calculate number of bits of randomness we probably added. + * We take into account the first, second and third-order deltas + * in order to make our estimate. + */ + + if (!state->dont_count_entropy) { + delta = sample.jiffies - state->last_time; + state->last_time = sample.jiffies; + + delta2 = delta - state->last_delta; + state->last_delta = delta; + + delta3 = delta2 - state->last_delta2; + state->last_delta2 = delta2; + + if (delta < 0) + delta = -delta; + if (delta2 < 0) + delta2 = -delta2; + if (delta3 < 0) + delta3 = -delta3; + if (delta > delta2) + delta = delta2; + if (delta > delta3) + delta = delta3; + + /* + * delta is now minimum absolute delta. + * Round down by 1 bit on general principles, + * and limit entropy entimate to 12 bits. 
+ */ + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + } +} + +void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) +{ + static unsigned char last_value; + + /* ignore autorepeat and the like */ + if (value == last_value) + return; + + last_value = value; + add_timer_randomness(&input_timer_state, + (type << 4) ^ code ^ (code >> 4) ^ value); + trace_add_input_randomness(ENTROPY_BITS(&input_pool)); +} +EXPORT_SYMBOL_GPL(add_input_randomness); + +static DEFINE_PER_CPU(struct fast_pool, irq_randomness); + +#ifdef ADD_INTERRUPT_BENCH +static unsigned long avg_cycles, avg_deviation; + +#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ +#define FIXED_1_2 (1 << (AVG_SHIFT-1)) + +static void add_interrupt_bench(cycles_t start) +{ + long delta = random_get_entropy() - start; + + /* Use a weighted moving average */ + delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); + avg_cycles += delta; + /* And average deviation */ + delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); + avg_deviation += delta; +} +#else +#define add_interrupt_bench(x) +#endif + +static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) +{ + __u32 *ptr = (__u32 *) regs; + + if (regs == NULL) + return 0; + if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) + f->reg_idx = 0; + return *(ptr + f->reg_idx++); +} + +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) +{ + struct entropy_store *r; + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + __u32 c_high, j_high; + unsigned long seed; + int credit = 0; + + if (cycles == 0) + cycles = get_reg(fast_pool, NULL); + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + fast_pool->pool[0] ^= cycles ^ j_high ^ irq; + fast_pool->pool[1] ^= now ^ c_high; + if (!ip) + ip = _RET_IP_; + fast_pool->pool[2] ^= ip; + fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : + get_reg(fast_pool, NULL); + + fast_mix(fast_pool); + add_interrupt_bench(cycles); + + if ((fast_pool->count < 64) && + !time_after(now, fast_pool->last + HZ)) + return; + + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; + if (!spin_trylock(&r->lock)) + return; + + fast_pool->last = now; + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); + + /* + * If we have architectural seed generator, produce a seed and + * add it to the pool. For the sake of paranoia don't let the + * architectural seed generator dominate the input from the + * interrupt noise. 
+ */ + if (arch_get_random_seed_long(&seed)) { + __mix_pool_bytes(r, &seed, sizeof(seed)); + credit = 1; + } + spin_unlock(&r->lock); + + fast_pool->count = 0; + + /* award one bit for the contents of the fast pool */ + credit_entropy_bits(r, credit + 1); +} + +#ifdef CONFIG_BLOCK +void add_disk_randomness(struct gendisk *disk) +{ + if (!disk || !disk->random) + return; + /* first major is 1, so we get >= 0x200 here */ + add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); + trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); +} +EXPORT_SYMBOL_GPL(add_disk_randomness); +#endif + +/********************************************************************* + * + * Entropy extraction routines + * + *********************************************************************/ + +static ssize_t extract_entropy(struct entropy_store *r, void *buf, + size_t nbytes, int min, int rsvd); + +/* + * This utility inline function is responsible for transferring entropy + * from the primary pool to the secondary extraction pool. We make + * sure we pull enough for a 'catastrophic reseed'. + */ +static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes); +static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) +{ + if (!r->pull || + r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) || + r->entropy_count > r->poolinfo->poolfracbits) + return; + + if (r->limit == 0 && random_min_urandom_seed) { + unsigned long now = jiffies; + + if (time_before(now, + r->last_pulled + random_min_urandom_seed * HZ)) + return; + r->last_pulled = now; + } + + _xfer_secondary_pool(r, nbytes); +} + +static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) +{ + __u32 tmp[OUTPUT_POOL_WORDS]; + + /* For /dev/random's pool, always leave two wakeups' worth */ + int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4; + int bytes = nbytes; + + /* pull at least as much as a wakeup */ + bytes = max_t(int, bytes, random_read_wakeup_bits / 8); + /* but never more than the buffer size */ + bytes = min_t(int, bytes, sizeof(tmp)); + + trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, + ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); + bytes = extract_entropy(r->pull, tmp, bytes, + random_read_wakeup_bits / 8, rsvd_bytes); + mix_pool_bytes(r, tmp, bytes); + credit_entropy_bits(r, bytes*8); +} + +/* + * Used as a workqueue function so that when the input pool is getting + * full, we can "spill over" some entropy to the output pools. That + * way the output pools can store some of the excess entropy instead + * of letting it go to waste. + */ +static void push_to_pool(struct work_struct *work) +{ + struct entropy_store *r = container_of(work, struct entropy_store, + push_work); + BUG_ON(!r); + _xfer_secondary_pool(r, random_read_wakeup_bits/8); + trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT, + r->pull->entropy_count >> ENTROPY_SHIFT); +} + +/* + * This function decides how many bytes to actually take from the + * given pool, and also debits the entropy count accordingly. + */ +static size_t account(struct entropy_store *r, size_t nbytes, int min, + int reserved) +{ + int entropy_count, orig; + size_t ibytes, nfrac; + + BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); + + /* Can we pull enough? 
*/ +retry: + entropy_count = orig = ACCESS_ONCE(r->entropy_count); + ibytes = nbytes; + /* If limited, never pull more than available */ + if (r->limit) { + int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); + + if ((have_bytes -= reserved) < 0) + have_bytes = 0; + ibytes = min_t(size_t, ibytes, have_bytes); + } + if (ibytes < min) + ibytes = 0; + + if (unlikely(entropy_count < 0)) { + pr_warn("random: negative entropy count: pool %s count %d\n", + r->name, entropy_count); + WARN_ON(1); + entropy_count = 0; + } + nfrac = ibytes << (ENTROPY_SHIFT + 3); + if ((size_t) entropy_count > nfrac) + entropy_count -= nfrac; + else + entropy_count = 0; + + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; + + trace_debit_entropy(r->name, 8 * ibytes); + if (ibytes && + (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) { + wake_up_interruptible(&random_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + } + + return ibytes; +} + +/* + * This function does the actual extraction for extract_entropy and + * extract_entropy_user. + * + * Note: we assume that .poolwords is a multiple of 16 words. + */ +static void extract_buf(struct entropy_store *r, __u8 *out) +{ + int i; + union { + __u32 w[5]; + unsigned long l[LONGS(20)]; + } hash; + __u32 workspace[SHA_WORKSPACE_WORDS]; + unsigned long flags; + + /* + * If we have an architectural hardware random number + * generator, use it for SHA's initial vector + */ + sha_init(hash.w); + for (i = 0; i < LONGS(20); i++) { + unsigned long v; + if (!arch_get_random_long(&v)) + break; + hash.l[i] = v; + } + + /* Generate a hash across the pool, 16 words (512 bits) at a time */ + spin_lock_irqsave(&r->lock, flags); + for (i = 0; i < r->poolinfo->poolwords; i += 16) + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); + + /* + * We mix the hash back into the pool to prevent backtracking + * attacks (where the attacker knows the state of the pool + * plus the current outputs, and attempts to find previous + * ouputs), unless the hash function can be inverted. By + * mixing at least a SHA1 worth of hash data back, we make + * brute-forcing the feedback as hard as brute-forcing the + * hash. + */ + __mix_pool_bytes(r, hash.w, sizeof(hash.w)); + spin_unlock_irqrestore(&r->lock, flags); + + memzero_explicit(workspace, sizeof(workspace)); + + /* + * In case the hash function has some recognizable output + * pattern, we fold it in half. Thus, we always feed back + * twice as much data as we output. + */ + hash.w[0] ^= hash.w[3]; + hash.w[1] ^= hash.w[4]; + hash.w[2] ^= rol32(hash.w[2], 16); + + memcpy(out, &hash, EXTRACT_SIZE); + memzero_explicit(&hash, sizeof(hash)); +} + +/* + * This function extracts randomness from the "entropy pool", and + * returns it in a buffer. + * + * The min parameter specifies the minimum amount we can pull before + * failing to avoid races that defeat catastrophic reseeding while the + * reserved parameter indicates how much entropy we must leave in the + * pool after each pull to avoid starving other readers. 
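The output fold performed by extract_buf() above is small enough to show standalone; this sketch folds a 20-byte SHA-1 state into the EXTRACT_SIZE (10) bytes that are actually emitted, so twice as much hash data is fed back as is ever output:

    #include <stdint.h>
    #include <string.h>

    static uint32_t rol32(uint32_t w, unsigned int s)
    {
        return (w << s) | (w >> (32 - s));
    }

    /* fold five hash words in half, emitting 10 bytes */
    static void fold_out(uint32_t w[5], uint8_t out[10])
    {
        w[0] ^= w[3];
        w[1] ^= w[4];
        w[2] ^= rol32(w[2], 16);
        memcpy(out, w, 10);
    }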
+ */ +static ssize_t extract_entropy(struct entropy_store *r, void *buf, + size_t nbytes, int min, int reserved) +{ + ssize_t ret = 0, i; + __u8 tmp[EXTRACT_SIZE]; + unsigned long flags; + + /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ + if (fips_enabled) { + spin_lock_irqsave(&r->lock, flags); + if (!r->last_data_init) { + r->last_data_init = 1; + spin_unlock_irqrestore(&r->lock, flags); + trace_extract_entropy(r->name, EXTRACT_SIZE, + ENTROPY_BITS(r), _RET_IP_); + xfer_secondary_pool(r, EXTRACT_SIZE); + extract_buf(r, tmp); + spin_lock_irqsave(&r->lock, flags); + memcpy(r->last_data, tmp, EXTRACT_SIZE); + } + spin_unlock_irqrestore(&r->lock, flags); + } + + trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); + xfer_secondary_pool(r, nbytes); + nbytes = account(r, nbytes, min, reserved); + + while (nbytes) { + extract_buf(r, tmp); + + if (fips_enabled) { + spin_lock_irqsave(&r->lock, flags); + if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) + panic("Hardware RNG duplicated output!\n"); + memcpy(r->last_data, tmp, EXTRACT_SIZE); + spin_unlock_irqrestore(&r->lock, flags); + } + i = min_t(int, nbytes, EXTRACT_SIZE); + memcpy(buf, tmp, i); + nbytes -= i; + buf += i; + ret += i; + } + + /* Wipe data just returned from memory */ + memzero_explicit(tmp, sizeof(tmp)); + + return ret; +} + +/* + * This function extracts randomness from the "entropy pool", and + * returns it in a userspace buffer. + */ +static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, + size_t nbytes) +{ + ssize_t ret = 0, i; + __u8 tmp[EXTRACT_SIZE]; + int large_request = (nbytes > 256); + + trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); + xfer_secondary_pool(r, nbytes); + nbytes = account(r, nbytes, 0, 0); + + while (nbytes) { + if (large_request && need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + schedule(); + } + + extract_buf(r, tmp); + i = min_t(int, nbytes, EXTRACT_SIZE); + if (copy_to_user(buf, tmp, i)) { + ret = -EFAULT; + break; + } + + nbytes -= i; + buf += i; + ret += i; + } + + /* Wipe data just returned from memory */ + memzero_explicit(tmp, sizeof(tmp)); + + return ret; +} + +/* + * This function is the exported kernel interface. It returns some + * number of good random numbers, suitable for key generation, seeding + * TCP sequence numbers, etc. It does not rely on the hardware random + * number generator. For random bytes direct from the hardware RNG + * (when available), use get_random_bytes_arch(). + */ +void get_random_bytes(void *buf, int nbytes) +{ +#if DEBUG_RANDOM_BOOT > 0 + if (unlikely(nonblocking_pool.initialized == 0)) + printk(KERN_NOTICE "random: %pF get_random_bytes called " + "with %d bits of entropy available\n", + (void *) _RET_IP_, + nonblocking_pool.entropy_total); +#endif + trace_get_random_bytes(nbytes, _RET_IP_); + extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); +} +EXPORT_SYMBOL(get_random_bytes); + +/* + * This function will use the architecture-specific hardware random + * number generator if it is available. The arch-specific hw RNG will + * almost certainly be faster than what we can do in software, but it + * is impossible to verify that it is implemented securely (as + * opposed, to, say, the AES encryption of a sequence number using a + * key known by the NSA). So it's useful if we need the speed, but + * only if we're willing to trust the hardware manufacturer not to + * have put in a back door. 
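A short in-kernel usage sketch of the trade-off described above; the function name is illustrative, not part of this patch:

    #include <linux/random.h>
    #include <linux/types.h>

    /* illustrative only -- not part of this patch */
    static void example_make_key(u8 key[16], bool trust_hw)
    {
        if (trust_hw)
            get_random_bytes_arch(key, 16); /* hw RNG, pool fallback */
        else
            get_random_bytes(key, 16);      /* pool only */
    }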
+ */ +void get_random_bytes_arch(void *buf, int nbytes) +{ + char *p = buf; + + trace_get_random_bytes_arch(nbytes, _RET_IP_); + while (nbytes) { + unsigned long v; + int chunk = min(nbytes, (int)sizeof(unsigned long)); + + if (!arch_get_random_long(&v)) + break; + + memcpy(p, &v, chunk); + p += chunk; + nbytes -= chunk; + } + + if (nbytes) + extract_entropy(&nonblocking_pool, p, nbytes, 0, 0); +} +EXPORT_SYMBOL(get_random_bytes_arch); + + +/* + * init_std_data - initialize pool with system data + * + * @r: pool to initialize + * + * This function clears the pool's entropy count and mixes some system + * data into the pool to prepare it for use. The pool is not cleared + * as that can only decrease the entropy in the pool. + */ +static void init_std_data(struct entropy_store *r) +{ + int i; + ktime_t now = ktime_get_real(); + unsigned long rv; + + r->last_pulled = jiffies; + mix_pool_bytes(r, &now, sizeof(now)); + for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { + if (!arch_get_random_seed_long(&rv) && + !arch_get_random_long(&rv)) + rv = random_get_entropy(); + mix_pool_bytes(r, &rv, sizeof(rv)); + } + mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); +} + +/* + * Note that setup_arch() may call add_device_randomness() + * long before we get here. This allows seeding of the pools + * with some platform dependent data very early in the boot + * process. But it limits our options here. We must use + * statically allocated structures that already have all + * initializations complete at compile time. We should also + * take care not to overwrite the precious per platform data + * we were given. + */ +static int rand_initialize(void) +{ + init_std_data(&input_pool); + init_std_data(&blocking_pool); + init_std_data(&nonblocking_pool); + return 0; +} +early_initcall(rand_initialize); + +#ifdef CONFIG_BLOCK +void rand_initialize_disk(struct gendisk *disk) +{ + struct timer_rand_state *state; + + /* + * If kzalloc returns null, we just won't use that entropy + * source. + */ + state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); + if (state) { + state->last_time = INITIAL_JIFFIES; + disk->random = state; + } +} +#endif + +static ssize_t +_random_read(int nonblock, char __user *buf, size_t nbytes) +{ + ssize_t n; + + if (nbytes == 0) + return 0; + + nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE); + while (1) { + n = extract_entropy_user(&blocking_pool, buf, nbytes); + if (n < 0) + return n; + trace_random_read(n*8, (nbytes-n)*8, + ENTROPY_BITS(&blocking_pool), + ENTROPY_BITS(&input_pool)); + if (n > 0) + return n; + + /* Pool is (near) empty. Maybe wait and retry. 
*/ + if (nonblock) + return -EAGAIN; + + wait_event_interruptible(random_read_wait, + ENTROPY_BITS(&input_pool) >= + random_read_wakeup_bits); + if (signal_pending(current)) + return -ERESTARTSYS; + } +} + +static ssize_t +random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) +{ + return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes); +} + +static ssize_t +urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) +{ + int ret; + + if (unlikely(nonblocking_pool.initialized == 0)) + printk_once(KERN_NOTICE "random: %s urandom read " + "with %d bits of entropy available\n", + current->comm, nonblocking_pool.entropy_total); + + nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); + ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); + + trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), + ENTROPY_BITS(&input_pool)); + return ret; +} + +static unsigned int +random_poll(struct file *file, poll_table * wait) +{ + unsigned int mask; + + poll_wait(file, &random_read_wait, wait); + poll_wait(file, &random_write_wait, wait); + mask = 0; + if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits) + mask |= POLLIN | POLLRDNORM; + if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) + mask |= POLLOUT | POLLWRNORM; + return mask; +} + +static int +write_pool(struct entropy_store *r, const char __user *buffer, size_t count) +{ + size_t bytes; + __u32 buf[16]; + const char __user *p = buffer; + + while (count > 0) { + bytes = min(count, sizeof(buf)); + if (copy_from_user(&buf, p, bytes)) + return -EFAULT; + + count -= bytes; + p += bytes; + + mix_pool_bytes(r, buf, bytes); + cond_resched(); + } + + return 0; +} + +static ssize_t random_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + size_t ret; + + ret = write_pool(&blocking_pool, buffer, count); + if (ret) + return ret; + ret = write_pool(&nonblocking_pool, buffer, count); + if (ret) + return ret; + + return (ssize_t)count; +} + +static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + int size, ent_count; + int __user *p = (int __user *)arg; + int retval; + + switch (cmd) { + case RNDGETENTCNT: + /* inherently racy, no point locking */ + ent_count = ENTROPY_BITS(&input_pool); + if (put_user(ent_count, p)) + return -EFAULT; + return 0; + case RNDADDTOENTCNT: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (get_user(ent_count, p)) + return -EFAULT; + credit_entropy_bits_safe(&input_pool, ent_count); + return 0; + case RNDADDENTROPY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (get_user(ent_count, p++)) + return -EFAULT; + if (ent_count < 0) + return -EINVAL; + if (get_user(size, p++)) + return -EFAULT; + retval = write_pool(&input_pool, (const char __user *)p, + size); + if (retval < 0) + return retval; + credit_entropy_bits_safe(&input_pool, ent_count); + return 0; + case RNDZAPENTCNT: + case RNDCLEARPOOL: + /* + * Clear the entropy pool counters. We no longer clear + * the entropy pool, as that's silly. 
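From userspace, the RNDADDENTROPY branch handled above is driven as sketched below; this assumes the uapi struct rand_pool_info from <linux/random.h>, requires CAP_SYS_ADMIN, and uses a 0xA5 filler as a stand-in for real noise:

    #include <fcntl.h>
    #include <linux/random.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>

    int main(void)
    {
        struct rand_pool_info *info = malloc(sizeof(*info) + 16);
        int fd = open("/dev/random", O_WRONLY);

        if (!info || fd < 0)
            return 1;
        info->entropy_count = 32;     /* bits to credit */
        info->buf_size = 16;          /* payload bytes that follow */
        memset(info->buf, 0xA5, 16);  /* stand-in for real noise */
        return ioctl(fd, RNDADDENTROPY, info);
    }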
+ */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + input_pool.entropy_count = 0; + nonblocking_pool.entropy_count = 0; + blocking_pool.entropy_count = 0; + return 0; + default: + return -EINVAL; + } +} + +static int random_fasync(int fd, struct file *filp, int on) +{ + return fasync_helper(fd, filp, on, &fasync); +} + +const struct file_operations random_fops = { + .read = random_read, + .write = random_write, + .poll = random_poll, + .unlocked_ioctl = random_ioctl, + .fasync = random_fasync, + .llseek = noop_llseek, +}; + +const struct file_operations urandom_fops = { + .read = urandom_read, + .write = random_write, + .unlocked_ioctl = random_ioctl, + .fasync = random_fasync, + .llseek = noop_llseek, +}; + +SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, + unsigned int, flags) +{ + if (flags & ~(GRND_NONBLOCK|GRND_RANDOM)) + return -EINVAL; + + if (count > INT_MAX) + count = INT_MAX; + + if (flags & GRND_RANDOM) + return _random_read(flags & GRND_NONBLOCK, buf, count); + + if (unlikely(nonblocking_pool.initialized == 0)) { + if (flags & GRND_NONBLOCK) + return -EAGAIN; + wait_event_interruptible(urandom_init_wait, + nonblocking_pool.initialized); + if (signal_pending(current)) + return -ERESTARTSYS; + } + return urandom_read(NULL, buf, count, NULL); +} + +/*************************************************************** + * Random UUID interface + * + * Used here for a Boot ID, but can be useful for other kernel + * drivers. + ***************************************************************/ + +/* + * Generate random UUID + */ +void generate_random_uuid(unsigned char uuid_out[16]) +{ + get_random_bytes(uuid_out, 16); + /* Set UUID version to 4 --- truly random generation */ + uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40; + /* Set the UUID variant to DCE */ + uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; +} +EXPORT_SYMBOL(generate_random_uuid); + +/******************************************************************** + * + * Sysctl interface + * + ********************************************************************/ + +#ifdef CONFIG_SYSCTL + +#include + +static int min_read_thresh = 8, min_write_thresh; +static int max_read_thresh = OUTPUT_POOL_WORDS * 32; +static int max_write_thresh = INPUT_POOL_WORDS * 32; +static char sysctl_bootid[16]; + +/* + * This function is used to return both the bootid UUID, and random + * UUID. The difference is in whether table->data is NULL; if it is, + * then a new UUID is generated and returned to the user. + * + * If the user accesses this via the proc interface, the UUID will be + * returned as an ASCII string in the standard UUID format; if via the + * sysctl system call, as 16 bytes of binary data. 
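Both behaviours are easy to observe from userspace; in this sketch boot_id prints the same UUID on every run within a boot, while uuid differs on each read:

    #include <stdio.h>

    static void show(const char *path)
    {
        char line[64];
        FILE *f = fopen(path, "r");

        if (f && fgets(line, sizeof(line), f))
            printf("%s: %s", path, line);
        if (f)
            fclose(f);
    }

    int main(void)
    {
        show("/proc/sys/kernel/random/boot_id");
        show("/proc/sys/kernel/random/uuid"); /* differs on each read */
        show("/proc/sys/kernel/random/uuid");
        return 0;
    }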
+ */ +static int proc_do_uuid(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + unsigned char buf[64], tmp_uuid[16], *uuid; + + uuid = table->data; + if (!uuid) { + uuid = tmp_uuid; + generate_random_uuid(uuid); + } else { + static DEFINE_SPINLOCK(bootid_spinlock); + + spin_lock(&bootid_spinlock); + if (!uuid[8]) + generate_random_uuid(uuid); + spin_unlock(&bootid_spinlock); + } + + sprintf(buf, "%pU", uuid); + + fake_table.data = buf; + fake_table.maxlen = sizeof(buf); + + return proc_dostring(&fake_table, write, buffer, lenp, ppos); +} + +/* + * Return entropy available scaled to integral bits + */ +static int proc_do_entropy(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + int entropy_count; + + entropy_count = *(int *)table->data >> ENTROPY_SHIFT; + + fake_table.data = &entropy_count; + fake_table.maxlen = sizeof(entropy_count); + + return proc_dointvec(&fake_table, write, buffer, lenp, ppos); +} + +static int sysctl_poolsize = INPUT_POOL_WORDS * 32; +extern struct ctl_table random_table[]; +struct ctl_table random_table[] = { + { + .procname = "poolsize", + .data = &sysctl_poolsize, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec, + }, + { + .procname = "entropy_avail", + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_do_entropy, + .data = &input_pool.entropy_count, + }, + { + .procname = "read_wakeup_threshold", + .data = &random_read_wakeup_bits, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_read_thresh, + .extra2 = &max_read_thresh, + }, + { + .procname = "write_wakeup_threshold", + .data = &random_write_wakeup_bits, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_write_thresh, + .extra2 = &max_write_thresh, + }, + { + .procname = "urandom_min_reseed_secs", + .data = &random_min_urandom_seed, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "boot_id", + .data = &sysctl_bootid, + .maxlen = 16, + .mode = 0444, + .proc_handler = proc_do_uuid, + }, + { + .procname = "uuid", + .maxlen = 16, + .mode = 0444, + .proc_handler = proc_do_uuid, + }, +#ifdef ADD_INTERRUPT_BENCH + { + .procname = "add_interrupt_avg_cycles", + .data = &avg_cycles, + .maxlen = sizeof(avg_cycles), + .mode = 0444, + .proc_handler = proc_doulongvec_minmax, + }, + { + .procname = "add_interrupt_avg_deviation", + .data = &avg_deviation, + .maxlen = sizeof(avg_deviation), + .mode = 0444, + .proc_handler = proc_doulongvec_minmax, + }, +#endif + { } +}; +#endif /* CONFIG_SYSCTL */ + +static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; + +int random_int_secret_init(void) +{ + get_random_bytes(random_int_secret, sizeof(random_int_secret)); + return 0; +} + +/* + * Get a random word for internal kernel use only. Similar to urandom but + * with the goal of minimal entropy pool depletion. 
As a result, the random
+ * value is not cryptographically secure but for several uses the cost of
+ * depleting entropy is too high
+ */
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+unsigned int get_random_int(void)
+{
+	__u32 *hash;
+	unsigned int ret;
+
+	if (arch_get_random_int(&ret))
+		return ret;
+
+	hash = get_cpu_var(get_random_int_hash);
+
+	hash[0] += current->pid + jiffies + random_get_entropy();
+	md5_transform(hash, random_int_secret);
+	ret = hash[0];
+	put_cpu_var(get_random_int_hash);
+
+	return ret;
+}
+EXPORT_SYMBOL(get_random_int);
+
+/*
+ * randomize_range() returns a start address such that
+ *
+ *    [...... <range> .....]
+ *  start                  end
+ *
+ * a <range> with size "len" starting at the return value is inside the
+ * area defined by [start, end], but is otherwise randomized.
+ */
+unsigned long
+randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+	unsigned long range = end - len - start;
+
+	if (end <= start + len)
+		return 0;
+	return PAGE_ALIGN(get_random_int() % range + start);
+}
+
+/* Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const char *buffer, size_t count,
+				size_t entropy)
+{
+	struct entropy_store *poolp = &input_pool;
+
+	/* Suspend writing if we're above the trickle threshold.
+	 * We'll be woken up again once below random_write_wakeup_thresh,
+	 * or when the calling thread is about to terminate.
+	 */
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+	mix_pool_bytes(poolp, buffer, count);
+	credit_entropy_bits(poolp, entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
diff --git a/kernel/drivers/char/raw.c b/kernel/drivers/char/raw.c
new file mode 100644
index 000000000..5fc291c61
--- /dev/null
+++ b/kernel/drivers/char/raw.c
@@ -0,0 +1,369 @@
+/*
+ * linux/drivers/char/raw.c
+ *
+ * Front-end raw character devices. These can be bound to any block
+ * devices to provide genuine Unix raw character device semantics.
+ *
+ * We reserve minor number 0 for a control interface. ioctl()s on this
+ * device are used to bind the other minor numbers to block devices.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+struct raw_device_data {
+	struct block_device *binding;
+	int inuse;
+};
+
+static struct class *raw_class;
+static struct raw_device_data *raw_devices;
+static DEFINE_MUTEX(raw_mutex);
+static const struct file_operations raw_ctl_fops; /* forward declaration */
+
+static int max_raw_minors = MAX_RAW_MINORS;
+
+module_param(max_raw_minors, int, 0);
+MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
+
+/*
+ * Open/close code for raw IO.
+ *
+ * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
+ * point at the blockdev's address_space and set the file handle to use
+ * O_DIRECT.
+ *
+ * Set the device's soft blocksize to the minimum possible. This gives the
+ * finest possible alignment and has no adverse impact on performance.
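+ *
+ * Since the handle is switched to O_DIRECT, I/O on /dev/raw/rawN must
+ * follow the usual direct-I/O rules: user buffer, file offset and
+ * transfer length aligned to the logical block size (typically 512
+ * bytes).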
+ */ +static int raw_open(struct inode *inode, struct file *filp) +{ + const int minor = iminor(inode); + struct block_device *bdev; + int err; + + if (minor == 0) { /* It is the control device */ + filp->f_op = &raw_ctl_fops; + return 0; + } + + mutex_lock(&raw_mutex); + + /* + * All we need to do on open is check that the device is bound. + */ + bdev = raw_devices[minor].binding; + err = -ENODEV; + if (!bdev) + goto out; + igrab(bdev->bd_inode); + err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open); + if (err) + goto out; + err = set_blocksize(bdev, bdev_logical_block_size(bdev)); + if (err) + goto out1; + filp->f_flags |= O_DIRECT; + filp->f_mapping = bdev->bd_inode->i_mapping; + if (++raw_devices[minor].inuse == 1) + file_inode(filp)->i_mapping = + bdev->bd_inode->i_mapping; + filp->private_data = bdev; + mutex_unlock(&raw_mutex); + return 0; + +out1: + blkdev_put(bdev, filp->f_mode | FMODE_EXCL); +out: + mutex_unlock(&raw_mutex); + return err; +} + +/* + * When the final fd which refers to this character-special node is closed, we + * make its ->mapping point back at its own i_data. + */ +static int raw_release(struct inode *inode, struct file *filp) +{ + const int minor= iminor(inode); + struct block_device *bdev; + + mutex_lock(&raw_mutex); + bdev = raw_devices[minor].binding; + if (--raw_devices[minor].inuse == 0) + /* Here inode->i_mapping == bdev->bd_inode->i_mapping */ + inode->i_mapping = &inode->i_data; + mutex_unlock(&raw_mutex); + + blkdev_put(bdev, filp->f_mode | FMODE_EXCL); + return 0; +} + +/* + * Forward ioctls to the underlying block device. + */ +static long +raw_ioctl(struct file *filp, unsigned int command, unsigned long arg) +{ + struct block_device *bdev = filp->private_data; + return blkdev_ioctl(bdev, 0, command, arg); +} + +static int bind_set(int number, u64 major, u64 minor) +{ + dev_t dev = MKDEV(major, minor); + struct raw_device_data *rawdev; + int err = 0; + + if (number <= 0 || number >= max_raw_minors) + return -EINVAL; + + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EINVAL; + + rawdev = &raw_devices[number]; + + /* + * This is like making block devices, so demand the + * same capability + */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + /* + * For now, we don't need to check that the underlying + * block device is present or not: we can do that when + * the raw device is opened. Just check that the + * major/minor numbers make sense. + */ + + if (MAJOR(dev) == 0 && dev != 0) + return -EINVAL; + + mutex_lock(&raw_mutex); + if (rawdev->inuse) { + mutex_unlock(&raw_mutex); + return -EBUSY; + } + if (rawdev->binding) { + bdput(rawdev->binding); + module_put(THIS_MODULE); + } + if (!dev) { + /* unbind */ + rawdev->binding = NULL; + device_destroy(raw_class, MKDEV(RAW_MAJOR, number)); + } else { + rawdev->binding = bdget(dev); + if (rawdev->binding == NULL) { + err = -ENOMEM; + } else { + dev_t raw = MKDEV(RAW_MAJOR, number); + __module_get(THIS_MODULE); + device_destroy(raw_class, raw); + device_create(raw_class, NULL, raw, NULL, + "raw%d", number); + } + } + mutex_unlock(&raw_mutex); + return err; +} + +static int bind_get(int number, dev_t *dev) +{ + struct raw_device_data *rawdev; + struct block_device *bdev; + + if (number <= 0 || number >= max_raw_minors) + return -EINVAL; + + rawdev = &raw_devices[number]; + + mutex_lock(&raw_mutex); + bdev = rawdev->binding; + *dev = bdev ? 
bdev->bd_dev : 0; + mutex_unlock(&raw_mutex); + return 0; +} + +/* + * Deal with ioctls against the raw-device control interface, to bind + * and unbind other raw devices. + */ +static long raw_ctl_ioctl(struct file *filp, unsigned int command, + unsigned long arg) +{ + struct raw_config_request rq; + dev_t dev; + int err; + + switch (command) { + case RAW_SETBIND: + if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) + return -EFAULT; + + return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); + + case RAW_GETBIND: + if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) + return -EFAULT; + + err = bind_get(rq.raw_minor, &dev); + if (err) + return err; + + rq.block_major = MAJOR(dev); + rq.block_minor = MINOR(dev); + + if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) + return -EFAULT; + + return 0; + } + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +struct raw32_config_request { + compat_int_t raw_minor; + compat_u64 block_major; + compat_u64 block_minor; +}; + +static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct raw32_config_request __user *user_req = compat_ptr(arg); + struct raw32_config_request rq; + dev_t dev; + int err = 0; + + switch (cmd) { + case RAW_SETBIND: + if (copy_from_user(&rq, user_req, sizeof(rq))) + return -EFAULT; + + return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); + + case RAW_GETBIND: + if (copy_from_user(&rq, user_req, sizeof(rq))) + return -EFAULT; + + err = bind_get(rq.raw_minor, &dev); + if (err) + return err; + + rq.block_major = MAJOR(dev); + rq.block_minor = MINOR(dev); + + if (copy_to_user(user_req, &rq, sizeof(rq))) + return -EFAULT; + + return 0; + } + + return -EINVAL; +} +#endif + +static const struct file_operations raw_fops = { + .read_iter = blkdev_read_iter, + .write_iter = blkdev_write_iter, + .fsync = blkdev_fsync, + .open = raw_open, + .release = raw_release, + .unlocked_ioctl = raw_ioctl, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; + +static const struct file_operations raw_ctl_fops = { + .unlocked_ioctl = raw_ctl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = raw_ctl_compat_ioctl, +#endif + .open = raw_open, + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +static struct cdev raw_cdev; + +static char *raw_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev)); +} + +static int __init raw_init(void) +{ + dev_t dev = MKDEV(RAW_MAJOR, 0); + int ret; + + if (max_raw_minors < 1 || max_raw_minors > 65536) { + printk(KERN_WARNING "raw: invalid max_raw_minors (must be" + " between 1 and 65536), using %d\n", MAX_RAW_MINORS); + max_raw_minors = MAX_RAW_MINORS; + } + + raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors); + if (!raw_devices) { + printk(KERN_ERR "Not enough memory for raw device structures\n"); + ret = -ENOMEM; + goto error; + } + + ret = register_chrdev_region(dev, max_raw_minors, "raw"); + if (ret) + goto error; + + cdev_init(&raw_cdev, &raw_fops); + ret = cdev_add(&raw_cdev, dev, max_raw_minors); + if (ret) { + goto error_region; + } + + raw_class = class_create(THIS_MODULE, "raw"); + if (IS_ERR(raw_class)) { + printk(KERN_ERR "Error creating raw class.\n"); + cdev_del(&raw_cdev); + ret = PTR_ERR(raw_class); + goto error_region; + } + raw_class->devnode = raw_devnode; + device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl"); + + return 0; + +error_region: + unregister_chrdev_region(dev, max_raw_minors); +error: + vfree(raw_devices); + return ret; +} + 
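+/*
+ * Illustrative userspace use of the control node (not part of this
+ * driver; the device numbers are examples only). Binding raw minor 1
+ * to the block device 8:0 with RAW_SETBIND:
+ *
+ *	struct raw_config_request rq = {
+ *		.raw_minor   = 1,
+ *		.block_major = 8,
+ *		.block_minor = 0,
+ *	};
+ *	int fd = open("/dev/raw/rawctl", O_RDWR);
+ *	ioctl(fd, RAW_SETBIND, &rq);
+ *
+ * after which opening /dev/raw/raw1 gives O_DIRECT access to that
+ * disk, and RAW_GETBIND with the same raw_minor reads the binding
+ * back.
+ */
+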
+static void __exit raw_exit(void) +{ + device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); + class_destroy(raw_class); + cdev_del(&raw_cdev); + unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors); +} + +module_init(raw_init); +module_exit(raw_exit); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/rtc.c b/kernel/drivers/char/rtc.c new file mode 100644 index 000000000..35259961c --- /dev/null +++ b/kernel/drivers/char/rtc.c @@ -0,0 +1,1427 @@ +/* + * Real Time Clock interface for Linux + * + * Copyright (C) 1996 Paul Gortmaker + * + * This driver allows use of the real time clock (built into + * nearly all computers) from user space. It exports the /dev/rtc + * interface supporting various ioctl() and also the + * /proc/driver/rtc pseudo-file for status information. + * + * The ioctls can be used to set the interrupt behaviour and + * generation rate from the RTC via IRQ 8. Then the /dev/rtc + * interface can be used to make use of these timer interrupts, + * be they interval or alarm based. + * + * The /dev/rtc interface will block on reads until an interrupt + * has been received. If a RTC interrupt has already happened, + * it will output an unsigned long and then block. The output value + * contains the interrupt status in the low byte and the number of + * interrupts since the last read in the remaining high bytes. The + * /dev/rtc interface can also be used with the select(2) call. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Based on other minimal char device drivers, like Alan's + * watchdog, Ted's random, etc. etc. + * + * 1.07 Paul Gortmaker. + * 1.08 Miquel van Smoorenburg: disallow certain things on the + * DEC Alpha as the CMOS clock is also used for other things. + * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup. + * 1.09a Pete Zaitcev: Sun SPARC + * 1.09b Jeff Garzik: Modularize, init cleanup + * 1.09c Jeff Garzik: SMP cleanup + * 1.10 Paul Barton-Davis: add support for async I/O + * 1.10a Andrea Arcangeli: Alpha updates + * 1.10b Andrew Morton: SMP lock fix + * 1.10c Cesar Barros: SMP locking fixes and cleanup + * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit + * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness. + * 1.11 Takashi Iwai: Kernel access functions + * rtc_register/rtc_unregister/rtc_control + * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init + * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer + * CONFIG_HPET_EMULATE_RTC + * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly. + * 1.12ac Alan Cox: Allow read access to the day of week register + * 1.12b David John: Remove calls to the BKL. + */ + +#define RTC_VERSION "1.12b" + +/* + * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with + * interrupts disabled. Due to the index-port/data-port (0x70/0x71) + * design of the RTC, we don't want two different things trying to + * get to it at once. (e.g. the periodic 11 min sync from + * kernel/time/ntp.c vs. this driver.) 
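+ *
+ * (Concretely: each CMOS access first writes a register index to port
+ * 0x70 and then transfers the data byte through port 0x71, so two
+ * unserialized users could clobber each other's index selection
+ * between those two steps.)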
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_X86 +#include +#endif + +#ifdef CONFIG_SPARC32 +#include +#include +#include + +static unsigned long rtc_port; +static int rtc_irq; +#endif + +#ifdef CONFIG_HPET_EMULATE_RTC +#undef RTC_IRQ +#endif + +#ifdef RTC_IRQ +static int rtc_has_irq = 1; +#endif + +#ifndef CONFIG_HPET_EMULATE_RTC +#define is_hpet_enabled() 0 +#define hpet_set_alarm_time(hrs, min, sec) 0 +#define hpet_set_periodic_freq(arg) 0 +#define hpet_mask_rtc_irq_bit(arg) 0 +#define hpet_set_rtc_irq_bit(arg) 0 +#define hpet_rtc_timer_init() do { } while (0) +#define hpet_rtc_dropped_irq() 0 +#define hpet_register_irq_handler(h) ({ 0; }) +#define hpet_unregister_irq_handler(h) ({ 0; }) +#ifdef RTC_IRQ +static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) +{ + return 0; +} +#endif +#endif + +/* + * We sponge a minor off of the misc major. No need slurping + * up another valuable major dev number for this. If you add + * an ioctl, make sure you don't conflict with SPARC's RTC + * ioctls. + */ + +static struct fasync_struct *rtc_async_queue; + +static DECLARE_WAIT_QUEUE_HEAD(rtc_wait); + +#ifdef RTC_IRQ +static void rtc_dropped_irq(unsigned long data); + +static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0); +#endif + +static ssize_t rtc_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); + +static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static void rtc_get_rtc_time(struct rtc_time *rtc_tm); + +#ifdef RTC_IRQ +static unsigned int rtc_poll(struct file *file, poll_table *wait); +#endif + +static void get_rtc_alm_time(struct rtc_time *alm_tm); +#ifdef RTC_IRQ +static void set_rtc_irq_bit_locked(unsigned char bit); +static void mask_rtc_irq_bit_locked(unsigned char bit); + +static inline void set_rtc_irq_bit(unsigned char bit) +{ + spin_lock_irq(&rtc_lock); + set_rtc_irq_bit_locked(bit); + spin_unlock_irq(&rtc_lock); +} + +static void mask_rtc_irq_bit(unsigned char bit) +{ + spin_lock_irq(&rtc_lock); + mask_rtc_irq_bit_locked(bit); + spin_unlock_irq(&rtc_lock); +} +#endif + +#ifdef CONFIG_PROC_FS +static int rtc_proc_open(struct inode *inode, struct file *file); +#endif + +/* + * Bits in rtc_status. (6 bits of room for future expansion) + */ + +#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ +#define RTC_TIMER_ON 0x02 /* missed irq timer active */ + +/* + * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is + * protected by the spin lock rtc_lock. However, ioctl can still disable the + * timer in rtc_status and then with del_timer after the interrupt has read + * rtc_status but before mod_timer is called, which would then reenable the + * timer (but you would need to have an awful timing before you'd trip on it) + */ +static unsigned long rtc_status; /* bitmapped status byte. */ +static unsigned long rtc_freq; /* Current periodic IRQ rate */ +static unsigned long rtc_irq_data; /* our output to the world */ +static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ + +#ifdef RTC_IRQ +/* + * rtc_task_lock nests inside rtc_lock. + */ +static DEFINE_SPINLOCK(rtc_task_lock); +static rtc_task_t *rtc_callback; +#endif + +/* + * If this driver ever becomes modularised, it will be really nice + * to make the epoch retain its value across module reload... 
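+ *
+ * (With the default epoch of 1900, a year register reading of 99 means
+ * 1999. rtc_init() below re-guesses the epoch on Alpha/MIPS machines;
+ * e.g. a reading below 20 implies an SRM console epoch of 2000.)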
+ */ + +static unsigned long epoch = 1900; /* year corresponding to 0x00 */ + +static const unsigned char days_in_mo[] = +{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +/* + * Returns true if a clock update is in progress + */ +static inline unsigned char rtc_is_updating(void) +{ + unsigned long flags; + unsigned char uip; + + spin_lock_irqsave(&rtc_lock, flags); + uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); + spin_unlock_irqrestore(&rtc_lock, flags); + return uip; +} + +#ifdef RTC_IRQ +/* + * A very tiny interrupt handler. It runs with interrupts disabled, + * but there is possibility of conflicting with the set_rtc_mmss() + * call (the rtc irq and the timer irq can easily run at the same + * time in two different CPUs). So we need to serialize + * accesses to the chip with the rtc_lock spinlock that each + * architecture should implement in the timer code. + * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.) + */ + +static irqreturn_t rtc_interrupt(int irq, void *dev_id) +{ + /* + * Can be an alarm interrupt, update complete interrupt, + * or a periodic interrupt. We store the status in the + * low byte and the number of interrupts received since + * the last read in the remainder of rtc_irq_data. + */ + + spin_lock(&rtc_lock); + rtc_irq_data += 0x100; + rtc_irq_data &= ~0xff; + if (is_hpet_enabled()) { + /* + * In this case it is HPET RTC interrupt handler + * calling us, with the interrupt information + * passed as arg1, instead of irq. + */ + rtc_irq_data |= (unsigned long)irq & 0xF0; + } else { + rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); + } + + if (rtc_status & RTC_TIMER_ON) + mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); + + spin_unlock(&rtc_lock); + + /* Now do the rest of the actions */ + spin_lock(&rtc_task_lock); + if (rtc_callback) + rtc_callback->func(rtc_callback->private_data); + spin_unlock(&rtc_task_lock); + wake_up_interruptible(&rtc_wait); + + kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); + + return IRQ_HANDLED; +} +#endif + +/* + * sysctl-tuning infrastructure. + */ +static struct ctl_table rtc_table[] = { + { + .procname = "max-user-freq", + .data = &rtc_max_user_freq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { } +}; + +static struct ctl_table rtc_root[] = { + { + .procname = "rtc", + .mode = 0555, + .child = rtc_table, + }, + { } +}; + +static struct ctl_table dev_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = rtc_root, + }, + { } +}; + +static struct ctl_table_header *sysctl_header; + +static int __init init_sysctl(void) +{ + sysctl_header = register_sysctl_table(dev_root); + return 0; +} + +static void __exit cleanup_sysctl(void) +{ + unregister_sysctl_table(sysctl_header); +} + +/* + * Now all the various file operations that we export. + */ + +static ssize_t rtc_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ +#ifndef RTC_IRQ + return -EIO; +#else + DECLARE_WAITQUEUE(wait, current); + unsigned long data; + ssize_t retval; + + if (rtc_has_irq == 0) + return -EIO; + + /* + * Historically this function used to assume that sizeof(unsigned long) + * is the same in userspace and kernelspace. This lead to problems + * for configurations with multiple ABIs such a the MIPS o32 and 64 + * ABIs supported on the same kernel. So now we support read of both + * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the + * userspace ABI. 
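+ *
+ * An illustrative userspace pattern (not part of this driver):
+ *
+ *	unsigned long data;
+ *	read(fd, &data, sizeof(data));	(blocks until an RTC interrupt)
+ *	status = data & 0xff;		(interrupt status byte)
+ *	count = data >> 8;		(interrupts since the last read)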
+ */ + if (count != sizeof(unsigned int) && count != sizeof(unsigned long)) + return -EINVAL; + + add_wait_queue(&rtc_wait, &wait); + + do { + /* First make it right. Then make it fast. Putting this whole + * block within the parentheses of a while would be too + * confusing. And no, xchg() is not the answer. */ + + __set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irq(&rtc_lock); + data = rtc_irq_data; + rtc_irq_data = 0; + spin_unlock_irq(&rtc_lock); + + if (data != 0) + break; + + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + schedule(); + } while (1); + + if (count == sizeof(unsigned int)) { + retval = put_user(data, + (unsigned int __user *)buf) ?: sizeof(int); + } else { + retval = put_user(data, + (unsigned long __user *)buf) ?: sizeof(long); + } + if (!retval) + retval = count; + out: + __set_current_state(TASK_RUNNING); + remove_wait_queue(&rtc_wait, &wait); + + return retval; +#endif +} + +static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) +{ + struct rtc_time wtime; + +#ifdef RTC_IRQ + if (rtc_has_irq == 0) { + switch (cmd) { + case RTC_AIE_OFF: + case RTC_AIE_ON: + case RTC_PIE_OFF: + case RTC_PIE_ON: + case RTC_UIE_OFF: + case RTC_UIE_ON: + case RTC_IRQP_READ: + case RTC_IRQP_SET: + return -EINVAL; + } + } +#endif + + switch (cmd) { +#ifdef RTC_IRQ + case RTC_AIE_OFF: /* Mask alarm int. enab. bit */ + { + mask_rtc_irq_bit(RTC_AIE); + return 0; + } + case RTC_AIE_ON: /* Allow alarm interrupts. */ + { + set_rtc_irq_bit(RTC_AIE); + return 0; + } + case RTC_PIE_OFF: /* Mask periodic int. enab. bit */ + { + /* can be called from isr via rtc_control() */ + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + mask_rtc_irq_bit_locked(RTC_PIE); + if (rtc_status & RTC_TIMER_ON) { + rtc_status &= ~RTC_TIMER_ON; + del_timer(&rtc_irq_timer); + } + spin_unlock_irqrestore(&rtc_lock, flags); + + return 0; + } + case RTC_PIE_ON: /* Allow periodic ints */ + { + /* can be called from isr via rtc_control() */ + unsigned long flags; + + /* + * We don't really want Joe User enabling more + * than 64Hz of interrupts on a multi-user machine. + */ + if (!kernel && (rtc_freq > rtc_max_user_freq) && + (!capable(CAP_SYS_RESOURCE))) + return -EACCES; + + spin_lock_irqsave(&rtc_lock, flags); + if (!(rtc_status & RTC_TIMER_ON)) { + mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + + 2*HZ/100); + rtc_status |= RTC_TIMER_ON; + } + set_rtc_irq_bit_locked(RTC_PIE); + spin_unlock_irqrestore(&rtc_lock, flags); + + return 0; + } + case RTC_UIE_OFF: /* Mask ints from RTC updates. */ + { + mask_rtc_irq_bit(RTC_UIE); + return 0; + } + case RTC_UIE_ON: /* Allow ints for RTC updates. */ + { + set_rtc_irq_bit(RTC_UIE); + return 0; + } +#endif + case RTC_ALM_READ: /* Read the present alarm time */ + { + /* + * This returns a struct rtc_time. Reading >= 0xc0 + * means "don't care" or "match all". Only the tm_hour, + * tm_min, and tm_sec values are filled in. + */ + memset(&wtime, 0, sizeof(struct rtc_time)); + get_rtc_alm_time(&wtime); + break; + } + case RTC_ALM_SET: /* Store a time into the alarm */ + { + /* + * This expects a struct rtc_time. Writing 0xff means + * "don't care" or "match all". Only the tm_hour, + * tm_min and tm_sec are used. 
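+ *
+ * (In BCD mode each value carries one decimal digit per nibble, e.g.
+ * bin2bcd(59) == 0x59; out-of-range fields are written as 0xff below
+ * to mean "don't care".)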
+ */ + unsigned char hrs, min, sec; + struct rtc_time alm_tm; + + if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg, + sizeof(struct rtc_time))) + return -EFAULT; + + hrs = alm_tm.tm_hour; + min = alm_tm.tm_min; + sec = alm_tm.tm_sec; + + spin_lock_irq(&rtc_lock); + if (hpet_set_alarm_time(hrs, min, sec)) { + /* + * Fallthru and set alarm time in CMOS too, + * so that we will get proper value in RTC_ALM_READ + */ + } + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || + RTC_ALWAYS_BCD) { + if (sec < 60) + sec = bin2bcd(sec); + else + sec = 0xff; + + if (min < 60) + min = bin2bcd(min); + else + min = 0xff; + + if (hrs < 24) + hrs = bin2bcd(hrs); + else + hrs = 0xff; + } + CMOS_WRITE(hrs, RTC_HOURS_ALARM); + CMOS_WRITE(min, RTC_MINUTES_ALARM); + CMOS_WRITE(sec, RTC_SECONDS_ALARM); + spin_unlock_irq(&rtc_lock); + + return 0; + } + case RTC_RD_TIME: /* Read the time/date from RTC */ + { + memset(&wtime, 0, sizeof(struct rtc_time)); + rtc_get_rtc_time(&wtime); + break; + } + case RTC_SET_TIME: /* Set the RTC */ + { + struct rtc_time rtc_tm; + unsigned char mon, day, hrs, min, sec, leap_yr; + unsigned char save_control, save_freq_select; + unsigned int yrs; +#ifdef CONFIG_MACH_DECSTATION + unsigned int real_yrs; +#endif + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg, + sizeof(struct rtc_time))) + return -EFAULT; + + yrs = rtc_tm.tm_year + 1900; + mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ + day = rtc_tm.tm_mday; + hrs = rtc_tm.tm_hour; + min = rtc_tm.tm_min; + sec = rtc_tm.tm_sec; + + if (yrs < 1970) + return -EINVAL; + + leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); + + if ((mon > 12) || (day == 0)) + return -EINVAL; + + if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) + return -EINVAL; + + if ((hrs >= 24) || (min >= 60) || (sec >= 60)) + return -EINVAL; + + yrs -= epoch; + if (yrs > 255) /* They are unsigned */ + return -EINVAL; + + spin_lock_irq(&rtc_lock); +#ifdef CONFIG_MACH_DECSTATION + real_yrs = yrs; + yrs = 72; + + /* + * We want to keep the year set to 73 until March + * for non-leap years, so that Feb, 29th is handled + * correctly. + */ + if (!leap_yr && mon < 3) { + real_yrs--; + yrs = 73; + } +#endif + /* These limits and adjustments are independent of + * whether the chip is in binary mode or not. + */ + if (yrs > 169) { + spin_unlock_irq(&rtc_lock); + return -EINVAL; + } + if (yrs >= 100) + yrs -= 100; + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) + || RTC_ALWAYS_BCD) { + sec = bin2bcd(sec); + min = bin2bcd(min); + hrs = bin2bcd(hrs); + day = bin2bcd(day); + mon = bin2bcd(mon); + yrs = bin2bcd(yrs); + } + + save_control = CMOS_READ(RTC_CONTROL); + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + +#ifdef CONFIG_MACH_DECSTATION + CMOS_WRITE(real_yrs, RTC_DEC_YEAR); +#endif + CMOS_WRITE(yrs, RTC_YEAR); + CMOS_WRITE(mon, RTC_MONTH); + CMOS_WRITE(day, RTC_DAY_OF_MONTH); + CMOS_WRITE(hrs, RTC_HOURS); + CMOS_WRITE(min, RTC_MINUTES); + CMOS_WRITE(sec, RTC_SECONDS); + + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + + spin_unlock_irq(&rtc_lock); + return 0; + } +#ifdef RTC_IRQ + case RTC_IRQP_READ: /* Read the periodic IRQ rate. */ + { + return put_user(rtc_freq, (unsigned long __user *)arg); + } + case RTC_IRQP_SET: /* Set periodic IRQ rate. 
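+ *	(The requested rate must be a power of two between 2 and
+ *	8192 Hz; rtc_init() below restores the CMOS reset default of
+ *	1024 Hz.)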
*/ + { + int tmp = 0; + unsigned char val; + /* can be called from isr via rtc_control() */ + unsigned long flags; + + /* + * The max we can do is 8192Hz. + */ + if ((arg < 2) || (arg > 8192)) + return -EINVAL; + /* + * We don't really want Joe User generating more + * than 64Hz of interrupts on a multi-user machine. + */ + if (!kernel && (arg > rtc_max_user_freq) && + !capable(CAP_SYS_RESOURCE)) + return -EACCES; + + while (arg > (1<func == NULL) + return -EINVAL; + spin_lock_irq(&rtc_lock); + if (rtc_status & RTC_IS_OPEN) { + spin_unlock_irq(&rtc_lock); + return -EBUSY; + } + spin_lock(&rtc_task_lock); + if (rtc_callback) { + spin_unlock(&rtc_task_lock); + spin_unlock_irq(&rtc_lock); + return -EBUSY; + } + rtc_status |= RTC_IS_OPEN; + rtc_callback = task; + spin_unlock(&rtc_task_lock); + spin_unlock_irq(&rtc_lock); + return 0; +#endif +} +EXPORT_SYMBOL(rtc_register); + +int rtc_unregister(rtc_task_t *task) +{ +#ifndef RTC_IRQ + return -EIO; +#else + unsigned char tmp; + + spin_lock_irq(&rtc_lock); + spin_lock(&rtc_task_lock); + if (rtc_callback != task) { + spin_unlock(&rtc_task_lock); + spin_unlock_irq(&rtc_lock); + return -ENXIO; + } + rtc_callback = NULL; + + /* disable controls */ + if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { + tmp = CMOS_READ(RTC_CONTROL); + tmp &= ~RTC_PIE; + tmp &= ~RTC_AIE; + tmp &= ~RTC_UIE; + CMOS_WRITE(tmp, RTC_CONTROL); + CMOS_READ(RTC_INTR_FLAGS); + } + if (rtc_status & RTC_TIMER_ON) { + rtc_status &= ~RTC_TIMER_ON; + del_timer(&rtc_irq_timer); + } + rtc_status &= ~RTC_IS_OPEN; + spin_unlock(&rtc_task_lock); + spin_unlock_irq(&rtc_lock); + return 0; +#endif +} +EXPORT_SYMBOL(rtc_unregister); + +int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) +{ +#ifndef RTC_IRQ + return -EIO; +#else + unsigned long flags; + if (cmd != RTC_PIE_ON && cmd != RTC_PIE_OFF && cmd != RTC_IRQP_SET) + return -EINVAL; + spin_lock_irqsave(&rtc_task_lock, flags); + if (rtc_callback != task) { + spin_unlock_irqrestore(&rtc_task_lock, flags); + return -ENXIO; + } + spin_unlock_irqrestore(&rtc_task_lock, flags); + return rtc_do_ioctl(cmd, arg, 1); +#endif +} +EXPORT_SYMBOL(rtc_control); + +/* + * The various file operations we support. 
+ */ + +static const struct file_operations rtc_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = rtc_read, +#ifdef RTC_IRQ + .poll = rtc_poll, +#endif + .unlocked_ioctl = rtc_ioctl, + .open = rtc_open, + .release = rtc_release, + .fasync = rtc_fasync, +}; + +static struct miscdevice rtc_dev = { + .minor = RTC_MINOR, + .name = "rtc", + .fops = &rtc_fops, +}; + +#ifdef CONFIG_PROC_FS +static const struct file_operations rtc_proc_fops = { + .owner = THIS_MODULE, + .open = rtc_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static resource_size_t rtc_size; + +static struct resource * __init rtc_request_region(resource_size_t size) +{ + struct resource *r; + + if (RTC_IOMAPPED) + r = request_region(RTC_PORT(0), size, "rtc"); + else + r = request_mem_region(RTC_PORT(0), size, "rtc"); + + if (r) + rtc_size = size; + + return r; +} + +static void rtc_release_region(void) +{ + if (RTC_IOMAPPED) + release_region(RTC_PORT(0), rtc_size); + else + release_mem_region(RTC_PORT(0), rtc_size); +} + +static int __init rtc_init(void) +{ +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *ent; +#endif +#if defined(__alpha__) || defined(__mips__) + unsigned int year, ctrl; + char *guess = NULL; +#endif +#ifdef CONFIG_SPARC32 + struct device_node *ebus_dp; + struct platform_device *op; +#else + void *r; +#ifdef RTC_IRQ + irq_handler_t rtc_int_handler_ptr; +#endif +#endif + +#ifdef CONFIG_SPARC32 + for_each_node_by_name(ebus_dp, "ebus") { + struct device_node *dp; + for (dp = ebus_dp; dp; dp = dp->sibling) { + if (!strcmp(dp->name, "rtc")) { + op = of_find_device_by_node(dp); + if (op) { + rtc_port = op->resource[0].start; + rtc_irq = op->irqs[0]; + goto found; + } + } + } + } + rtc_has_irq = 0; + printk(KERN_ERR "rtc_init: no PC rtc found\n"); + return -EIO; + +found: + if (!rtc_irq) { + rtc_has_irq = 0; + goto no_irq; + } + + /* + * XXX Interrupt pin #7 in Espresso is shared between RTC and + * PCI Slot 2 INTA# (and some INTx# in Slot 1). + */ + if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", + (void *)&rtc_port)) { + rtc_has_irq = 0; + printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); + return -EIO; + } +no_irq: +#else + r = rtc_request_region(RTC_IO_EXTENT); + + /* + * If we've already requested a smaller range (for example, because + * PNPBIOS or ACPI told us how the device is configured), the request + * above might fail because it's too big. + * + * If so, request just the range we actually use. + */ + if (!r) + r = rtc_request_region(RTC_IO_EXTENT_USED); + if (!r) { +#ifdef RTC_IRQ + rtc_has_irq = 0; +#endif + printk(KERN_ERR "rtc: I/O resource %lx is not free.\n", + (long)(RTC_PORT(0))); + return -EIO; + } + +#ifdef RTC_IRQ + if (is_hpet_enabled()) { + int err; + + rtc_int_handler_ptr = hpet_rtc_interrupt; + err = hpet_register_irq_handler(rtc_interrupt); + if (err != 0) { + printk(KERN_WARNING "hpet_register_irq_handler failed " + "in rtc_init()."); + return err; + } + } else { + rtc_int_handler_ptr = rtc_interrupt; + } + + if (request_irq(RTC_IRQ, rtc_int_handler_ptr, 0, "rtc", NULL)) { + /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ + rtc_has_irq = 0; + printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); + rtc_release_region(); + + return -EIO; + } + hpet_rtc_timer_init(); + +#endif + +#endif /* CONFIG_SPARC32 vs. 
others */ + + if (misc_register(&rtc_dev)) { +#ifdef RTC_IRQ + free_irq(RTC_IRQ, NULL); + hpet_unregister_irq_handler(rtc_interrupt); + rtc_has_irq = 0; +#endif + rtc_release_region(); + return -ENODEV; + } + +#ifdef CONFIG_PROC_FS + ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops); + if (!ent) + printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); +#endif + +#if defined(__alpha__) || defined(__mips__) + rtc_freq = HZ; + + /* Each operating system on an Alpha uses its own epoch. + Let's try to guess which one we are using now. */ + + if (rtc_is_updating() != 0) + msleep(20); + + spin_lock_irq(&rtc_lock); + year = CMOS_READ(RTC_YEAR); + ctrl = CMOS_READ(RTC_CONTROL); + spin_unlock_irq(&rtc_lock); + + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + year = bcd2bin(year); /* This should never happen... */ + + if (year < 20) { + epoch = 2000; + guess = "SRM (post-2000)"; + } else if (year >= 20 && year < 48) { + epoch = 1980; + guess = "ARC console"; + } else if (year >= 48 && year < 72) { + epoch = 1952; + guess = "Digital UNIX"; +#if defined(__mips__) + } else if (year >= 72 && year < 74) { + epoch = 2000; + guess = "Digital DECstation"; +#else + } else if (year >= 70) { + epoch = 1900; + guess = "Standard PC (1900)"; +#endif + } + if (guess) + printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", + guess, epoch); +#endif +#ifdef RTC_IRQ + if (rtc_has_irq == 0) + goto no_irq2; + + spin_lock_irq(&rtc_lock); + rtc_freq = 1024; + if (!hpet_set_periodic_freq(rtc_freq)) { + /* + * Initialize periodic frequency to CMOS reset default, + * which is 1024Hz + */ + CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), + RTC_FREQ_SELECT); + } + spin_unlock_irq(&rtc_lock); +no_irq2: +#endif + + (void) init_sysctl(); + + printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n"); + + return 0; +} + +static void __exit rtc_exit(void) +{ + cleanup_sysctl(); + remove_proc_entry("driver/rtc", NULL); + misc_deregister(&rtc_dev); + +#ifdef CONFIG_SPARC32 + if (rtc_has_irq) + free_irq(rtc_irq, &rtc_port); +#else + rtc_release_region(); +#ifdef RTC_IRQ + if (rtc_has_irq) { + free_irq(RTC_IRQ, NULL); + hpet_unregister_irq_handler(hpet_rtc_interrupt); + } +#endif +#endif /* CONFIG_SPARC32 */ +} + +module_init(rtc_init); +module_exit(rtc_exit); + +#ifdef RTC_IRQ +/* + * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. + * (usually during an IDE disk interrupt, with IRQ unmasking off) + * Since the interrupt handler doesn't get called, the IRQ status + * byte doesn't get read, and the RTC stops generating interrupts. + * A timer is set, and will call this function if/when that happens. + * To get it out of this stalled state, we just read the status. + * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. + * (You *really* shouldn't be trying to use a non-realtime system + * for something that requires a steady > 1KHz signal anyways.) + */ + +static void rtc_dropped_irq(unsigned long data) +{ + unsigned long freq; + + spin_lock_irq(&rtc_lock); + + if (hpet_rtc_dropped_irq()) { + spin_unlock_irq(&rtc_lock); + return; + } + + /* Just in case someone disabled the timer from behind our back... 
*/ + if (rtc_status & RTC_TIMER_ON) + mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); + + rtc_irq_data += ((rtc_freq/HZ)<<8); + rtc_irq_data &= ~0xff; + rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */ + + freq = rtc_freq; + + spin_unlock_irq(&rtc_lock); + + printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", + freq); + + /* Now we have new data */ + wake_up_interruptible(&rtc_wait); + + kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); +} +#endif + +#ifdef CONFIG_PROC_FS +/* + * Info exported via "/proc/driver/rtc". + */ + +static int rtc_proc_show(struct seq_file *seq, void *v) +{ +#define YN(bit) ((ctrl & bit) ? "yes" : "no") +#define NY(bit) ((ctrl & bit) ? "no" : "yes") + struct rtc_time tm; + unsigned char batt, ctrl; + unsigned long freq; + + spin_lock_irq(&rtc_lock); + batt = CMOS_READ(RTC_VALID) & RTC_VRT; + ctrl = CMOS_READ(RTC_CONTROL); + freq = rtc_freq; + spin_unlock_irq(&rtc_lock); + + + rtc_get_rtc_time(&tm); + + /* + * There is no way to tell if the luser has the RTC set for local + * time or for Universal Standard Time (GMT). Probably local though. + */ + seq_printf(seq, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n" + "rtc_epoch\t: %04lu\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch); + + get_rtc_alm_time(&tm); + + /* + * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will + * match any value for that particular field. Values that are + * greater than a valid time, but less than 0xc0 shouldn't appear. + */ + seq_puts(seq, "alarm\t\t: "); + if (tm.tm_hour <= 24) + seq_printf(seq, "%02d:", tm.tm_hour); + else + seq_puts(seq, "**:"); + + if (tm.tm_min <= 59) + seq_printf(seq, "%02d:", tm.tm_min); + else + seq_puts(seq, "**:"); + + if (tm.tm_sec <= 59) + seq_printf(seq, "%02d\n", tm.tm_sec); + else + seq_puts(seq, "**\n"); + + seq_printf(seq, + "DST_enable\t: %s\n" + "BCD\t\t: %s\n" + "24hr\t\t: %s\n" + "square_wave\t: %s\n" + "alarm_IRQ\t: %s\n" + "update_IRQ\t: %s\n" + "periodic_IRQ\t: %s\n" + "periodic_freq\t: %ld\n" + "batt_status\t: %s\n", + YN(RTC_DST_EN), + NY(RTC_DM_BINARY), + YN(RTC_24H), + YN(RTC_SQWE), + YN(RTC_AIE), + YN(RTC_UIE), + YN(RTC_PIE), + freq, + batt ? "okay" : "dead"); + + return 0; +#undef YN +#undef NY +} + +static int rtc_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtc_proc_show, NULL); +} +#endif + +static void rtc_get_rtc_time(struct rtc_time *rtc_tm) +{ + unsigned long uip_watchdog = jiffies, flags; + unsigned char ctrl; +#ifdef CONFIG_MACH_DECSTATION + unsigned int real_year; +#endif + + /* + * read RTC once any update in progress is done. The update + * can take just over 2ms. We wait 20ms. There is no need to + * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. + * If you need to know *exactly* when a second has started, enable + * periodic update complete interrupts, (via ioctl) and then + * immediately read /dev/rtc which will block until you get the IRQ. + * Once the read clears, read the RTC time (again via ioctl). Easy. + */ + + while (rtc_is_updating() != 0 && + time_before(jiffies, uip_watchdog + 2*HZ/100)) + cpu_relax(); + + /* + * Only the values that we read from the RTC are set. We leave + * tm_wday, tm_yday and tm_isdst untouched. Note that while the + * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is + * only updated by the RTC when initially set to a non-zero value. 
+ */ + spin_lock_irqsave(&rtc_lock, flags); + rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); + rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); + rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); + rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); + rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); + rtc_tm->tm_year = CMOS_READ(RTC_YEAR); + /* Only set from 2.6.16 onwards */ + rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK); + +#ifdef CONFIG_MACH_DECSTATION + real_year = CMOS_READ(RTC_DEC_YEAR); +#endif + ctrl = CMOS_READ(RTC_CONTROL); + spin_unlock_irqrestore(&rtc_lock, flags); + + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); + rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); + rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); + rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); + rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); + rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); + rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday); + } + +#ifdef CONFIG_MACH_DECSTATION + rtc_tm->tm_year += real_year - 72; +#endif + + /* + * Account for differences between how the RTC uses the values + * and how they are defined in a struct rtc_time; + */ + rtc_tm->tm_year += epoch - 1900; + if (rtc_tm->tm_year <= 69) + rtc_tm->tm_year += 100; + + rtc_tm->tm_mon--; +} + +static void get_rtc_alm_time(struct rtc_time *alm_tm) +{ + unsigned char ctrl; + + /* + * Only the values that we read from the RTC are set. That + * means only tm_hour, tm_min, and tm_sec. + */ + spin_lock_irq(&rtc_lock); + alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM); + alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM); + alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM); + ctrl = CMOS_READ(RTC_CONTROL); + spin_unlock_irq(&rtc_lock); + + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec); + alm_tm->tm_min = bcd2bin(alm_tm->tm_min); + alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour); + } +} + +#ifdef RTC_IRQ +/* + * Used to disable/enable interrupts for any one of UIE, AIE, PIE. + * Rumour has it that if you frob the interrupt enable/disable + * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to + * ensure you actually start getting interrupts. Probably for + * compatibility with older/broken chipset RTC implementations. + * We also clear out any old irq data after an ioctl() that + * meddles with the interrupt enable/disable bits. + */ + +static void mask_rtc_irq_bit_locked(unsigned char bit) +{ + unsigned char val; + + if (hpet_mask_rtc_irq_bit(bit)) + return; + val = CMOS_READ(RTC_CONTROL); + val &= ~bit; + CMOS_WRITE(val, RTC_CONTROL); + CMOS_READ(RTC_INTR_FLAGS); + + rtc_irq_data = 0; +} + +static void set_rtc_irq_bit_locked(unsigned char bit) +{ + unsigned char val; + + if (hpet_set_rtc_irq_bit(bit)) + return; + val = CMOS_READ(RTC_CONTROL); + val |= bit; + CMOS_WRITE(val, RTC_CONTROL); + CMOS_READ(RTC_INTR_FLAGS); + + rtc_irq_data = 0; +} +#endif + +MODULE_AUTHOR("Paul Gortmaker"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(RTC_MINOR); diff --git a/kernel/drivers/char/scx200_gpio.c b/kernel/drivers/char/scx200_gpio.c new file mode 100644 index 000000000..0bc135b9b --- /dev/null +++ b/kernel/drivers/char/scx200_gpio.c @@ -0,0 +1,132 @@ +/* linux/drivers/char/scx200_gpio.c + + National Semiconductor SCx200 GPIO driver. Allows a user space + process to play with the GPIO pins. 
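+
+   Illustrative use (the device node name is an assumption, since nodes
+   for the dynamic major must be created by hand; each minor selects one
+   pin and the commands are parsed by nsc_gpio_write()):
+
+	int fd = open("/dev/scx200_gpio0", O_RDWR);
+	write(fd, "1", 1);	drive pin 0 high
+	write(fd, "0", 1);	drive pin 0 low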
+ + Copyright (c) 2001,2002 Christer Weinigel */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define DRVNAME "scx200_gpio" + +static struct platform_device *pdev; + +MODULE_AUTHOR("Christer Weinigel "); +MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver"); +MODULE_LICENSE("GPL"); + +static int major = 0; /* default to dynamic major */ +module_param(major, int, 0); +MODULE_PARM_DESC(major, "Major device number"); + +#define MAX_PINS 32 /* 64 later, when known ok */ + +struct nsc_gpio_ops scx200_gpio_ops = { + .owner = THIS_MODULE, + .gpio_config = scx200_gpio_configure, + .gpio_dump = nsc_gpio_dump, + .gpio_get = scx200_gpio_get, + .gpio_set = scx200_gpio_set, + .gpio_change = scx200_gpio_change, + .gpio_current = scx200_gpio_current +}; +EXPORT_SYMBOL_GPL(scx200_gpio_ops); + +static int scx200_gpio_open(struct inode *inode, struct file *file) +{ + unsigned m = iminor(inode); + file->private_data = &scx200_gpio_ops; + + if (m >= MAX_PINS) + return -EINVAL; + return nonseekable_open(inode, file); +} + +static int scx200_gpio_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations scx200_gpio_fileops = { + .owner = THIS_MODULE, + .write = nsc_gpio_write, + .read = nsc_gpio_read, + .open = scx200_gpio_open, + .release = scx200_gpio_release, + .llseek = no_llseek, +}; + +static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ + +static int __init scx200_gpio_init(void) +{ + int rc; + dev_t devid; + + if (!scx200_gpio_present()) { + printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n"); + return -ENODEV; + } + + /* support dev_dbg() with pdev->dev */ + pdev = platform_device_alloc(DRVNAME, 0); + if (!pdev) + return -ENOMEM; + + rc = platform_device_add(pdev); + if (rc) + goto undo_malloc; + + /* nsc_gpio uses dev_dbg(), so needs this */ + scx200_gpio_ops.dev = &pdev->dev; + + if (major) { + devid = MKDEV(major, 0); + rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio"); + } else { + rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio"); + major = MAJOR(devid); + } + if (rc < 0) { + dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc); + goto undo_platform_device_add; + } + + cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops); + cdev_add(&scx200_gpio_cdev, devid, MAX_PINS); + + return 0; /* succeed */ + +undo_platform_device_add: + platform_device_del(pdev); +undo_malloc: + platform_device_put(pdev); + + return rc; +} + +static void __exit scx200_gpio_cleanup(void) +{ + cdev_del(&scx200_gpio_cdev); + /* cdev_put(&scx200_gpio_cdev); */ + + unregister_chrdev_region(MKDEV(major, 0), MAX_PINS); + platform_device_unregister(pdev); +} + +module_init(scx200_gpio_init); +module_exit(scx200_gpio_cleanup); diff --git a/kernel/drivers/char/snsc.c b/kernel/drivers/char/snsc.c new file mode 100644 index 000000000..8bab59292 --- /dev/null +++ b/kernel/drivers/char/snsc.c @@ -0,0 +1,465 @@ +/* + * SN Platform system controller communication support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * System controller communication driver + * + * This driver allows a user process to communicate with the system + * controller (a.k.a. "IRouter") network in an SGI SN system. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "snsc.h" + +#define SYSCTL_BASENAME "snsc" + +#define SCDRV_BUFSZ 2048 +#define SCDRV_TIMEOUT 1000 + +static DEFINE_MUTEX(scdrv_mutex); +static irqreturn_t +scdrv_interrupt(int irq, void *subch_data) +{ + struct subch_data_s *sd = subch_data; + unsigned long flags; + int status; + + spin_lock_irqsave(&sd->sd_rlock, flags); + spin_lock(&sd->sd_wlock); + status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch); + + if (status > 0) { + if (status & SAL_IROUTER_INTR_RECV) { + wake_up(&sd->sd_rq); + } + if (status & SAL_IROUTER_INTR_XMIT) { + ia64_sn_irtr_intr_disable + (sd->sd_nasid, sd->sd_subch, + SAL_IROUTER_INTR_XMIT); + wake_up(&sd->sd_wq); + } + } + spin_unlock(&sd->sd_wlock); + spin_unlock_irqrestore(&sd->sd_rlock, flags); + return IRQ_HANDLED; +} + +/* + * scdrv_open + * + * Reserve a subchannel for system controller communication. + */ + +static int +scdrv_open(struct inode *inode, struct file *file) +{ + struct sysctl_data_s *scd; + struct subch_data_s *sd; + int rv; + + /* look up device info for this device file */ + scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev); + + /* allocate memory for subchannel data */ + sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); + if (sd == NULL) { + printk("%s: couldn't allocate subchannel data\n", + __func__); + return -ENOMEM; + } + + /* initialize subch_data_s fields */ + sd->sd_nasid = scd->scd_nasid; + sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid); + + if (sd->sd_subch < 0) { + kfree(sd); + printk("%s: couldn't allocate subchannel\n", __func__); + return -EBUSY; + } + + spin_lock_init(&sd->sd_rlock); + spin_lock_init(&sd->sd_wlock); + init_waitqueue_head(&sd->sd_rq); + init_waitqueue_head(&sd->sd_wq); + sema_init(&sd->sd_rbs, 1); + sema_init(&sd->sd_wbs, 1); + + file->private_data = sd; + + /* hook this subchannel up to the system controller interrupt */ + mutex_lock(&scdrv_mutex); + rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt, + IRQF_SHARED, SYSCTL_BASENAME, sd); + if (rv) { + ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); + kfree(sd); + printk("%s: irq request failed (%d)\n", __func__, rv); + mutex_unlock(&scdrv_mutex); + return -EBUSY; + } + mutex_unlock(&scdrv_mutex); + return 0; +} + +/* + * scdrv_release + * + * Release a previously-reserved subchannel. + */ + +static int +scdrv_release(struct inode *inode, struct file *file) +{ + struct subch_data_s *sd = (struct subch_data_s *) file->private_data; + int rv; + + /* free the interrupt */ + free_irq(SGI_UART_VECTOR, sd); + + /* ask SAL to close the subchannel */ + rv = ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); + + kfree(sd); + return rv; +} + +/* + * scdrv_read + * + * Called to read bytes from the open IRouter pipe. + * + */ + +static inline int +read_status_check(struct subch_data_s *sd, int *len) +{ + return ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, sd->sd_rb, len); +} + +static ssize_t +scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) +{ + int status; + int len; + unsigned long flags; + struct subch_data_s *sd = (struct subch_data_s *) file->private_data; + + /* try to get control of the read buffer */ + if (down_trylock(&sd->sd_rbs)) { + /* somebody else has it now; + * if we're non-blocking, then exit... 
+ */ + if (file->f_flags & O_NONBLOCK) { + return -EAGAIN; + } + /* ...or if we want to block, then do so here */ + if (down_interruptible(&sd->sd_rbs)) { + /* something went wrong with wait */ + return -ERESTARTSYS; + } + } + + /* anything to read? */ + len = CHUNKSIZE; + spin_lock_irqsave(&sd->sd_rlock, flags); + status = read_status_check(sd, &len); + + /* if not, and we're blocking I/O, loop */ + while (status < 0) { + DECLARE_WAITQUEUE(wait, current); + + if (file->f_flags & O_NONBLOCK) { + spin_unlock_irqrestore(&sd->sd_rlock, flags); + up(&sd->sd_rbs); + return -EAGAIN; + } + + len = CHUNKSIZE; + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&sd->sd_rq, &wait); + spin_unlock_irqrestore(&sd->sd_rlock, flags); + + schedule_timeout(SCDRV_TIMEOUT); + + remove_wait_queue(&sd->sd_rq, &wait); + if (signal_pending(current)) { + /* wait was interrupted */ + up(&sd->sd_rbs); + return -ERESTARTSYS; + } + + spin_lock_irqsave(&sd->sd_rlock, flags); + status = read_status_check(sd, &len); + } + spin_unlock_irqrestore(&sd->sd_rlock, flags); + + if (len > 0) { + /* we read something in the last read_status_check(); copy + * it out to user space + */ + if (count < len) { + pr_debug("%s: only accepting %d of %d bytes\n", + __func__, (int) count, len); + } + len = min((int) count, len); + if (copy_to_user(buf, sd->sd_rb, len)) + len = -EFAULT; + } + + /* release the read buffer and wake anyone who might be + * waiting for it + */ + up(&sd->sd_rbs); + + /* return the number of characters read in */ + return len; +} + +/* + * scdrv_write + * + * Writes a chunk of an IRouter packet (or other system controller data) + * to the system controller. + * + */ +static inline int +write_status_check(struct subch_data_s *sd, int count) +{ + return ia64_sn_irtr_send(sd->sd_nasid, sd->sd_subch, sd->sd_wb, count); +} + +static ssize_t +scdrv_write(struct file *file, const char __user *buf, + size_t count, loff_t *f_pos) +{ + unsigned long flags; + int status; + struct subch_data_s *sd = (struct subch_data_s *) file->private_data; + + /* try to get control of the write buffer */ + if (down_trylock(&sd->sd_wbs)) { + /* somebody else has it now; + * if we're non-blocking, then exit... 
+ */ + if (file->f_flags & O_NONBLOCK) { + return -EAGAIN; + } + /* ...or if we want to block, then do so here */ + if (down_interruptible(&sd->sd_wbs)) { + /* something went wrong with wait */ + return -ERESTARTSYS; + } + } + + count = min((int) count, CHUNKSIZE); + if (copy_from_user(sd->sd_wb, buf, count)) { + up(&sd->sd_wbs); + return -EFAULT; + } + + /* try to send the buffer */ + spin_lock_irqsave(&sd->sd_wlock, flags); + status = write_status_check(sd, count); + + /* if we failed, and we want to block, then loop */ + while (status <= 0) { + DECLARE_WAITQUEUE(wait, current); + + if (file->f_flags & O_NONBLOCK) { + spin_unlock(&sd->sd_wlock); + up(&sd->sd_wbs); + return -EAGAIN; + } + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&sd->sd_wq, &wait); + spin_unlock_irqrestore(&sd->sd_wlock, flags); + + schedule_timeout(SCDRV_TIMEOUT); + + remove_wait_queue(&sd->sd_wq, &wait); + if (signal_pending(current)) { + /* wait was interrupted */ + up(&sd->sd_wbs); + return -ERESTARTSYS; + } + + spin_lock_irqsave(&sd->sd_wlock, flags); + status = write_status_check(sd, count); + } + spin_unlock_irqrestore(&sd->sd_wlock, flags); + + /* release the write buffer and wake anyone who's waiting for it */ + up(&sd->sd_wbs); + + /* return the number of characters accepted (should be the complete + * "chunk" as requested) + */ + if ((status >= 0) && (status < count)) { + pr_debug("Didn't accept the full chunk; %d of %d\n", + status, (int) count); + } + return status; +} + +static unsigned int +scdrv_poll(struct file *file, struct poll_table_struct *wait) +{ + unsigned int mask = 0; + int status = 0; + struct subch_data_s *sd = (struct subch_data_s *) file->private_data; + unsigned long flags; + + poll_wait(file, &sd->sd_rq, wait); + poll_wait(file, &sd->sd_wq, wait); + + spin_lock_irqsave(&sd->sd_rlock, flags); + spin_lock(&sd->sd_wlock); + status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch); + spin_unlock(&sd->sd_wlock); + spin_unlock_irqrestore(&sd->sd_rlock, flags); + + if (status > 0) { + if (status & SAL_IROUTER_INTR_RECV) { + mask |= POLLIN | POLLRDNORM; + } + if (status & SAL_IROUTER_INTR_XMIT) { + mask |= POLLOUT | POLLWRNORM; + } + } + + return mask; +} + +static const struct file_operations scdrv_fops = { + .owner = THIS_MODULE, + .read = scdrv_read, + .write = scdrv_write, + .poll = scdrv_poll, + .open = scdrv_open, + .release = scdrv_release, + .llseek = noop_llseek, +}; + +static struct class *snsc_class; + +/* + * scdrv_init + * + * Called at boot time to initialize the system controller communication + * facility. 
+ */ +int __init +scdrv_init(void) +{ + geoid_t geoid; + cnodeid_t cnode; + char devname[32]; + char *devnamep; + struct sysctl_data_s *scd; + void *salbuf; + dev_t first_dev, dev; + nasid_t event_nasid; + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + event_nasid = ia64_sn_get_console_nasid(); + + if (alloc_chrdev_region(&first_dev, 0, num_cnodes, + SYSCTL_BASENAME) < 0) { + printk("%s: failed to register SN system controller device\n", + __func__); + return -ENODEV; + } + snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME); + + for (cnode = 0; cnode < num_cnodes; cnode++) { + geoid = cnodeid_get_geoid(cnode); + devnamep = devname; + format_module_id(devnamep, geo_module(geoid), + MODULE_FORMAT_BRIEF); + devnamep = devname + strlen(devname); + sprintf(devnamep, "^%d#%d", geo_slot(geoid), + geo_slab(geoid)); + + /* allocate sysctl device data */ + scd = kzalloc(sizeof (struct sysctl_data_s), + GFP_KERNEL); + if (!scd) { + printk("%s: failed to allocate device info" + "for %s/%s\n", __func__, + SYSCTL_BASENAME, devname); + continue; + } + + /* initialize sysctl device data fields */ + scd->scd_nasid = cnodeid_to_nasid(cnode); + if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) { + printk("%s: failed to allocate driver buffer" + "(%s%s)\n", __func__, + SYSCTL_BASENAME, devname); + kfree(scd); + continue; + } + + if (ia64_sn_irtr_init(scd->scd_nasid, salbuf, + SCDRV_BUFSZ) < 0) { + printk + ("%s: failed to initialize SAL for" + " system controller communication" + " (%s/%s): outdated PROM?\n", + __func__, SYSCTL_BASENAME, devname); + kfree(scd); + kfree(salbuf); + continue; + } + + dev = first_dev + cnode; + cdev_init(&scd->scd_cdev, &scdrv_fops); + if (cdev_add(&scd->scd_cdev, dev, 1)) { + printk("%s: failed to register system" + " controller device (%s%s)\n", + __func__, SYSCTL_BASENAME, devname); + kfree(scd); + kfree(salbuf); + continue; + } + + device_create(snsc_class, NULL, dev, NULL, + "%s", devname); + + ia64_sn_irtr_intr_enable(scd->scd_nasid, + 0 /*ignored */ , + SAL_IROUTER_INTR_RECV); + + /* on the console nasid, prepare to receive + * system controller environmental events + */ + if(scd->scd_nasid == event_nasid) { + scdrv_event_init(scd); + } + } + return 0; +} + +module_init(scdrv_init); diff --git a/kernel/drivers/char/snsc.h b/kernel/drivers/char/snsc.h new file mode 100644 index 000000000..e8c52c882 --- /dev/null +++ b/kernel/drivers/char/snsc.h @@ -0,0 +1,92 @@ +/* + * SN Platform system controller communication support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * This file contains macros and data types for communication with the + * system controllers in SGI SN systems. + */ + +#ifndef _SN_SYSCTL_H_ +#define _SN_SYSCTL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#define CHUNKSIZE 127 + +/* This structure is used to track an open subchannel. 
*/ +struct subch_data_s { + nasid_t sd_nasid; /* node on which the subchannel was opened */ + int sd_subch; /* subchannel number */ + spinlock_t sd_rlock; /* monitor lock for rsv */ + spinlock_t sd_wlock; /* monitor lock for wsv */ + wait_queue_head_t sd_rq; /* wait queue for readers */ + wait_queue_head_t sd_wq; /* wait queue for writers */ + struct semaphore sd_rbs; /* semaphore for read buffer */ + struct semaphore sd_wbs; /* semaphore for write buffer */ + + char sd_rb[CHUNKSIZE]; /* read buffer */ + char sd_wb[CHUNKSIZE]; /* write buffer */ +}; + +struct sysctl_data_s { + struct cdev scd_cdev; /* Character device info */ + nasid_t scd_nasid; /* Node on which subchannels are opened. */ +}; + + +/* argument types */ +#define IR_ARG_INT 0x00 /* 4-byte integer (big-endian) */ +#define IR_ARG_ASCII 0x01 /* null-terminated ASCII string */ +#define IR_ARG_UNKNOWN 0x80 /* unknown data type. The low + * 7 bits will contain the data + * length. */ +#define IR_ARG_UNKNOWN_LENGTH_MASK 0x7f + + +/* system controller event codes */ +#define EV_CLASS_MASK 0xf000ul +#define EV_SEVERITY_MASK 0x0f00ul +#define EV_COMPONENT_MASK 0x00fful + +#define EV_CLASS_POWER 0x1000ul +#define EV_CLASS_FAN 0x2000ul +#define EV_CLASS_TEMP 0x3000ul +#define EV_CLASS_ENV 0x4000ul +#define EV_CLASS_TEST_FAULT 0x5000ul +#define EV_CLASS_TEST_WARNING 0x6000ul +#define EV_CLASS_PWRD_NOTIFY 0x8000ul + +/* ENV class codes */ +#define ENV_PWRDN_PEND 0x4101ul + +#define EV_SEVERITY_POWER_STABLE 0x0000ul +#define EV_SEVERITY_POWER_LOW_WARNING 0x0100ul +#define EV_SEVERITY_POWER_HIGH_WARNING 0x0200ul +#define EV_SEVERITY_POWER_HIGH_FAULT 0x0300ul +#define EV_SEVERITY_POWER_LOW_FAULT 0x0400ul + +#define EV_SEVERITY_FAN_STABLE 0x0000ul +#define EV_SEVERITY_FAN_WARNING 0x0100ul +#define EV_SEVERITY_FAN_FAULT 0x0200ul + +#define EV_SEVERITY_TEMP_STABLE 0x0000ul +#define EV_SEVERITY_TEMP_ADVISORY 0x0100ul +#define EV_SEVERITY_TEMP_CRITICAL 0x0200ul +#define EV_SEVERITY_TEMP_FAULT 0x0300ul + +void scdrv_event_init(struct sysctl_data_s *); + +#endif /* _SN_SYSCTL_H_ */ diff --git a/kernel/drivers/char/snsc_event.c b/kernel/drivers/char/snsc_event.c new file mode 100644 index 000000000..59bcefd6e --- /dev/null +++ b/kernel/drivers/char/snsc_event.c @@ -0,0 +1,303 @@ +/* + * SN Platform system controller communication support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * System controller event handler + * + * These routines deal with environmental events arriving from the + * system controllers. + */ + +#include +#include +#include +#include +#include +#include +#include "snsc.h" + +static struct subch_data_s *event_sd; + +void scdrv_event(unsigned long); +DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0); + +/* + * scdrv_event_interrupt + * + * Pull incoming environmental events off the physical link to the + * system controller and put them in a temporary holding area in SAL. + * Schedule scdrv_event() to move them along to their ultimate + * destination. 
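The event-code constants defined in snsc.h above pack three fields into a single word: class in bits 12-15, severity in bits 8-11 and a component index in bits 0-7. A made-up code of 0x2102, for instance, masks out to fan class, fan-warning severity, component 2:

#include <linux/printk.h>

/* Illustrative decode only; 0x2102 is a fabricated sample value. */
static void decode_event_code(unsigned long code)
{
	unsigned long class     = code & EV_CLASS_MASK;     /* 0x2000 = EV_CLASS_FAN */
	unsigned long severity  = code & EV_SEVERITY_MASK;  /* 0x0100 = EV_SEVERITY_FAN_WARNING */
	unsigned long component = code & EV_COMPONENT_MASK; /* 0x02 = second fan */

	pr_info("class %#lx severity %#lx component %#lx\n",
		class, severity, component);
}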
+ */ +static irqreturn_t +scdrv_event_interrupt(int irq, void *subch_data) +{ + struct subch_data_s *sd = subch_data; + unsigned long flags; + int status; + + spin_lock_irqsave(&sd->sd_rlock, flags); + status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch); + + if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) { + tasklet_schedule(&sn_sysctl_event); + } + spin_unlock_irqrestore(&sd->sd_rlock, flags); + return IRQ_HANDLED; +} + + +/* + * scdrv_parse_event + * + * Break an event (as read from SAL) into useful pieces so we can decide + * what to do with it. + */ +static int +scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) +{ + char *desc_end; + + /* record event source address */ + *src = get_unaligned_be32(event); + event += 4; /* move on to event code */ + + /* record the system controller's event code */ + *code = get_unaligned_be32(event); + event += 4; /* move on to event arguments */ + + /* how many arguments are in the packet? */ + if (*event++ != 2) { + /* if not 2, give up */ + return -1; + } + + /* parse out the ESP code */ + if (*event++ != IR_ARG_INT) { + /* not an integer argument, so give up */ + return -1; + } + *esp_code = get_unaligned_be32(event); + event += 4; + + /* parse out the event description */ + if (*event++ != IR_ARG_ASCII) { + /* not an ASCII string, so give up */ + return -1; + } + event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */ + event += 2; /* skip leading CR/LF */ + desc_end = desc + sprintf(desc, "%s", event); + + /* strip trailing CR/LF (if any) */ + for (desc_end--; + (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa)); + desc_end--) { + *desc_end = '\0'; + } + + return 0; +} + + +/* + * scdrv_event_severity + * + * Figure out how urgent a message we should write to the console/syslog + * via printk. + */ +static char * +scdrv_event_severity(int code) +{ + int ev_class = (code & EV_CLASS_MASK); + int ev_severity = (code & EV_SEVERITY_MASK); + char *pk_severity = KERN_NOTICE; + + switch (ev_class) { + case EV_CLASS_POWER: + switch (ev_severity) { + case EV_SEVERITY_POWER_LOW_WARNING: + case EV_SEVERITY_POWER_HIGH_WARNING: + pk_severity = KERN_WARNING; + break; + case EV_SEVERITY_POWER_HIGH_FAULT: + case EV_SEVERITY_POWER_LOW_FAULT: + pk_severity = KERN_ALERT; + break; + } + break; + case EV_CLASS_FAN: + switch (ev_severity) { + case EV_SEVERITY_FAN_WARNING: + pk_severity = KERN_WARNING; + break; + case EV_SEVERITY_FAN_FAULT: + pk_severity = KERN_CRIT; + break; + } + break; + case EV_CLASS_TEMP: + switch (ev_severity) { + case EV_SEVERITY_TEMP_ADVISORY: + pk_severity = KERN_WARNING; + break; + case EV_SEVERITY_TEMP_CRITICAL: + pk_severity = KERN_CRIT; + break; + case EV_SEVERITY_TEMP_FAULT: + pk_severity = KERN_ALERT; + break; + } + break; + case EV_CLASS_ENV: + pk_severity = KERN_ALERT; + break; + case EV_CLASS_TEST_FAULT: + pk_severity = KERN_ALERT; + break; + case EV_CLASS_TEST_WARNING: + pk_severity = KERN_WARNING; + break; + case EV_CLASS_PWRD_NOTIFY: + pk_severity = KERN_ALERT; + break; + } + + return pk_severity; +} + + +/* + * scdrv_dispatch_event + * + * Do the right thing with an incoming event. That's often nothing + * more than printing it to the system log. For power-down notifications + * we start a graceful shutdown. 
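Read back from the pointer arithmetic in scdrv_parse_event(), the event packet has the fixed layout below. The offsets are inferred from the code above, not taken from a published SAL specification.

/*
 * Inferred wire layout of an L1 event packet:
 *
 *   bytes  0..3   source address            (big-endian u32)
 *   bytes  4..7   system controller code    (big-endian u32)
 *   byte   8      argument count            (must be 2)
 *   byte   9      IR_ARG_INT tag
 *   bytes 10..13  ESP code                  (big-endian u32)
 *   byte  14      IR_ARG_ASCII tag
 *   bytes 15..16  leading CR/LF             (skipped)
 *   bytes 17..    description text; forcibly NUL-terminated at
 *                 CHUNKSIZE-1, trailing CR/LF stripped
 */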
+ */ +static void +scdrv_dispatch_event(char *event, int len) +{ + static int snsc_shutting_down = 0; + int code, esp_code, src, class; + char desc[CHUNKSIZE]; + char *severity; + + if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) { + /* ignore uninterpretible event */ + return; + } + + /* how urgent is the message? */ + severity = scdrv_event_severity(code); + + class = (code & EV_CLASS_MASK); + + if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) { + if (snsc_shutting_down) + return; + + snsc_shutting_down = 1; + + /* give a message for each type of event */ + if (class == EV_CLASS_PWRD_NOTIFY) + printk(KERN_NOTICE "Power off indication received." + " Sending SIGPWR to init...\n"); + else if (code == ENV_PWRDN_PEND) + printk(KERN_CRIT "WARNING: Shutting down the system" + " due to a critical environmental condition." + " Sending SIGPWR to init...\n"); + + /* give a SIGPWR signal to init proc */ + kill_cad_pid(SIGPWR, 0); + } else { + /* print to system log */ + printk("%s|$(0x%x)%s\n", severity, esp_code, desc); + } +} + + +/* + * scdrv_event + * + * Called as a tasklet when an event arrives from the L1. Read the event + * from where it's temporarily stored in SAL and call scdrv_dispatch_event() + * to send it on its way. Keep trying to read events until SAL indicates + * that there are no more immediately available. + */ +void +scdrv_event(unsigned long dummy) +{ + int status; + int len; + unsigned long flags; + struct subch_data_s *sd = event_sd; + + /* anything to read? */ + len = CHUNKSIZE; + spin_lock_irqsave(&sd->sd_rlock, flags); + status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, + sd->sd_rb, &len); + + while (!(status < 0)) { + spin_unlock_irqrestore(&sd->sd_rlock, flags); + scdrv_dispatch_event(sd->sd_rb, len); + len = CHUNKSIZE; + spin_lock_irqsave(&sd->sd_rlock, flags); + status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, + sd->sd_rb, &len); + } + spin_unlock_irqrestore(&sd->sd_rlock, flags); +} + + +/* + * scdrv_event_init + * + * Sets up a system controller subchannel to begin receiving event + * messages. This is sort of a specialized version of scdrv_open() + * in drivers/char/sn_sysctl.c. 
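scdrv_event() above is the usual drain loop: each record is pulled from SAL with sd_rlock held, but the lock is dropped around scdrv_dispatch_event() so the printk and signalling work does not run under the spinlock. Reduced to its skeleton, with struct my_dev, fetch() and process() as placeholders for this sketch:

#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	char buf[128];		/* CHUNKSIZE-sized bounce buffer */
};

int fetch(struct my_dev *dev, int *len);	/* ia64_sn_irtr_recv() in the driver */
void process(char *buf, int len);		/* scdrv_dispatch_event() */

static void drain_events(struct my_dev *dev)
{
	unsigned long flags;
	int len;

	spin_lock_irqsave(&dev->lock, flags);
	while (fetch(dev, &len) >= 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		process(dev->buf, len);		/* runs unlocked */
		spin_lock_irqsave(&dev->lock, flags);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}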
+ */ +void +scdrv_event_init(struct sysctl_data_s *scd) +{ + int rv; + + event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); + if (event_sd == NULL) { + printk(KERN_WARNING "%s: couldn't allocate subchannel info" + " for event monitoring\n", __func__); + return; + } + + /* initialize subch_data_s fields */ + event_sd->sd_nasid = scd->scd_nasid; + spin_lock_init(&event_sd->sd_rlock); + + /* ask the system controllers to send events to this node */ + event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid); + + if (event_sd->sd_subch < 0) { + kfree(event_sd); + printk(KERN_WARNING "%s: couldn't open event subchannel\n", + __func__); + return; + } + + /* hook event subchannel up to the system controller interrupt */ + rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt, + IRQF_SHARED, "system controller events", event_sd); + if (rv) { + printk(KERN_WARNING "%s: irq request failed (%d)\n", + __func__, rv); + ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch); + kfree(event_sd); + return; + } +} diff --git a/kernel/drivers/char/sonypi.c b/kernel/drivers/char/sonypi.c new file mode 100644 index 000000000..e496daefe --- /dev/null +++ b/kernel/drivers/char/sonypi.c @@ -0,0 +1,1563 @@ +/* + * Sony Programmable I/O Control Device driver for VAIO + * + * Copyright (C) 2007 Mattia Dongili + * + * Copyright (C) 2001-2005 Stelian Pop + * + * Copyright (C) 2005 Narayanan R S + * + * Copyright (C) 2001-2002 Alcôve + * + * Copyright (C) 2001 Michael Ashley + * + * Copyright (C) 2001 Junichi Morita + * + * Copyright (C) 2000 Takaya Kinjo + * + * Copyright (C) 2000 Andrew Tridgell + * + * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define SONYPI_DRIVER_VERSION "1.26" + +MODULE_AUTHOR("Stelian Pop "); +MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SONYPI_DRIVER_VERSION); + +static int minor = -1; +module_param(minor, int, 0); +MODULE_PARM_DESC(minor, + "minor number of the misc device, default is -1 (automatic)"); + +static int verbose; /* = 0 */ +module_param(verbose, int, 0644); +MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)"); + +static int fnkeyinit; /* = 0 */ +module_param(fnkeyinit, int, 0444); +MODULE_PARM_DESC(fnkeyinit, + "set this if your Fn keys do not generate any event"); + +static int camera; /* = 0 */ +module_param(camera, int, 0444); +MODULE_PARM_DESC(camera, + "set this if you have a MotionEye camera (PictureBook series)"); + +static int compat; /* = 0 */ +module_param(compat, int, 0444); +MODULE_PARM_DESC(compat, + "set this if you want to enable backward compatibility mode"); + +static unsigned long mask = 0xffffffff; +module_param(mask, ulong, 0644); +MODULE_PARM_DESC(mask, + "set this to the mask of event you want to enable (see doc)"); + +static int useinput = 1; +module_param(useinput, int, 0444); +MODULE_PARM_DESC(useinput, + "set this if you would like sonypi to feed events to the input subsystem"); + +static int check_ioport = 1; +module_param(check_ioport, int, 0444); +MODULE_PARM_DESC(check_ioport, + "set this to 0 if you think the automatic ioport check for sony-laptop is wrong"); + +#define SONYPI_DEVICE_MODEL_TYPE1 1 +#define SONYPI_DEVICE_MODEL_TYPE2 2 +#define SONYPI_DEVICE_MODEL_TYPE3 3 + +/* type1 models use those */ +#define SONYPI_IRQ_PORT 0x8034 +#define SONYPI_IRQ_SHIFT 22 +#define SONYPI_TYPE1_BASE 0x50 +#define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14) +#define SONYPI_TYPE1_REGION_SIZE 0x08 +#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 + +/* type2 series specifics */ +#define SONYPI_SIRQ 0x9b +#define SONYPI_SLOB 0x9c +#define SONYPI_SHIB 0x9d +#define SONYPI_TYPE2_REGION_SIZE 0x20 +#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 + +/* type3 series specifics */ +#define SONYPI_TYPE3_BASE 0x40 +#define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */ +#define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */ +#define SONYPI_TYPE3_REGION_SIZE 0x20 +#define SONYPI_TYPE3_EVTYPE_OFFSET 0x12 + +/* battery / brightness addresses */ +#define SONYPI_BAT_FLAGS 0x81 +#define SONYPI_LCD_LIGHT 0x96 +#define SONYPI_BAT1_PCTRM 0xa0 +#define SONYPI_BAT1_LEFT 0xa2 +#define SONYPI_BAT1_MAXRT 0xa4 +#define SONYPI_BAT2_PCTRM 0xa8 +#define SONYPI_BAT2_LEFT 0xaa +#define SONYPI_BAT2_MAXRT 0xac +#define SONYPI_BAT1_MAXTK 0xb0 +#define SONYPI_BAT1_FULL 0xb2 +#define SONYPI_BAT2_MAXTK 0xb8 +#define SONYPI_BAT2_FULL 0xba + +/* FAN0 information (reverse engineered from ACPI tables) */ +#define SONYPI_FAN0_STATUS 0x93 +#define SONYPI_TEMP_STATUS 0xC1 + +/* ioports used for brightness and type2 events */ +#define SONYPI_DATA_IOPORT 0x62 +#define SONYPI_CST_IOPORT 0x66 + +/* The set of possible ioports */ +struct sonypi_ioport_list { + u16 port1; + u16 port2; +}; + +static struct sonypi_ioport_list sonypi_type1_ioport_list[] = { + { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */ + { 0x1080, 0x1084 }, + { 0x1090, 0x1094 }, + { 0x10a0, 0x10a4 }, + { 0x10b0, 0x10b4 }, + { 0x0, 0x0 } +}; + +static struct sonypi_ioport_list 
sonypi_type2_ioport_list[] = { + { 0x1080, 0x1084 }, + { 0x10a0, 0x10a4 }, + { 0x10c0, 0x10c4 }, + { 0x10e0, 0x10e4 }, + { 0x0, 0x0 } +}; + +/* same as in type 2 models */ +static struct sonypi_ioport_list *sonypi_type3_ioport_list = + sonypi_type2_ioport_list; + +/* The set of possible interrupts */ +struct sonypi_irq_list { + u16 irq; + u16 bits; +}; + +static struct sonypi_irq_list sonypi_type1_irq_list[] = { + { 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */ + { 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */ + { 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */ + { 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */ +}; + +static struct sonypi_irq_list sonypi_type2_irq_list[] = { + { 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */ + { 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */ + { 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */ + { 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */ + { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */ +}; + +/* same as in type2 models */ +static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list; + +#define SONYPI_CAMERA_BRIGHTNESS 0 +#define SONYPI_CAMERA_CONTRAST 1 +#define SONYPI_CAMERA_HUE 2 +#define SONYPI_CAMERA_COLOR 3 +#define SONYPI_CAMERA_SHARPNESS 4 + +#define SONYPI_CAMERA_PICTURE 5 +#define SONYPI_CAMERA_EXPOSURE_MASK 0xC +#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3 +#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30 +#define SONYPI_CAMERA_MUTE_MASK 0x40 + +/* the rest don't need a loop until not 0xff */ +#define SONYPI_CAMERA_AGC 6 +#define SONYPI_CAMERA_AGC_MASK 0x30 +#define SONYPI_CAMERA_SHUTTER_MASK 0x7 + +#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7 +#define SONYPI_CAMERA_CONTROL 0x10 + +#define SONYPI_CAMERA_STATUS 7 +#define SONYPI_CAMERA_STATUS_READY 0x2 +#define SONYPI_CAMERA_STATUS_POSITION 0x4 + +#define SONYPI_DIRECTION_BACKWARDS 0x4 + +#define SONYPI_CAMERA_REVISION 8 +#define SONYPI_CAMERA_ROMVERSION 9 + +/* Event masks */ +#define SONYPI_JOGGER_MASK 0x00000001 +#define SONYPI_CAPTURE_MASK 0x00000002 +#define SONYPI_FNKEY_MASK 0x00000004 +#define SONYPI_BLUETOOTH_MASK 0x00000008 +#define SONYPI_PKEY_MASK 0x00000010 +#define SONYPI_BACK_MASK 0x00000020 +#define SONYPI_HELP_MASK 0x00000040 +#define SONYPI_LID_MASK 0x00000080 +#define SONYPI_ZOOM_MASK 0x00000100 +#define SONYPI_THUMBPHRASE_MASK 0x00000200 +#define SONYPI_MEYE_MASK 0x00000400 +#define SONYPI_MEMORYSTICK_MASK 0x00000800 +#define SONYPI_BATTERY_MASK 0x00001000 +#define SONYPI_WIRELESS_MASK 0x00002000 + +struct sonypi_event { + u8 data; + u8 event; +}; + +/* The set of possible button release events */ +static struct sonypi_event sonypi_releaseev[] = { + { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, + { 0, 0 } +}; + +/* The set of possible jogger events */ +static struct sonypi_event sonypi_joggerev[] = { + { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, + { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, + { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, + { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, + { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, + { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, + { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, + { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, + { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, + { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, + { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, + { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, + { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, + { 0, 0 } +}; + +/* The set of possible capture button events */ +static struct sonypi_event sonypi_captureev[] = { + { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, + { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, + 
{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, + { 0, 0 } +}; + +/* The set of possible fnkeys events */ +static struct sonypi_event sonypi_fnkeyev[] = { + { 0x10, SONYPI_EVENT_FNKEY_ESC }, + { 0x11, SONYPI_EVENT_FNKEY_F1 }, + { 0x12, SONYPI_EVENT_FNKEY_F2 }, + { 0x13, SONYPI_EVENT_FNKEY_F3 }, + { 0x14, SONYPI_EVENT_FNKEY_F4 }, + { 0x15, SONYPI_EVENT_FNKEY_F5 }, + { 0x16, SONYPI_EVENT_FNKEY_F6 }, + { 0x17, SONYPI_EVENT_FNKEY_F7 }, + { 0x18, SONYPI_EVENT_FNKEY_F8 }, + { 0x19, SONYPI_EVENT_FNKEY_F9 }, + { 0x1a, SONYPI_EVENT_FNKEY_F10 }, + { 0x1b, SONYPI_EVENT_FNKEY_F11 }, + { 0x1c, SONYPI_EVENT_FNKEY_F12 }, + { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x21, SONYPI_EVENT_FNKEY_1 }, + { 0x22, SONYPI_EVENT_FNKEY_2 }, + { 0x31, SONYPI_EVENT_FNKEY_D }, + { 0x32, SONYPI_EVENT_FNKEY_E }, + { 0x33, SONYPI_EVENT_FNKEY_F }, + { 0x34, SONYPI_EVENT_FNKEY_S }, + { 0x35, SONYPI_EVENT_FNKEY_B }, + { 0x36, SONYPI_EVENT_FNKEY_ONLY }, + { 0, 0 } +}; + +/* The set of possible program key events */ +static struct sonypi_event sonypi_pkeyev[] = { + { 0x01, SONYPI_EVENT_PKEY_P1 }, + { 0x02, SONYPI_EVENT_PKEY_P2 }, + { 0x04, SONYPI_EVENT_PKEY_P3 }, + { 0x5c, SONYPI_EVENT_PKEY_P1 }, + { 0, 0 } +}; + +/* The set of possible bluetooth events */ +static struct sonypi_event sonypi_blueev[] = { + { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, + { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, + { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, + { 0, 0 } +}; + +/* The set of possible wireless events */ +static struct sonypi_event sonypi_wlessev[] = { + { 0x59, SONYPI_EVENT_WIRELESS_ON }, + { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, + { 0, 0 } +}; + +/* The set of possible back button events */ +static struct sonypi_event sonypi_backev[] = { + { 0x20, SONYPI_EVENT_BACK_PRESSED }, + { 0, 0 } +}; + +/* The set of possible help button events */ +static struct sonypi_event sonypi_helpev[] = { + { 0x3b, SONYPI_EVENT_HELP_PRESSED }, + { 0, 0 } +}; + + +/* The set of possible lid events */ +static struct sonypi_event sonypi_lidev[] = { + { 0x51, SONYPI_EVENT_LID_CLOSED }, + { 0x50, SONYPI_EVENT_LID_OPENED }, + { 0, 0 } +}; + +/* The set of possible zoom events */ +static struct sonypi_event sonypi_zoomev[] = { + { 0x39, SONYPI_EVENT_ZOOM_PRESSED }, + { 0, 0 } +}; + +/* The set of possible thumbphrase events */ +static struct sonypi_event sonypi_thumbphraseev[] = { + { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED }, + { 0, 0 } +}; + +/* The set of possible motioneye camera events */ +static struct sonypi_event sonypi_meyeev[] = { + { 0x00, SONYPI_EVENT_MEYE_FACE }, + { 0x01, SONYPI_EVENT_MEYE_OPPOSITE }, + { 0, 0 } +}; + +/* The set of possible memorystick events */ +static struct sonypi_event sonypi_memorystickev[] = { + { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT }, + { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT }, + { 0, 0 } +}; + +/* The set of possible battery events */ +static struct sonypi_event sonypi_batteryev[] = { + { 0x20, SONYPI_EVENT_BATTERY_INSERT }, + { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, + { 0, 0 } +}; + +static struct sonypi_eventtypes { + int model; + u8 data; + unsigned long mask; + struct sonypi_event * events; +} sonypi_eventtypes[] = { + { SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, 
sonypi_fnkeyev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, + + { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, + + { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev }, + { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, + { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, + { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { 0 } +}; + +#define SONYPI_BUF_SIZE 128 + +/* Correspondance table between sonypi events and input layer events */ +static struct { + int sonypiev; + int inputev; +} sonypi_inputkeys[] = { + { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA }, + { SONYPI_EVENT_FNKEY_ONLY, KEY_FN }, + { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC }, + { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 }, + { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 }, + { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 }, + { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 }, + { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 }, + { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 }, + { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 }, + { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 }, + { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 }, + { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 }, + { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 }, + { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 }, + { SONYPI_EVENT_FNKEY_1, KEY_FN_1 }, + { SONYPI_EVENT_FNKEY_2, KEY_FN_2 }, + { SONYPI_EVENT_FNKEY_D, KEY_FN_D }, + { SONYPI_EVENT_FNKEY_E, KEY_FN_E }, + { SONYPI_EVENT_FNKEY_F, KEY_FN_F }, + { SONYPI_EVENT_FNKEY_S, KEY_FN_S }, + { SONYPI_EVENT_FNKEY_B, KEY_FN_B }, + { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE }, + { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE }, + { SONYPI_EVENT_PKEY_P1, KEY_PROG1 }, + { SONYPI_EVENT_PKEY_P2, KEY_PROG2 }, + { SONYPI_EVENT_PKEY_P3, KEY_PROG3 }, + { SONYPI_EVENT_BACK_PRESSED, KEY_BACK }, + { SONYPI_EVENT_HELP_PRESSED, KEY_HELP }, + { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM }, + { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB }, + { 0, 0 }, +}; + +struct sonypi_keypress { + struct input_dev *dev; + int key; +}; + +static struct sonypi_device { + struct pci_dev *dev; + u16 irq; + u16 bits; + u16 ioport1; + u16 ioport2; + u16 region_size; + u16 evtype_offset; + int camera_power; + 
int bluetooth_power; + struct mutex lock; + struct kfifo fifo; + spinlock_t fifo_lock; + wait_queue_head_t fifo_proc_list; + struct fasync_struct *fifo_async; + int open_count; + int model; + struct input_dev *input_jog_dev; + struct input_dev *input_key_dev; + struct work_struct input_work; + struct kfifo input_fifo; + spinlock_t input_fifo_lock; +} sonypi_device; + +#define ITERATIONS_LONG 10000 +#define ITERATIONS_SHORT 10 + +#define wait_on_command(quiet, command, iterations) { \ + unsigned int n = iterations; \ + while (--n && (command)) \ + udelay(1); \ + if (!n && (verbose || !quiet)) \ + printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \ +} + +#ifdef CONFIG_ACPI +#define SONYPI_ACPI_ACTIVE (!acpi_disabled) +#else +#define SONYPI_ACPI_ACTIVE 0 +#endif /* CONFIG_ACPI */ + +#ifdef CONFIG_ACPI +static struct acpi_device *sonypi_acpi_device; +static int acpi_driver_registered; +#endif + +static int sonypi_ec_write(u8 addr, u8 value) +{ +#ifdef CONFIG_ACPI + if (SONYPI_ACPI_ACTIVE) + return ec_write(addr, value); +#endif + wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); + outb_p(0x81, SONYPI_CST_IOPORT); + wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); + outb_p(addr, SONYPI_DATA_IOPORT); + wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); + outb_p(value, SONYPI_DATA_IOPORT); + wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); + return 0; +} + +static int sonypi_ec_read(u8 addr, u8 *value) +{ +#ifdef CONFIG_ACPI + if (SONYPI_ACPI_ACTIVE) + return ec_read(addr, value); +#endif + wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); + outb_p(0x80, SONYPI_CST_IOPORT); + wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); + outb_p(addr, SONYPI_DATA_IOPORT); + wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); + *value = inb_p(SONYPI_DATA_IOPORT); + return 0; +} + +static int ec_read16(u8 addr, u16 *value) +{ + u8 val_lb, val_hb; + if (sonypi_ec_read(addr, &val_lb)) + return -1; + if (sonypi_ec_read(addr + 1, &val_hb)) + return -1; + *value = val_lb | (val_hb << 8); + return 0; +} + +/* Initializes the device - this comes from the AML code in the ACPI bios */ +static void sonypi_type1_srs(void) +{ + u32 v; + + pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); + v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1); + pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); + + pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); + v = (v & 0xFFF0FFFF) | + (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16); + pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); + + v = inl(SONYPI_IRQ_PORT); + v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT); + v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT); + outl(v, SONYPI_IRQ_PORT); + + pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); + v = (v & 0xFF1FFFFF) | 0x00C00000; + pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); +} + +static void sonypi_type2_srs(void) +{ + if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8)) + printk(KERN_WARNING "ec_write failed\n"); + if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF)) + printk(KERN_WARNING "ec_write failed\n"); + if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits)) + printk(KERN_WARNING "ec_write failed\n"); + udelay(10); +} + +static void sonypi_type3_srs(void) +{ + u16 v16; + u8 v8; + + /* This model type uses the same initialization of + * the embedded
controller as the type2 models. */ + sonypi_type2_srs(); + + /* Initialization of PCI config space of the LPC interface bridge. */ + v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01; + pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16); + pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8); + v8 = (v8 & 0xCF) | 0x10; + pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8); +} + +/* Disables the device - this comes from the AML code in the ACPI bios */ +static void sonypi_type1_dis(void) +{ + u32 v; + + pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); + v = v & 0xFF3FFFFF; + pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); + + v = inl(SONYPI_IRQ_PORT); + v |= (0x3 << SONYPI_IRQ_SHIFT); + outl(v, SONYPI_IRQ_PORT); +} + +static void sonypi_type2_dis(void) +{ + if (sonypi_ec_write(SONYPI_SHIB, 0)) + printk(KERN_WARNING "ec_write failed\n"); + if (sonypi_ec_write(SONYPI_SLOB, 0)) + printk(KERN_WARNING "ec_write failed\n"); + if (sonypi_ec_write(SONYPI_SIRQ, 0)) + printk(KERN_WARNING "ec_write failed\n"); +} + +static void sonypi_type3_dis(void) +{ + sonypi_type2_dis(); + udelay(10); + pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0); +} + +static u8 sonypi_call1(u8 dev) +{ + u8 v1, v2; + + wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); + outb(dev, sonypi_device.ioport2); + v1 = inb_p(sonypi_device.ioport2); + v2 = inb_p(sonypi_device.ioport1); + return v2; +} + +static u8 sonypi_call2(u8 dev, u8 fn) +{ + u8 v1; + + wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); + outb(dev, sonypi_device.ioport2); + wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); + outb(fn, sonypi_device.ioport1); + v1 = inb_p(sonypi_device.ioport1); + return v1; +} + +static u8 sonypi_call3(u8 dev, u8 fn, u8 v) +{ + u8 v1; + + wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); + outb(dev, sonypi_device.ioport2); + wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); + outb(fn, sonypi_device.ioport1); + wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); + outb(v, sonypi_device.ioport1); + v1 = inb_p(sonypi_device.ioport1); + return v1; +} + +#if 0 +/* Get brightness, hue etc. Unreliable... 
*/ +static u8 sonypi_read(u8 fn) +{ + u8 v1, v2; + int n = 100; + + while (n--) { + v1 = sonypi_call2(0x8f, fn); + v2 = sonypi_call2(0x8f, fn); + if (v1 == v2 && v1 != 0xff) + return v1; + } + return 0xff; +} +#endif + +/* Set brightness, hue etc */ +static void sonypi_set(u8 fn, u8 v) +{ + wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT); +} + +/* Tests if the camera is ready */ +static int sonypi_camera_ready(void) +{ + u8 v; + + v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS); + return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY)); +} + +/* Turns the camera off */ +static void sonypi_camera_off(void) +{ + sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK); + + if (!sonypi_device.camera_power) + return; + + sonypi_call2(0x91, 0); + sonypi_device.camera_power = 0; +} + +/* Turns the camera on */ +static void sonypi_camera_on(void) +{ + int i, j; + + if (sonypi_device.camera_power) + return; + + for (j = 5; j > 0; j--) { + + while (sonypi_call2(0x91, 0x1)) + msleep(10); + sonypi_call1(0x93); + + for (i = 400; i > 0; i--) { + if (sonypi_camera_ready()) + break; + msleep(10); + } + if (i) + break; + } + + if (j == 0) { + printk(KERN_WARNING "sonypi: failed to power on camera\n"); + return; + } + + sonypi_set(0x10, 0x5a); + sonypi_device.camera_power = 1; +} + +/* sets the bluetooth subsystem power state */ +static void sonypi_setbluetoothpower(u8 state) +{ + state = !!state; + + if (sonypi_device.bluetooth_power == state) + return; + + sonypi_call2(0x96, state); + sonypi_call1(0x82); + sonypi_device.bluetooth_power = state; +} + +static void input_keyrelease(struct work_struct *work) +{ + struct sonypi_keypress kp; + + while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp, + sizeof(kp), &sonypi_device.input_fifo_lock) + == sizeof(kp)) { + msleep(10); + input_report_key(kp.dev, kp.key, 0); + input_sync(kp.dev); + } +} + +static void sonypi_report_input_event(u8 event) +{ + struct input_dev *jog_dev = sonypi_device.input_jog_dev; + struct input_dev *key_dev = sonypi_device.input_key_dev; + struct sonypi_keypress kp = { NULL }; + int i; + + switch (event) { + case SONYPI_EVENT_JOGDIAL_UP: + case SONYPI_EVENT_JOGDIAL_UP_PRESSED: + input_report_rel(jog_dev, REL_WHEEL, 1); + input_sync(jog_dev); + break; + + case SONYPI_EVENT_JOGDIAL_DOWN: + case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: + input_report_rel(jog_dev, REL_WHEEL, -1); + input_sync(jog_dev); + break; + + case SONYPI_EVENT_JOGDIAL_PRESSED: + kp.key = BTN_MIDDLE; + kp.dev = jog_dev; + break; + + case SONYPI_EVENT_FNKEY_RELEASED: + /* Nothing, not all VAIOs generate this event */ + break; + + default: + for (i = 0; sonypi_inputkeys[i].sonypiev; i++) + if (event == sonypi_inputkeys[i].sonypiev) { + kp.dev = key_dev; + kp.key = sonypi_inputkeys[i].inputev; + break; + } + break; + } + + if (kp.dev) { + input_report_key(kp.dev, kp.key, 1); + input_sync(kp.dev); + kfifo_in_locked(&sonypi_device.input_fifo, + (unsigned char *)&kp, sizeof(kp), + &sonypi_device.input_fifo_lock); + schedule_work(&sonypi_device.input_work); + } +} + +/* Interrupt handler: some event is available */ +static irqreturn_t sonypi_irq(int irq, void *dev_id) +{ + u8 v1, v2, event = 0; + int i, j; + + v1 = inb_p(sonypi_device.ioport1); + v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset); + + for (i = 0; sonypi_eventtypes[i].model; i++) { + if (sonypi_device.model != sonypi_eventtypes[i].model) + continue; + if ((v2 & sonypi_eventtypes[i].data) != + sonypi_eventtypes[i].data) + continue; + if (!(mask & 
sonypi_eventtypes[i].mask)) + continue; + for (j = 0; sonypi_eventtypes[i].events[j].event; j++) { + if (v1 == sonypi_eventtypes[i].events[j].data) { + event = sonypi_eventtypes[i].events[j].event; + goto found; + } + } + } + + if (verbose) + printk(KERN_WARNING + "sonypi: unknown event port1=0x%02x,port2=0x%02x\n", + v1, v2); + /* We need to return IRQ_HANDLED here because there *are* + * events belonging to the sonypi device we don't know about, + * but we still don't want those to pollute the logs... */ + return IRQ_HANDLED; + +found: + if (verbose > 1) + printk(KERN_INFO + "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2); + + if (useinput) + sonypi_report_input_event(event); + + kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event, + sizeof(event), &sonypi_device.fifo_lock); + kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); + wake_up_interruptible(&sonypi_device.fifo_proc_list); + + return IRQ_HANDLED; +} + +static int sonypi_misc_fasync(int fd, struct file *filp, int on) +{ + return fasync_helper(fd, filp, on, &sonypi_device.fifo_async); +} + +static int sonypi_misc_release(struct inode *inode, struct file *file) +{ + mutex_lock(&sonypi_device.lock); + sonypi_device.open_count--; + mutex_unlock(&sonypi_device.lock); + return 0; +} + +static int sonypi_misc_open(struct inode *inode, struct file *file) +{ + mutex_lock(&sonypi_device.lock); + /* Flush input queue on first open */ + if (!sonypi_device.open_count) + kfifo_reset(&sonypi_device.fifo); + sonypi_device.open_count++; + mutex_unlock(&sonypi_device.lock); + + return 0; +} + +static ssize_t sonypi_misc_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + unsigned char c; + + if ((kfifo_len(&sonypi_device.fifo) == 0) && + (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + ret = wait_event_interruptible(sonypi_device.fifo_proc_list, + kfifo_len(&sonypi_device.fifo) != 0); + if (ret) + return ret; + + while (ret < count && + (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c), + &sonypi_device.fifo_lock) == sizeof(c))) { + if (put_user(c, buf++)) + return -EFAULT; + ret++; + } + + if (ret > 0) { + struct inode *inode = file_inode(file); + inode->i_atime = current_fs_time(inode->i_sb); + } + + return ret; +} + +static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &sonypi_device.fifo_proc_list, wait); + if (kfifo_len(&sonypi_device.fifo)) + return POLLIN | POLLRDNORM; + return 0; +} + +static long sonypi_misc_ioctl(struct file *fp, + unsigned int cmd, unsigned long arg) +{ + long ret = 0; + void __user *argp = (void __user *)arg; + u8 val8; + u16 val16; + + mutex_lock(&sonypi_device.lock); + switch (cmd) { + case SONYPI_IOCGBRT: + if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCSBRT: + if (copy_from_user(&val8, argp, sizeof(val8))) { + ret = -EFAULT; + break; + } + if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8)) + ret = -EIO; + break; + case SONYPI_IOCGBAT1CAP: + if (ec_read16(SONYPI_BAT1_FULL, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBAT1REM: + if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBAT2CAP: + if (ec_read16(SONYPI_BAT2_FULL, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + 
ret = -EFAULT; + break; + case SONYPI_IOCGBAT2REM: + if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBATFLAGS: + if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) { + ret = -EIO; + break; + } + val8 &= 0x07; + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCGBLUE: + val8 = sonypi_device.bluetooth_power; + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCSBLUE: + if (copy_from_user(&val8, argp, sizeof(val8))) { + ret = -EFAULT; + break; + } + sonypi_setbluetoothpower(val8); + break; + /* FAN Controls */ + case SONYPI_IOCGFAN: + if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCSFAN: + if (copy_from_user(&val8, argp, sizeof(val8))) { + ret = -EFAULT; + break; + } + if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8)) + ret = -EIO; + break; + /* GET Temperature (useful under APM) */ + case SONYPI_IOCGTEMP: + if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + default: + ret = -EINVAL; + } + mutex_unlock(&sonypi_device.lock); + return ret; +} + +static const struct file_operations sonypi_misc_fops = { + .owner = THIS_MODULE, + .read = sonypi_misc_read, + .poll = sonypi_misc_poll, + .open = sonypi_misc_open, + .release = sonypi_misc_release, + .fasync = sonypi_misc_fasync, + .unlocked_ioctl = sonypi_misc_ioctl, + .llseek = no_llseek, +}; + +static struct miscdevice sonypi_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sonypi", + .fops = &sonypi_misc_fops, +}; + +static void sonypi_enable(unsigned int camera_on) +{ + switch (sonypi_device.model) { + case SONYPI_DEVICE_MODEL_TYPE1: + sonypi_type1_srs(); + break; + case SONYPI_DEVICE_MODEL_TYPE2: + sonypi_type2_srs(); + break; + case SONYPI_DEVICE_MODEL_TYPE3: + sonypi_type3_srs(); + break; + } + + sonypi_call1(0x82); + sonypi_call2(0x81, 0xff); + sonypi_call1(compat ? 
0x92 : 0x82); + + /* Enable ACPI mode to get Fn key events */ + if (!SONYPI_ACPI_ACTIVE && fnkeyinit) + outb(0xf0, 0xb2); + + if (camera && camera_on) + sonypi_camera_on(); +} + +static int sonypi_disable(void) +{ + sonypi_call2(0x81, 0); /* make sure we don't get any more events */ + if (camera) + sonypi_camera_off(); + + /* disable ACPI mode */ + if (!SONYPI_ACPI_ACTIVE && fnkeyinit) + outb(0xf1, 0xb2); + + switch (sonypi_device.model) { + case SONYPI_DEVICE_MODEL_TYPE1: + sonypi_type1_dis(); + break; + case SONYPI_DEVICE_MODEL_TYPE2: + sonypi_type2_dis(); + break; + case SONYPI_DEVICE_MODEL_TYPE3: + sonypi_type3_dis(); + break; + } + + return 0; +} + +#ifdef CONFIG_ACPI +static int sonypi_acpi_add(struct acpi_device *device) +{ + sonypi_acpi_device = device; + strcpy(acpi_device_name(device), "Sony laptop hotkeys"); + strcpy(acpi_device_class(device), "sony/hotkey"); + return 0; +} + +static int sonypi_acpi_remove(struct acpi_device *device) +{ + sonypi_acpi_device = NULL; + return 0; +} + +static const struct acpi_device_id sonypi_device_ids[] = { + {"SNY6001", 0}, + {"", 0}, +}; + +static struct acpi_driver sonypi_acpi_driver = { + .name = "sonypi", + .class = "hkey", + .ids = sonypi_device_ids, + .ops = { + .add = sonypi_acpi_add, + .remove = sonypi_acpi_remove, + }, +}; +#endif + +static int sonypi_create_input_devices(struct platform_device *pdev) +{ + struct input_dev *jog_dev; + struct input_dev *key_dev; + int i; + int error; + + sonypi_device.input_jog_dev = jog_dev = input_allocate_device(); + if (!jog_dev) + return -ENOMEM; + + jog_dev->name = "Sony Vaio Jogdial"; + jog_dev->id.bustype = BUS_ISA; + jog_dev->id.vendor = PCI_VENDOR_ID_SONY; + jog_dev->dev.parent = &pdev->dev; + + jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); + jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); + + sonypi_device.input_key_dev = key_dev = input_allocate_device(); + if (!key_dev) { + error = -ENOMEM; + goto err_free_jogdev; + } + + key_dev->name = "Sony Vaio Keys"; + key_dev->id.bustype = BUS_ISA; + key_dev->id.vendor = PCI_VENDOR_ID_SONY; + key_dev->dev.parent = &pdev->dev; + + /* Initialize the Input Drivers: special keys */ + key_dev->evbit[0] = BIT_MASK(EV_KEY); + for (i = 0; sonypi_inputkeys[i].sonypiev; i++) + if (sonypi_inputkeys[i].inputev) + set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit); + + error = input_register_device(jog_dev); + if (error) + goto err_free_keydev; + + error = input_register_device(key_dev); + if (error) + goto err_unregister_jogdev; + + return 0; + + err_unregister_jogdev: + input_unregister_device(jog_dev); + /* Set to NULL so we don't free it again below */ + jog_dev = NULL; + err_free_keydev: + input_free_device(key_dev); + sonypi_device.input_key_dev = NULL; + err_free_jogdev: + input_free_device(jog_dev); + sonypi_device.input_jog_dev = NULL; + + return error; +} + +static int sonypi_setup_ioports(struct sonypi_device *dev, + const struct sonypi_ioport_list *ioport_list) +{ + /* try to detect if sony-laptop is being used and thus + * has already requested one of the known ioports. + * As in the deprecated check_region this is racy has we have + * multiple ioports available and one of them can be requested + * between this check and the subsequent request. Anyway, as an + * attempt to be some more user-friendly as we currently are, + * this is enough. 
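When ACPI is inactive, sonypi_ec_read()/sonypi_ec_write() above drive the embedded controller through the classic keyboard-controller port pair: port 0x66 takes commands and reports status, port 0x62 carries data, and bit 1 of the status byte (input buffer full) gates every byte written. A simplified sketch of that handshake; my_ec_write() and the -ETIMEDOUT policy are inventions of this sketch, whereas the driver's wait_on_command() only warns and presses on.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/io.h>

#define EC_DATA	0x62	/* SONYPI_DATA_IOPORT */
#define EC_CSR	0x66	/* SONYPI_CST_IOPORT  */

static int ec_wait_ibf(void)
{
	unsigned int n = 10000;			/* ITERATIONS_LONG */

	while (--n && (inb_p(EC_CSR) & 2))	/* input buffer still full? */
		udelay(1);
	return n ? 0 : -ETIMEDOUT;
}

static int my_ec_write(u8 addr, u8 value)
{
	if (ec_wait_ibf())
		return -ETIMEDOUT;
	outb_p(0x81, EC_CSR);			/* "write EC byte" command */
	if (ec_wait_ibf())
		return -ETIMEDOUT;
	outb_p(addr, EC_DATA);
	if (ec_wait_ibf())
		return -ETIMEDOUT;
	outb_p(value, EC_DATA);
	return ec_wait_ibf();
}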
+ */ + const struct sonypi_ioport_list *check = ioport_list; + while (check_ioport && check->port1) { + if (!request_region(check->port1, + sonypi_device.region_size, + "Sony Programmable I/O Device Check")) { + printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? " + "if not use check_ioport=0\n", + check->port1); + return -EBUSY; + } + release_region(check->port1, sonypi_device.region_size); + check++; + } + + while (ioport_list->port1) { + + if (request_region(ioport_list->port1, + sonypi_device.region_size, + "Sony Programmable I/O Device")) { + dev->ioport1 = ioport_list->port1; + dev->ioport2 = ioport_list->port2; + return 0; + } + ioport_list++; + } + + return -EBUSY; +} + +static int sonypi_setup_irq(struct sonypi_device *dev, + const struct sonypi_irq_list *irq_list) +{ + while (irq_list->irq) { + + if (!request_irq(irq_list->irq, sonypi_irq, + IRQF_SHARED, "sonypi", sonypi_irq)) { + dev->irq = irq_list->irq; + dev->bits = irq_list->bits; + return 0; + } + irq_list++; + } + + return -EBUSY; +} + +static void sonypi_display_info(void) +{ + printk(KERN_INFO "sonypi: detected type%d model, " + "verbose = %d, fnkeyinit = %s, camera = %s, " + "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", + sonypi_device.model, + verbose, + fnkeyinit ? "on" : "off", + camera ? "on" : "off", + compat ? "on" : "off", + mask, + useinput ? "on" : "off", + SONYPI_ACPI_ACTIVE ? "on" : "off"); + printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", + sonypi_device.irq, + sonypi_device.ioport1, sonypi_device.ioport2); + + if (minor == -1) + printk(KERN_INFO "sonypi: device allocated minor is %d\n", + sonypi_misc_device.minor); +} + +static int sonypi_probe(struct platform_device *dev) +{ + const struct sonypi_ioport_list *ioport_list; + const struct sonypi_irq_list *irq_list; + struct pci_dev *pcidev; + int error; + + printk(KERN_WARNING "sonypi: please try the sony-laptop module instead " + "and report failures, see also " + "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); + + spin_lock_init(&sonypi_device.fifo_lock); + error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL); + if (error) { + printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); + return error; + } + + init_waitqueue_head(&sonypi_device.fifo_proc_list); + mutex_init(&sonypi_device.lock); + sonypi_device.bluetooth_power = -1; + + if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82371AB_3, NULL))) + sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1; + else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_ICH6_1, NULL))) + sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; + else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_ICH7_1, NULL))) + sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; + else + sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2; + + if (pcidev && pci_enable_device(pcidev)) { + printk(KERN_ERR "sonypi: pci_enable_device failed\n"); + error = -EIO; + goto err_put_pcidev; + } + + sonypi_device.dev = pcidev; + + if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) { + ioport_list = sonypi_type1_ioport_list; + sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; + sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; + irq_list = sonypi_type1_irq_list; + } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { + ioport_list = sonypi_type2_ioport_list; + sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; + sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET; + irq_list = 
sonypi_type2_irq_list; + } else { + ioport_list = sonypi_type3_ioport_list; + sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE; + sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET; + irq_list = sonypi_type3_irq_list; + } + + error = sonypi_setup_ioports(&sonypi_device, ioport_list); + if (error) { + printk(KERN_ERR "sonypi: failed to request ioports\n"); + goto err_disable_pcidev; + } + + error = sonypi_setup_irq(&sonypi_device, irq_list); + if (error) { + printk(KERN_ERR "sonypi: request_irq failed\n"); + goto err_free_ioports; + } + + if (minor != -1) + sonypi_misc_device.minor = minor; + error = misc_register(&sonypi_misc_device); + if (error) { + printk(KERN_ERR "sonypi: misc_register failed\n"); + goto err_free_irq; + } + + sonypi_display_info(); + + if (useinput) { + + error = sonypi_create_input_devices(dev); + if (error) { + printk(KERN_ERR + "sonypi: failed to create input devices\n"); + goto err_miscdev_unregister; + } + + spin_lock_init(&sonypi_device.input_fifo_lock); + error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE, + GFP_KERNEL); + if (error) { + printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); + goto err_inpdev_unregister; + } + + INIT_WORK(&sonypi_device.input_work, input_keyrelease); + } + + sonypi_enable(0); + + return 0; + + err_inpdev_unregister: + input_unregister_device(sonypi_device.input_key_dev); + input_unregister_device(sonypi_device.input_jog_dev); + err_miscdev_unregister: + misc_deregister(&sonypi_misc_device); + err_free_irq: + free_irq(sonypi_device.irq, sonypi_irq); + err_free_ioports: + release_region(sonypi_device.ioport1, sonypi_device.region_size); + err_disable_pcidev: + if (pcidev) + pci_disable_device(pcidev); + err_put_pcidev: + pci_dev_put(pcidev); + kfifo_free(&sonypi_device.fifo); + + return error; +} + +static int sonypi_remove(struct platform_device *dev) +{ + sonypi_disable(); + + synchronize_irq(sonypi_device.irq); + flush_work(&sonypi_device.input_work); + + if (useinput) { + input_unregister_device(sonypi_device.input_key_dev); + input_unregister_device(sonypi_device.input_jog_dev); + kfifo_free(&sonypi_device.input_fifo); + } + + misc_deregister(&sonypi_misc_device); + + free_irq(sonypi_device.irq, sonypi_irq); + release_region(sonypi_device.ioport1, sonypi_device.region_size); + + if (sonypi_device.dev) { + pci_disable_device(sonypi_device.dev); + pci_dev_put(sonypi_device.dev); + } + + kfifo_free(&sonypi_device.fifo); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int old_camera_power; + +static int sonypi_suspend(struct device *dev) +{ + old_camera_power = sonypi_device.camera_power; + sonypi_disable(); + + return 0; +} + +static int sonypi_resume(struct device *dev) +{ + sonypi_enable(old_camera_power); + return 0; +} + +static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume); +#define SONYPI_PM (&sonypi_pm) +#else +#define SONYPI_PM NULL +#endif + +static void sonypi_shutdown(struct platform_device *dev) +{ + sonypi_disable(); +} + +static struct platform_driver sonypi_driver = { + .driver = { + .name = "sonypi", + .pm = SONYPI_PM, + }, + .probe = sonypi_probe, + .remove = sonypi_remove, + .shutdown = sonypi_shutdown, +}; + +static struct platform_device *sonypi_platform_device; + +static struct dmi_system_id __initdata sonypi_dmi_table[] = { + { + .ident = "Sony Vaio", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), + }, + }, + { + .ident = "Sony Vaio", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), + }, + }, + { } +}; + +static int __init sonypi_init(void) +{ + int error; + + printk(KERN_INFO + "sonypi: Sony Programmable I/O Controller Driver v%s.\n", + SONYPI_DRIVER_VERSION); + + if (!dmi_check_system(sonypi_dmi_table)) + return -ENODEV; + + error = platform_driver_register(&sonypi_driver); + if (error) + return error; + + sonypi_platform_device = platform_device_alloc("sonypi", -1); + if (!sonypi_platform_device) { + error = -ENOMEM; + goto err_driver_unregister; + } + + error = platform_device_add(sonypi_platform_device); + if (error) + goto err_free_device; + +#ifdef CONFIG_ACPI + if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0) + acpi_driver_registered = 1; +#endif + + return 0; + + err_free_device: + platform_device_put(sonypi_platform_device); + err_driver_unregister: + platform_driver_unregister(&sonypi_driver); + return error; +} + +static void __exit sonypi_exit(void) +{ +#ifdef CONFIG_ACPI + if (acpi_driver_registered) + acpi_bus_unregister_driver(&sonypi_acpi_driver); +#endif + platform_device_unregister(sonypi_platform_device); + platform_driver_unregister(&sonypi_driver); + printk(KERN_INFO "sonypi: removed.\n"); +} + +module_init(sonypi_init); +module_exit(sonypi_exit); diff --git a/kernel/drivers/char/tb0219.c b/kernel/drivers/char/tb0219.c new file mode 100644 index 000000000..480a777db --- /dev/null +++ b/kernel/drivers/char/tb0219.c @@ -0,0 +1,371 @@ +/* + * Driver for TANBAC TB0219 base board. + * + * Copyright (C) 2005 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
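sonypi_init() above follows the legacy-hardware idiom: nothing enumerates the device, so the module registers a platform driver and then manufactures the matching platform device by hand. The subtle part is the unwinding: a device that was allocated but never added is released with platform_device_put(), whereas one that was successfully added must later be torn down with platform_device_unregister(). A minimal sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver my_driver = {
	.driver = { .name = "mydev" },
};
static struct platform_device *my_pdev;

static int __init my_init(void)
{
	int err = platform_driver_register(&my_driver);

	if (err)
		return err;

	my_pdev = platform_device_alloc("mydev", -1);
	if (!my_pdev) {
		err = -ENOMEM;
		goto err_unreg;
	}

	err = platform_device_add(my_pdev);
	if (err) {
		platform_device_put(my_pdev);	/* allocated, never added */
		goto err_unreg;
	}
	return 0;

err_unreg:
	platform_driver_unregister(&my_driver);
	return err;
}

static void __exit my_exit(void)
{
	platform_device_unregister(my_pdev);	/* added: full unregister */
	platform_driver_unregister(&my_driver);
}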
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include + +#include +#include +#include +#include + +MODULE_AUTHOR("Yoichi Yuasa "); +MODULE_DESCRIPTION("TANBAC TB0219 base board driver"); +MODULE_LICENSE("GPL"); + +static int major; /* default is dynamic major device number */ +module_param(major, int, 0); +MODULE_PARM_DESC(major, "Major device number"); + +static void (*old_machine_restart)(char *command); +static void __iomem *tb0219_base; +static DEFINE_SPINLOCK(tb0219_lock); + +#define tb0219_read(offset) readw(tb0219_base + (offset)) +#define tb0219_write(offset, value) writew((value), tb0219_base + (offset)) + +#define TB0219_START 0x0a000000UL +#define TB0219_SIZE 0x20UL + +#define TB0219_LED 0x00 +#define TB0219_GPIO_INPUT 0x02 +#define TB0219_GPIO_OUTPUT 0x04 +#define TB0219_DIP_SWITCH 0x06 +#define TB0219_MISC 0x08 +#define TB0219_RESET 0x0e +#define TB0219_PCI_SLOT1_IRQ_STATUS 0x10 +#define TB0219_PCI_SLOT2_IRQ_STATUS 0x12 +#define TB0219_PCI_SLOT3_IRQ_STATUS 0x14 + +typedef enum { + TYPE_LED, + TYPE_GPIO_OUTPUT, +} tb0219_type_t; + +/* + * Minor device number + * 0 = 7 segment LED + * + * 16 = GPIO IN 0 + * 17 = GPIO IN 1 + * 18 = GPIO IN 2 + * 19 = GPIO IN 3 + * 20 = GPIO IN 4 + * 21 = GPIO IN 5 + * 22 = GPIO IN 6 + * 23 = GPIO IN 7 + * + * 32 = GPIO OUT 0 + * 33 = GPIO OUT 1 + * 34 = GPIO OUT 2 + * 35 = GPIO OUT 3 + * 36 = GPIO OUT 4 + * 37 = GPIO OUT 5 + * 38 = GPIO OUT 6 + * 39 = GPIO OUT 7 + * + * 48 = DIP switch 1 + * 49 = DIP switch 2 + * 50 = DIP switch 3 + * 51 = DIP switch 4 + * 52 = DIP switch 5 + * 53 = DIP switch 6 + * 54 = DIP switch 7 + * 55 = DIP switch 8 + */ + +static inline char get_led(void) +{ + return (char)tb0219_read(TB0219_LED); +} + +static inline char get_gpio_input_pin(unsigned int pin) +{ + uint16_t values; + + values = tb0219_read(TB0219_GPIO_INPUT); + if (values & (1 << pin)) + return '1'; + + return '0'; +} + +static inline char get_gpio_output_pin(unsigned int pin) +{ + uint16_t values; + + values = tb0219_read(TB0219_GPIO_OUTPUT); + if (values & (1 << pin)) + return '1'; + + return '0'; +} + +static inline char get_dip_switch(unsigned int pin) +{ + uint16_t values; + + values = tb0219_read(TB0219_DIP_SWITCH); + if (values & (1 << pin)) + return '1'; + + return '0'; +} + +static inline int set_led(char command) +{ + tb0219_write(TB0219_LED, command); + + return 0; +} + +static inline int set_gpio_output_pin(unsigned int pin, char command) +{ + unsigned long flags; + uint16_t value; + + if (command != '0' && command != '1') + return -EINVAL; + + spin_lock_irqsave(&tb0219_lock, flags); + value = tb0219_read(TB0219_GPIO_OUTPUT); + if (command == '0') + value &= ~(1 << pin); + else + value |= 1 << pin; + tb0219_write(TB0219_GPIO_OUTPUT, value); + spin_unlock_irqrestore(&tb0219_lock, flags); + + return 0; + +} + +static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len, + loff_t *ppos) +{ + unsigned int minor; + char value; + + minor = iminor(file_inode(file)); + switch (minor) { + case 0: + value = get_led(); + break; + case 16 ... 23: + value = get_gpio_input_pin(minor - 16); + break; + case 32 ... 39: + value = get_gpio_output_pin(minor - 32); + break; + case 48 ... 
55: + value = get_dip_switch(minor - 48); + break; + default: + return -EBADF; + } + + if (len <= 0) + return -EFAULT; + + if (put_user(value, buf)) + return -EFAULT; + + return 1; +} + +static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + unsigned int minor; + tb0219_type_t type; + size_t i; + int retval = 0; + char c; + + minor = iminor(file_inode(file)); + switch (minor) { + case 0: + type = TYPE_LED; + break; + case 32 ... 39: + type = TYPE_GPIO_OUTPUT; + break; + default: + return -EBADF; + } + + for (i = 0; i < len; i++) { + if (get_user(c, data + i)) + return -EFAULT; + + switch (type) { + case TYPE_LED: + retval = set_led(c); + break; + case TYPE_GPIO_OUTPUT: + retval = set_gpio_output_pin(minor - 32, c); + break; + } + + if (retval < 0) + break; + } + + return i; +} + +static int tanbac_tb0219_open(struct inode *inode, struct file *file) +{ + unsigned int minor; + + minor = iminor(inode); + switch (minor) { + case 0: + case 16 ... 23: + case 32 ... 39: + case 48 ... 55: + return nonseekable_open(inode, file); + default: + break; + } + + return -EBADF; +} + +static int tanbac_tb0219_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations tb0219_fops = { + .owner = THIS_MODULE, + .read = tanbac_tb0219_read, + .write = tanbac_tb0219_write, + .open = tanbac_tb0219_open, + .release = tanbac_tb0219_release, + .llseek = no_llseek, +}; + +static void tb0219_restart(char *command) +{ + tb0219_write(TB0219_RESET, 0); +} + +static void tb0219_pci_irq_init(void) +{ + /* PCI Slot 1 */ + vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW); + + /* PCI Slot 2 */ + vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW); + + /* PCI Slot 3 */ + vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW); +} + +static int tb0219_probe(struct platform_device *dev) +{ + int retval; + + if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL) + return -EBUSY; + + tb0219_base = ioremap(TB0219_START, TB0219_SIZE); + if (tb0219_base == NULL) { + release_mem_region(TB0219_START, TB0219_SIZE); + return -ENOMEM; + } + + retval = register_chrdev(major, "TB0219", &tb0219_fops); + if (retval < 0) { + iounmap(tb0219_base); + tb0219_base = NULL; + release_mem_region(TB0219_START, TB0219_SIZE); + return retval; + } + + old_machine_restart = _machine_restart; + _machine_restart = tb0219_restart; + + tb0219_pci_irq_init(); + + if (major == 0) { + major = retval; + printk(KERN_INFO "TB0219: major number %d\n", major); + } + + return 0; +} + +static int tb0219_remove(struct platform_device *dev) +{ + _machine_restart = old_machine_restart; + + iounmap(tb0219_base); + tb0219_base = NULL; + + release_mem_region(TB0219_START, TB0219_SIZE); + + return 0; +} + +static struct platform_device *tb0219_platform_device; + +static struct platform_driver tb0219_device_driver = { + .probe = tb0219_probe, + .remove = tb0219_remove, + .driver = { + .name = "TB0219", + }, +}; + +static int __init tanbac_tb0219_init(void) +{ + int retval; + + tb0219_platform_device = platform_device_alloc("TB0219", -1); + if (!tb0219_platform_device) + return -ENOMEM; + + retval = platform_device_add(tb0219_platform_device); + if (retval < 0) { + 
platform_device_put(tb0219_platform_device); + return retval; + } + + retval = platform_driver_register(&tb0219_device_driver); + if (retval < 0) + platform_device_unregister(tb0219_platform_device); + + return retval; +} + +static void __exit tanbac_tb0219_exit(void) +{ + platform_driver_unregister(&tb0219_device_driver); + platform_device_unregister(tb0219_platform_device); +} + +module_init(tanbac_tb0219_init); +module_exit(tanbac_tb0219_exit); diff --git a/kernel/drivers/char/tile-srom.c b/kernel/drivers/char/tile-srom.c new file mode 100644 index 000000000..69f6b4acc --- /dev/null +++ b/kernel/drivers/char/tile-srom.c @@ -0,0 +1,472 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * SPI Flash ROM driver + * + * This source code is derived from code provided in "Linux Device + * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and + * Greg Kroah-Hartman, published by O'Reilly Media, Inc. + */ + +#include +#include +#include /* printk() */ +#include /* kmalloc() */ +#include /* everything... */ +#include /* error codes */ +#include /* size_t */ +#include +#include /* O_ACCMODE */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Size of our hypervisor I/O requests. We break up large transfers + * so that we don't spend large uninterrupted spans of time in the + * hypervisor. Erasing an SROM sector takes a significant fraction of + * a second, so if we allowed the user to, say, do one I/O to write the + * entire ROM, we'd get soft lockup timeouts, or worse. + */ +#define SROM_CHUNK_SIZE ((size_t)4096) + +/* + * When hypervisor is busy (e.g. erasing), poll the status periodically. + */ + +/* + * Interval to poll the state in msec + */ +#define SROM_WAIT_TRY_INTERVAL 20 + +/* + * Maximum times to poll the state + */ +#define SROM_MAX_WAIT_TRY_TIMES 1000 + +struct srom_dev { + int hv_devhdl; /* Handle for hypervisor device */ + u32 total_size; /* Size of this device */ + u32 sector_size; /* Size of a sector */ + u32 page_size; /* Size of a page */ + struct mutex lock; /* Allow only one accessor at a time */ +}; + +static int srom_major; /* Dynamic major by default */ +module_param(srom_major, int, 0); +MODULE_AUTHOR("Tilera Corporation"); +MODULE_LICENSE("GPL"); + +static int srom_devs; /* Number of SROM partitions */ +static struct cdev srom_cdev; +static struct platform_device *srom_parent; +static struct class *srom_class; +static struct srom_dev *srom_devices; + +/* + * Handle calling the hypervisor and managing EAGAIN/EBUSY. 
+ */ + +static ssize_t _srom_read(int hv_devhdl, void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_read: error %d\n", retval); + return -EIO; + } +} + +static ssize_t _srom_write(int hv_devhdl, const void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_write: error %d\n", retval); + return -EIO; + } +} + +/** + * srom_open() - Device open routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_open(struct inode *inode, struct file *filp) +{ + filp->private_data = &srom_devices[iminor(inode)]; + return 0; +} + + +/** + * srom_release() - Device release routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_release(struct inode *inode, struct file *filp) +{ + struct srom_dev *srom = filp->private_data; + char dummy; + + /* Make sure we've flushed anything written to the ROM. */ + mutex_lock(&srom->lock); + if (srom->hv_devhdl >= 0) + _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1); + mutex_unlock(&srom->lock); + + filp->private_data = NULL; + + return 0; +} + + +/** + * srom_read() - Read data from the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes read, or an error code. + */ +static ssize_t srom_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + hv_retval = _srom_read(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + if (copy_to_user(buf, kernbuf, hv_retval) != 0) { + retval = -EFAULT; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/** + * srom_write() - Write data to the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes written, or an error code. 
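srom_read() above and srom_write() just below advance four cursors in lockstep (bytes done, file position, user pointer, remaining count) and, if a later chunk fails, report the bytes already transferred rather than the error, so user space sees a short transfer. A minimal sketch of that convention, with placeholder demo_* names:

static ssize_t demo_copy_out(char __user *dst, const char *src,
			     size_t count, size_t chunk)
{
	ssize_t done = 0;

	while (count) {
		size_t n = min(count, chunk);

		if (copy_to_user(dst + done, src + done, n))
			return done ? done : -EFAULT;	/* short read */
		done += n;
		count -= n;
	}
	return done;
}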
+ */ +static ssize_t srom_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) { + retval = -EFAULT; + break; + } + + hv_retval = _srom_write(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/* Provide our own implementation so we can use srom->total_size. */ +loff_t srom_llseek(struct file *file, loff_t offset, int origin) +{ + struct srom_dev *srom = file->private_data; + return fixed_size_llseek(file, offset, origin, srom->total_size); +} + +static ssize_t total_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->total_size); +} +static DEVICE_ATTR_RO(total_size); + +static ssize_t sector_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->sector_size); +} +static DEVICE_ATTR_RO(sector_size); + +static ssize_t page_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->page_size); +} +static DEVICE_ATTR_RO(page_size); + +static struct attribute *srom_dev_attrs[] = { + &dev_attr_total_size.attr, + &dev_attr_sector_size.attr, + &dev_attr_page_size.attr, + NULL, +}; +ATTRIBUTE_GROUPS(srom_dev); + +static char *srom_devnode(struct device *dev, umode_t *mode) +{ + *mode = S_IRUGO | S_IWUSR; + return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); +} + +/* + * The fops + */ +static const struct file_operations srom_fops = { + .owner = THIS_MODULE, + .llseek = srom_llseek, + .read = srom_read, + .write = srom_write, + .open = srom_open, + .release = srom_release, +}; + +/** + * srom_setup_minor() - Initialize per-minor information. + * @srom: Per-device SROM state. + * @index: Device to set up. + */ +static int srom_setup_minor(struct srom_dev *srom, int index) +{ + struct device *dev; + int devhdl = srom->hv_devhdl; + + mutex_init(&srom->lock); + + if (_srom_read(devhdl, &srom->total_size, + SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->sector_size, + SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->page_size, + SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0) + return -EIO; + + dev = device_create(srom_class, &srom_parent->dev, + MKDEV(srom_major, index), srom, "%d", index); + return PTR_ERR_OR_ZERO(dev); +} + +/** srom_init() - Initialize the driver's module. */ +static int srom_init(void) +{ + int result, i; + dev_t dev = MKDEV(srom_major, 0); + + /* + * Start with a plausible number of partitions; the krealloc() call + * below will yield about log(srom_devs) additional allocations. 
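The three sysfs attributes above lean on two helpers: DEVICE_ATTR_RO(name) pairs name_show() with a read-only dev_attr_name structure, and ATTRIBUTE_GROUPS(srom_dev) emits the srom_dev_groups table that srom_init() hangs off the class. A sketch of roughly what DEVICE_ATTR_RO(total_size) produces (illustrative, not the literal macro expansion):

static struct device_attribute dev_attr_total_size = {
	.attr	= { .name = "total_size", .mode = 0444 },	/* S_IRUGO */
	.show	= total_size_show,
	/* .store stays NULL: writes are rejected */
};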
+ */ + srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL); + + /* Discover the number of srom partitions. */ + for (i = 0; ; i++) { + int devhdl; + char buf[20]; + struct srom_dev *new_srom_devices = + krealloc(srom_devices, (i+1) * sizeof(struct srom_dev), + GFP_KERNEL | __GFP_ZERO); + if (!new_srom_devices) { + result = -ENOMEM; + goto fail_mem; + } + srom_devices = new_srom_devices; + sprintf(buf, "srom/0/%d", i); + devhdl = hv_dev_open((HV_VirtAddr)buf, 0); + if (devhdl < 0) { + if (devhdl != HV_ENODEV) + pr_notice("srom/%d: hv_dev_open failed: %d.\n", + i, devhdl); + break; + } + srom_devices[i].hv_devhdl = devhdl; + } + srom_devs = i; + + /* Bail out early if we have no partitions at all. */ + if (srom_devs == 0) { + result = -ENODEV; + goto fail_mem; + } + + /* Register our major, and accept a dynamic number. */ + if (srom_major) + result = register_chrdev_region(dev, srom_devs, "srom"); + else { + result = alloc_chrdev_region(&dev, 0, srom_devs, "srom"); + srom_major = MAJOR(dev); + } + if (result < 0) + goto fail_mem; + + /* Register a character device. */ + cdev_init(&srom_cdev, &srom_fops); + srom_cdev.owner = THIS_MODULE; + srom_cdev.ops = &srom_fops; + result = cdev_add(&srom_cdev, dev, srom_devs); + if (result < 0) + goto fail_chrdev; + + /* Create a parent device */ + srom_parent = platform_device_register_simple("srom", -1, NULL, 0); + if (IS_ERR(srom_parent)) { + result = PTR_ERR(srom_parent); + goto fail_pdev; + } + + /* Create a sysfs class. */ + srom_class = class_create(THIS_MODULE, "srom"); + if (IS_ERR(srom_class)) { + result = PTR_ERR(srom_class); + goto fail_cdev; + } + srom_class->dev_groups = srom_dev_groups; + srom_class->devnode = srom_devnode; + + /* Do per-partition initialization */ + for (i = 0; i < srom_devs; i++) { + result = srom_setup_minor(srom_devices + i, i); + if (result < 0) + goto fail_class; + } + + return 0; + +fail_class: + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); +fail_cdev: + platform_device_unregister(srom_parent); +fail_pdev: + cdev_del(&srom_cdev); +fail_chrdev: + unregister_chrdev_region(dev, srom_devs); +fail_mem: + kfree(srom_devices); + return result; +} + +/** srom_cleanup() - Clean up the driver's module. */ +static void srom_cleanup(void) +{ + int i; + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); + cdev_del(&srom_cdev); + platform_device_unregister(srom_parent); + unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs); + kfree(srom_devices); +} + +module_init(srom_init); +module_exit(srom_cleanup); diff --git a/kernel/drivers/char/tlclk.c b/kernel/drivers/char/tlclk.c new file mode 100644 index 000000000..100cd1de9 --- /dev/null +++ b/kernel/drivers/char/tlclk.c @@ -0,0 +1,937 @@ +/* + * Telecom Clock driver for Intel NetStructure(tm) MPCBL0010 + * + * Copyright (C) 2005 Kontron Canada + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to and the current + * Maintainer + * + * Description : This is the TELECOM CLOCK module driver for the ATCA + * MPCBL0010 ATCA computer. + */ + +#include +#include +#include /* printk() */ +#include /* everything... */ +#include /* error codes */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* inb/outb */ +#include + +MODULE_AUTHOR("Sebastien Bouchard "); +MODULE_LICENSE("GPL"); + +/*Hardware Reset of the PLL */ +#define RESET_ON 0x00 +#define RESET_OFF 0x01 + +/* MODE SELECT */ +#define NORMAL_MODE 0x00 +#define HOLDOVER_MODE 0x10 +#define FREERUN_MODE 0x20 + +/* FILTER SELECT */ +#define FILTER_6HZ 0x04 +#define FILTER_12HZ 0x00 + +/* SELECT REFERENCE FREQUENCY */ +#define REF_CLK1_8kHz 0x00 +#define REF_CLK2_19_44MHz 0x02 + +/* Select primary or secondary redundant clock */ +#define PRIMARY_CLOCK 0x00 +#define SECONDARY_CLOCK 0x01 + +/* CLOCK TRANSMISSION DEFINE */ +#define CLK_8kHz 0xff +#define CLK_16_384MHz 0xfb + +#define CLK_1_544MHz 0x00 +#define CLK_2_048MHz 0x01 +#define CLK_4_096MHz 0x02 +#define CLK_6_312MHz 0x03 +#define CLK_8_192MHz 0x04 +#define CLK_19_440MHz 0x06 + +#define CLK_8_592MHz 0x08 +#define CLK_11_184MHz 0x09 +#define CLK_34_368MHz 0x0b +#define CLK_44_736MHz 0x0a + +/* RECEIVED REFERENCE */ +#define AMC_B1 0 +#define AMC_B2 1 + +/* HARDWARE SWITCHING DEFINE */ +#define HW_ENABLE 0x80 +#define HW_DISABLE 0x00 + +/* HARDWARE SWITCHING MODE DEFINE */ +#define PLL_HOLDOVER 0x40 +#define LOST_CLOCK 0x00 + +/* ALARMS DEFINE */ +#define UNLOCK_MASK 0x10 +#define HOLDOVER_MASK 0x20 +#define SEC_LOST_MASK 0x40 +#define PRI_LOST_MASK 0x80 + +/* INTERRUPT CAUSE DEFINE */ + +#define PRI_LOS_01_MASK 0x01 +#define PRI_LOS_10_MASK 0x02 + +#define SEC_LOS_01_MASK 0x04 +#define SEC_LOS_10_MASK 0x08 + +#define HOLDOVER_01_MASK 0x10 +#define HOLDOVER_10_MASK 0x20 + +#define UNLOCK_01_MASK 0x40 +#define UNLOCK_10_MASK 0x80 + +struct tlclk_alarms { + __u32 lost_clocks; + __u32 lost_primary_clock; + __u32 lost_secondary_clock; + __u32 primary_clock_back; + __u32 secondary_clock_back; + __u32 switchover_primary; + __u32 switchover_secondary; + __u32 pll_holdover; + __u32 pll_end_holdover; + __u32 pll_lost_sync; + __u32 pll_sync; +}; +/* Telecom clock I/O register definition */ +#define TLCLK_BASE 0xa08 +#define TLCLK_REG0 TLCLK_BASE +#define TLCLK_REG1 (TLCLK_BASE+1) +#define TLCLK_REG2 (TLCLK_BASE+2) +#define TLCLK_REG3 (TLCLK_BASE+3) +#define TLCLK_REG4 (TLCLK_BASE+4) +#define TLCLK_REG5 (TLCLK_BASE+5) +#define TLCLK_REG6 (TLCLK_BASE+6) +#define TLCLK_REG7 (TLCLK_BASE+7) + +#define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port) + +/* 0 = Dynamic allocation of the major device number */ +#define TLCLK_MAJOR 0 + +/* sysfs interface definition: +Upon loading the driver will create a sysfs directory under +/sys/devices/platform/telco_clock. + +This directory exports the following interfaces. There operation is +documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4. 
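SET_PORT_BITS() is a read-modify-write on an ISA port: the zero bits of the AND mask select the field being replaced and val supplies its new contents. What the store_*() handlers below effectively do for a one-bit field, unrolled as a sketch:

static inline void demo_set_reg3_bit7(unsigned char on)
{
	unsigned char reg = inb(TLCLK_REG3);	/* read */

	reg &= 0x7f;			/* clear bit 7 (the mask) */
	reg |= (on & 1) << 7;		/* insert the new value */
	outb(reg, TLCLK_REG3);		/* write back */
}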
+alarms : +current_ref : +received_ref_clk3a : +received_ref_clk3b : +enable_clk3a_output : +enable_clk3b_output : +enable_clka0_output : +enable_clka1_output : +enable_clkb0_output : +enable_clkb1_output : +filter_select : +hardware_switching : +hardware_switching_mode : +telclock_version : +mode_select : +refalign : +reset : +select_amcb1_transmit_clock : +select_amcb2_transmit_clock : +select_redundant_clock : +select_ref_frequency : + +All sysfs interfaces are integers in hex format, i.e echo 99 > refalign +has the same effect as echo 0x99 > refalign. +*/ + +static unsigned int telclk_interrupt; + +static int int_events; /* Event that generate a interrupt */ +static int got_event; /* if events processing have been done */ + +static void switchover_timeout(unsigned long data); +static struct timer_list switchover_timer = + TIMER_INITIALIZER(switchover_timeout , 0, 0); +static unsigned long tlclk_timer_data; + +static struct tlclk_alarms *alarm_events; + +static DEFINE_SPINLOCK(event_lock); + +static int tlclk_major = TLCLK_MAJOR; + +static irqreturn_t tlclk_interrupt(int irq, void *dev_id); + +static DECLARE_WAIT_QUEUE_HEAD(wq); + +static unsigned long useflags; +static DEFINE_MUTEX(tlclk_mutex); + +static int tlclk_open(struct inode *inode, struct file *filp) +{ + int result; + + mutex_lock(&tlclk_mutex); + if (test_and_set_bit(0, &useflags)) { + result = -EBUSY; + /* this legacy device is always one per system and it doesn't + * know how to handle multiple concurrent clients. + */ + goto out; + } + + /* Make sure there is no interrupt pending while + * initialising interrupt handler */ + inb(TLCLK_REG6); + + /* This device is wired through the FPGA IO space of the ATCA blade + * we can't share this IRQ */ + result = request_irq(telclk_interrupt, &tlclk_interrupt, + 0, "telco_clock", tlclk_interrupt); + if (result == -EBUSY) + printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n"); + else + inb(TLCLK_REG6); /* Clear interrupt events */ + +out: + mutex_unlock(&tlclk_mutex); + return result; +} + +static int tlclk_release(struct inode *inode, struct file *filp) +{ + free_irq(telclk_interrupt, tlclk_interrupt); + clear_bit(0, &useflags); + + return 0; +} + +static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + if (count < sizeof(struct tlclk_alarms)) + return -EIO; + if (mutex_lock_interruptible(&tlclk_mutex)) + return -EINTR; + + + wait_event_interruptible(wq, got_event); + if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) { + mutex_unlock(&tlclk_mutex); + return -EFAULT; + } + + memset(alarm_events, 0, sizeof(struct tlclk_alarms)); + got_event = 0; + + mutex_unlock(&tlclk_mutex); + return sizeof(struct tlclk_alarms); +} + +static const struct file_operations tlclk_fops = { + .read = tlclk_read, + .open = tlclk_open, + .release = tlclk_release, + .llseek = noop_llseek, + +}; + +static struct miscdevice tlclk_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "telco_clock", + .fops = &tlclk_fops, +}; + +static ssize_t show_current_ref(struct device *d, + struct device_attribute *attr, char *buf) +{ + unsigned long ret_val; + unsigned long flags; + + spin_lock_irqsave(&event_lock, flags); + ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3); + spin_unlock_irqrestore(&event_lock, flags); + + return sprintf(buf, "0x%lX\n", ret_val); +} + +static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL); + + +static ssize_t show_telclock_version(struct device *d, + struct device_attribute *attr, char *buf) +{ + unsigned long ret_val; + 
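tlclk_read() blocks on the wq/got_event pair; the interrupt handler and the switchover timer further down set the flag and call wake_up(). The same producer/consumer shape in a minimal sketch (demo_* names are placeholders; unlike the driver, the sketch also propagates a signal-interrupted sleep):

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_event;

static int demo_wait(void)
{
	if (wait_event_interruptible(demo_wq, demo_event))
		return -ERESTARTSYS;	/* woken by a signal */
	demo_event = 0;			/* consume the event */
	return 0;
}

static void demo_notify(void)		/* IRQ or timer side */
{
	demo_event = 1;			/* publish first ... */
	wake_up(&demo_wq);		/* ... then wake any reader */
}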
unsigned long flags; + + spin_lock_irqsave(&event_lock, flags); + ret_val = inb(TLCLK_REG5); + spin_unlock_irqrestore(&event_lock, flags); + + return sprintf(buf, "0x%lX\n", ret_val); +} + +static DEVICE_ATTR(telclock_version, S_IRUGO, + show_telclock_version, NULL); + +static ssize_t show_alarms(struct device *d, + struct device_attribute *attr, char *buf) +{ + unsigned long ret_val; + unsigned long flags; + + spin_lock_irqsave(&event_lock, flags); + ret_val = (inb(TLCLK_REG2) & 0xf0); + spin_unlock_irqrestore(&event_lock, flags); + + return sprintf(buf, "0x%lX\n", ret_val); +} + +static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); + +static ssize_t store_received_ref_clk3a(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, ": tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG1, 0xef, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(received_ref_clk3a, (S_IWUSR|S_IWGRP), NULL, + store_received_ref_clk3a); + + +static ssize_t store_received_ref_clk3b(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, ": tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(received_ref_clk3b, (S_IWUSR|S_IWGRP), NULL, + store_received_ref_clk3b); + + +static ssize_t store_enable_clk3b_output(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, ": tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(enable_clk3b_output, (S_IWUSR|S_IWGRP), NULL, + store_enable_clk3b_output); + +static ssize_t store_enable_clk3a_output(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long flags; + unsigned long tmp; + unsigned char val; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(enable_clk3a_output, (S_IWUSR|S_IWGRP), NULL, + store_enable_clk3a_output); + +static ssize_t store_enable_clkb1_output(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long flags; + unsigned long tmp; + unsigned char val; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(enable_clkb1_output, (S_IWUSR|S_IWGRP), NULL, + store_enable_clkb1_output); + + +static ssize_t store_enable_clka1_output(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + 
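Every store_*() handler here follows one template: sscanf(buf, "%lX", ...) parses the value as hex (so the 0x prefix is optional, as the header comment notes), the low bits are shifted to the field's position, and the port update runs under event_lock. From user space that is a plain write to the attribute file, for example:

#include <stdio.h>

int main(void)
{
	/* path follows the "telco_clock" platform device registered below */
	FILE *f = fopen("/sys/devices/platform/telco_clock/"
			"enable_clk3a_output", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");	/* parsed as hex 0x1 */
	fclose(f);
	return 0;
}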
unsigned long flags; + unsigned long tmp; + unsigned char val; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(enable_clka1_output, (S_IWUSR|S_IWGRP), NULL, + store_enable_clka1_output); + +static ssize_t store_enable_clkb0_output(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long flags; + unsigned long tmp; + unsigned char val; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(enable_clkb0_output, (S_IWUSR|S_IWGRP), NULL, + store_enable_clkb0_output); + +static ssize_t store_enable_clka0_output(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long flags; + unsigned long tmp; + unsigned char val; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG2, 0xfe, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(enable_clka0_output, (S_IWUSR|S_IWGRP), NULL, + store_enable_clka0_output); + +static ssize_t store_select_amcb2_transmit_clock(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long flags; + unsigned long tmp; + unsigned char val; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { + SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28); + SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); + } else if (val >= CLK_8_592MHz) { + SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38); + switch (val) { + case CLK_8_592MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); + break; + case CLK_11_184MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); + break; + case CLK_34_368MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); + break; + case CLK_44_736MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); + break; + } + } else + SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3); + + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(select_amcb2_transmit_clock, (S_IWUSR|S_IWGRP), NULL, + store_select_amcb2_transmit_clock); + +static ssize_t store_select_amcb1_transmit_clock(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { + SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5); + SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); + } else if (val >= CLK_8_592MHz) { + SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7); + switch (val) { + case CLK_8_592MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); + break; + case CLK_11_184MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); + break; + case CLK_34_368MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); + break; + case CLK_44_736MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); + break; + } + } else + SET_PORT_BITS(TLCLK_REG3, 0xf8, val); + 
spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(select_amcb1_transmit_clock, (S_IWUSR|S_IWGRP), NULL, + store_select_amcb1_transmit_clock); + +static ssize_t store_select_redundant_clock(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG1, 0xfe, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(select_redundant_clock, (S_IWUSR|S_IWGRP), NULL, + store_select_redundant_clock); + +static ssize_t store_select_ref_frequency(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG1, 0xfd, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(select_ref_frequency, (S_IWUSR|S_IWGRP), NULL, + store_select_ref_frequency); + +static ssize_t store_filter_select(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG0, 0xfb, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(filter_select, (S_IWUSR|S_IWGRP), NULL, store_filter_select); + +static ssize_t store_hardware_switching_mode(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG0, 0xbf, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(hardware_switching_mode, (S_IWUSR|S_IWGRP), NULL, + store_hardware_switching_mode); + +static ssize_t store_hardware_switching(struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG0, 0x7f, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(hardware_switching, (S_IWUSR|S_IWGRP), NULL, + store_hardware_switching); + +static ssize_t store_refalign (struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG0, 0xf7, 0); + SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08); + SET_PORT_BITS(TLCLK_REG0, 0xf7, 0); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(refalign, (S_IWUSR|S_IWGRP), NULL, store_refalign); + +static ssize_t store_mode_select (struct device *d, + struct 
device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG0, 0xcf, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(mode_select, (S_IWUSR|S_IWGRP), NULL, store_mode_select); + +static ssize_t store_reset (struct device *d, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long tmp; + unsigned char val; + unsigned long flags; + + sscanf(buf, "%lX", &tmp); + dev_dbg(d, "tmp = 0x%lX\n", tmp); + + val = (unsigned char)tmp; + spin_lock_irqsave(&event_lock, flags); + SET_PORT_BITS(TLCLK_REG4, 0xfd, val); + spin_unlock_irqrestore(&event_lock, flags); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset); + +static struct attribute *tlclk_sysfs_entries[] = { + &dev_attr_current_ref.attr, + &dev_attr_telclock_version.attr, + &dev_attr_alarms.attr, + &dev_attr_received_ref_clk3a.attr, + &dev_attr_received_ref_clk3b.attr, + &dev_attr_enable_clk3a_output.attr, + &dev_attr_enable_clk3b_output.attr, + &dev_attr_enable_clkb1_output.attr, + &dev_attr_enable_clka1_output.attr, + &dev_attr_enable_clkb0_output.attr, + &dev_attr_enable_clka0_output.attr, + &dev_attr_select_amcb1_transmit_clock.attr, + &dev_attr_select_amcb2_transmit_clock.attr, + &dev_attr_select_redundant_clock.attr, + &dev_attr_select_ref_frequency.attr, + &dev_attr_filter_select.attr, + &dev_attr_hardware_switching_mode.attr, + &dev_attr_hardware_switching.attr, + &dev_attr_refalign.attr, + &dev_attr_mode_select.attr, + &dev_attr_reset.attr, + NULL +}; + +static struct attribute_group tlclk_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = tlclk_sysfs_entries, +}; + +static struct platform_device *tlclk_device; + +static int __init tlclk_init(void) +{ + int ret; + + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); + if (ret < 0) { + printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); + return ret; + } + tlclk_major = ret; + alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); + if (!alarm_events) { + ret = -ENOMEM; + goto out1; + } + + /* Read telecom clock IRQ number (Set by BIOS) */ + if (!request_region(TLCLK_BASE, 8, "telco_clock")) { + printk(KERN_ERR "tlclk: request_region 0x%X failed.\n", + TLCLK_BASE); + ret = -EBUSY; + goto out2; + } + telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); + + if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? 
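Because TLCLK_MAJOR is 0, the register_chrdev() call above asks for a dynamic major, which is returned on success and must be kept for unregister_chrdev() at cleanup. The convention as a compact sketch with placeholder names:

static int demo_major;	/* 0 requests a dynamic major */

static int demo_register(const struct file_operations *fops)
{
	int ret = register_chrdev(demo_major, "demo", fops);

	if (ret < 0)
		return ret;		/* none free, or requested major busy */
	if (demo_major == 0)
		demo_major = ret;	/* keep the kernel's choice */
	return 0;
}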
*/ + printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", + telclk_interrupt); + ret = -ENXIO; + goto out3; + } + + init_timer(&switchover_timer); + + ret = misc_register(&tlclk_miscdev); + if (ret < 0) { + printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret); + goto out3; + } + + tlclk_device = platform_device_register_simple("telco_clock", + -1, NULL, 0); + if (IS_ERR(tlclk_device)) { + printk(KERN_ERR "tlclk: platform_device_register failed.\n"); + ret = PTR_ERR(tlclk_device); + goto out4; + } + + ret = sysfs_create_group(&tlclk_device->dev.kobj, + &tlclk_attribute_group); + if (ret) { + printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n"); + goto out5; + } + + return 0; +out5: + platform_device_unregister(tlclk_device); +out4: + misc_deregister(&tlclk_miscdev); +out3: + release_region(TLCLK_BASE, 8); +out2: + kfree(alarm_events); +out1: + unregister_chrdev(tlclk_major, "telco_clock"); + return ret; +} + +static void __exit tlclk_cleanup(void) +{ + sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group); + platform_device_unregister(tlclk_device); + misc_deregister(&tlclk_miscdev); + unregister_chrdev(tlclk_major, "telco_clock"); + + release_region(TLCLK_BASE, 8); + del_timer_sync(&switchover_timer); + kfree(alarm_events); + +} + +static void switchover_timeout(unsigned long data) +{ + unsigned long flags = *(unsigned long *) data; + + if ((flags & 1)) { + if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08)) + alarm_events->switchover_primary++; + } else { + if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08)) + alarm_events->switchover_secondary++; + } + + /* Alarm processing is done, wake up read task */ + del_timer(&switchover_timer); + got_event = 1; + wake_up(&wq); +} + +static irqreturn_t tlclk_interrupt(int irq, void *dev_id) +{ + unsigned long flags; + + spin_lock_irqsave(&event_lock, flags); + /* Read and clear interrupt events */ + int_events = inb(TLCLK_REG6); + + /* Primary_Los changed from 0 to 1 ? */ + if (int_events & PRI_LOS_01_MASK) { + if (inb(TLCLK_REG2) & SEC_LOST_MASK) + alarm_events->lost_clocks++; + else + alarm_events->lost_primary_clock++; + } + + /* Primary_Los changed from 1 to 0 ? */ + if (int_events & PRI_LOS_10_MASK) { + alarm_events->primary_clock_back++; + SET_PORT_BITS(TLCLK_REG1, 0xFE, 1); + } + /* Secondary_Los changed from 0 to 1 ? */ + if (int_events & SEC_LOS_01_MASK) { + if (inb(TLCLK_REG2) & PRI_LOST_MASK) + alarm_events->lost_clocks++; + else + alarm_events->lost_secondary_clock++; + } + /* Secondary_Los changed from 1 to 0 ? */ + if (int_events & SEC_LOS_10_MASK) { + alarm_events->secondary_clock_back++; + SET_PORT_BITS(TLCLK_REG1, 0xFE, 0); + } + if (int_events & HOLDOVER_10_MASK) + alarm_events->pll_end_holdover++; + + if (int_events & UNLOCK_01_MASK) + alarm_events->pll_lost_sync++; + + if (int_events & UNLOCK_10_MASK) + alarm_events->pll_sync++; + + /* Holdover changed from 0 to 1 ? 
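switchover_timeout() above uses the timer API of this kernel generation: the callback receives an unsigned long cookie that is stored by hand in timer.data before mod_timer() arms the timer (later kernels replaced this with timer_setup()). A sketch of the arming sequence with placeholder names:

static unsigned long demo_cookie;

static void demo_timeout(unsigned long data)
{
	unsigned long snapshot = *(unsigned long *)data;

	pr_info("state captured at arm time: 0x%lx\n", snapshot);
}

static DEFINE_TIMER(demo_timer, demo_timeout, 0, 0);

static void demo_arm(void)
{
	demo_cookie = 0x08;	/* snapshot some hardware state */
	demo_timer.data = (unsigned long)&demo_cookie;
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(10));
}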
*/ + if (int_events & HOLDOVER_01_MASK) { + alarm_events->pll_holdover++; + + /* TIMEOUT in ~10ms */ + switchover_timer.expires = jiffies + msecs_to_jiffies(10); + tlclk_timer_data = inb(TLCLK_REG1); + switchover_timer.data = (unsigned long) &tlclk_timer_data; + mod_timer(&switchover_timer, switchover_timer.expires); + } else { + got_event = 1; + wake_up(&wq); + } + spin_unlock_irqrestore(&event_lock, flags); + + return IRQ_HANDLED; +} + +module_init(tlclk_init); +module_exit(tlclk_cleanup); diff --git a/kernel/drivers/char/toshiba.c b/kernel/drivers/char/toshiba.c new file mode 100644 index 000000000..014c9d90d --- /dev/null +++ b/kernel/drivers/char/toshiba.c @@ -0,0 +1,546 @@ +/* toshiba.c -- Linux driver for accessing the SMM on Toshiba laptops + * + * Copyright (c) 1996-2001 Jonathan A. Buzzard (jonathan@buzzard.org.uk) + * + * Valuable assistance and patches from: + * Tom May + * Rob Napier + * + * Fn status port numbers for machine ID's courtesy of + * 0xfc02: Scott Eisert + * 0xfc04: Steve VanDevender + * 0xfc08: Garth Berry + * 0xfc0a: Egbert Eich + * 0xfc10: Andrew Lofthouse + * 0xfc11: Spencer Olson + * 0xfc13: Claudius Frankewitz + * 0xfc15: Tom May + * 0xfc17: Dave Konrad + * 0xfc1a: George Betzos + * 0xfc1b: Munemasa Wada + * 0xfc1d: Arthur Liu + * 0xfc5a: Jacques L'helgoualc'h + * 0xfcd1: Mr. Dave Konrad + * + * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + * + * This code is covered by the GNU GPL and you are free to make any + * changes you wish to it under the terms of the license. However the + * code has the potential to render your computer and/or someone else's + * unusable. Please proceed with care when modifying the code. + * + * Note: Unfortunately the laptop hardware can close the System Configuration + * Interface on it's own accord. It is therefore necessary for *all* + * programs using this driver to be aware that *any* SCI call can fail at + * *any* time. It is up to any program to be aware of this eventuality + * and take appropriate steps. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The information used to write this driver has been obtained by reverse + * engineering the software supplied by Toshiba for their portable computers in + * strict accordance with the European Council Directive 92/250/EEC on the legal + * protection of computer programs, and it's implementation into English Law by + * the Copyright (Computer Programs) Regulations 1992 (S.I. 1992 No.3233). 
+ * + */ + +#define TOSH_VERSION "1.11 26/9/2001" +#define TOSH_DEBUG 0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TOSH_MINOR_DEV 181 + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jonathan Buzzard "); +MODULE_DESCRIPTION("Toshiba laptop SMM driver"); +MODULE_SUPPORTED_DEVICE("toshiba"); + +static DEFINE_MUTEX(tosh_mutex); +static int tosh_fn; +module_param_named(fn, tosh_fn, int, 0); +MODULE_PARM_DESC(fn, "User specified Fn key detection port"); + +static int tosh_id; +static int tosh_bios; +static int tosh_date; +static int tosh_sci; +static int tosh_fan; + +static long tosh_ioctl(struct file *, unsigned int, + unsigned long); + + +static const struct file_operations tosh_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tosh_ioctl, + .llseek = noop_llseek, +}; + +static struct miscdevice tosh_device = { + TOSH_MINOR_DEV, + "toshiba", + &tosh_fops +}; + +/* + * Read the Fn key status + */ +#ifdef CONFIG_PROC_FS +static int tosh_fn_status(void) +{ + unsigned char scan; + unsigned long flags; + + if (tosh_fn!=0) { + scan = inb(tosh_fn); + } else { + local_irq_save(flags); + outb(0x8e, 0xe4); + scan = inb(0xe5); + local_irq_restore(flags); + } + + return (int) scan; +} +#endif + + +/* + * For the Portage 610CT and the Tecra 700CS/700CDT emulate the HCI fan function + */ +static int tosh_emulate_fan(SMMRegisters *regs) +{ + unsigned long eax,ecx,flags; + unsigned char al; + + eax = regs->eax & 0xff00; + ecx = regs->ecx & 0xffff; + + /* Portage 610CT */ + + if (tosh_id==0xfccb) { + if (eax==0xfe00) { + /* fan status */ + local_irq_save(flags); + outb(0xbe, 0xe4); + al = inb(0xe5); + local_irq_restore(flags); + regs->eax = 0x00; + regs->ecx = (unsigned int) (al & 0x01); + } + if ((eax==0xff00) && (ecx==0x0000)) { + /* fan off */ + local_irq_save(flags); + outb(0xbe, 0xe4); + al = inb(0xe5); + outb(0xbe, 0xe4); + outb (al | 0x01, 0xe5); + local_irq_restore(flags); + regs->eax = 0x00; + regs->ecx = 0x00; + } + if ((eax==0xff00) && (ecx==0x0001)) { + /* fan on */ + local_irq_save(flags); + outb(0xbe, 0xe4); + al = inb(0xe5); + outb(0xbe, 0xe4); + outb(al & 0xfe, 0xe5); + local_irq_restore(flags); + regs->eax = 0x00; + regs->ecx = 0x01; + } + } + + /* Tecra 700CS/CDT */ + + if (tosh_id==0xfccc) { + if (eax==0xfe00) { + /* fan status */ + local_irq_save(flags); + outb(0xe0, 0xe4); + al = inb(0xe5); + local_irq_restore(flags); + regs->eax = 0x00; + regs->ecx = al & 0x01; + } + if ((eax==0xff00) && (ecx==0x0000)) { + /* fan off */ + local_irq_save(flags); + outb(0xe0, 0xe4); + al = inb(0xe5); + outw(0xe0 | ((al & 0xfe) << 8), 0xe4); + local_irq_restore(flags); + regs->eax = 0x00; + regs->ecx = 0x00; + } + if ((eax==0xff00) && (ecx==0x0001)) { + /* fan on */ + local_irq_save(flags); + outb(0xe0, 0xe4); + al = inb(0xe5); + outw(0xe0 | ((al | 0x01) << 8), 0xe4); + local_irq_restore(flags); + regs->eax = 0x00; + regs->ecx = 0x01; + } + } + + return 0; +} + + +/* + * Put the laptop into System Management Mode + */ +int tosh_smm(SMMRegisters *regs) +{ + int eax; + + asm ("# load the values into the registers\n\t" \ + "pushl %%eax\n\t" \ + "movl 0(%%eax),%%edx\n\t" \ + "push %%edx\n\t" \ + "movl 4(%%eax),%%ebx\n\t" \ + "movl 8(%%eax),%%ecx\n\t" \ + "movl 12(%%eax),%%edx\n\t" \ + "movl 16(%%eax),%%esi\n\t" \ + "movl 20(%%eax),%%edi\n\t" \ + "popl %%eax\n\t" \ + "# call the System Management mode\n\t" \ + "inb $0xb2,%%al\n\t" + "# fill out the memory with the values in the registers\n\t" \ + "xchgl 
%%eax,(%%esp)\n\t" + "movl %%ebx,4(%%eax)\n\t" \ + "movl %%ecx,8(%%eax)\n\t" \ + "movl %%edx,12(%%eax)\n\t" \ + "movl %%esi,16(%%eax)\n\t" \ + "movl %%edi,20(%%eax)\n\t" \ + "popl %%edx\n\t" \ + "movl %%edx,0(%%eax)\n\t" \ + "# setup the return value to the carry flag\n\t" \ + "lahf\n\t" \ + "shrl $8,%%eax\n\t" \ + "andl $1,%%eax\n" \ + : "=a" (eax) + : "a" (regs) + : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); + + return eax; +} +EXPORT_SYMBOL(tosh_smm); + + +static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + SMMRegisters regs; + SMMRegisters __user *argp = (SMMRegisters __user *)arg; + unsigned short ax,bx; + int err; + + if (!argp) + return -EINVAL; + + if (copy_from_user(®s, argp, sizeof(SMMRegisters))) + return -EFAULT; + + switch (cmd) { + case TOSH_SMM: + ax = regs.eax & 0xff00; + bx = regs.ebx & 0xffff; + /* block HCI calls to read/write memory & PCI devices */ + if (((ax==0xff00) || (ax==0xfe00)) && (bx>0x0069)) + return -EINVAL; + + /* do we need to emulate the fan ? */ + mutex_lock(&tosh_mutex); + if (tosh_fan==1) { + if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) { + err = tosh_emulate_fan(®s); + mutex_unlock(&tosh_mutex); + break; + } + } + err = tosh_smm(®s); + mutex_unlock(&tosh_mutex); + break; + default: + return -EINVAL; + } + + if (copy_to_user(argp, ®s, sizeof(SMMRegisters))) + return -EFAULT; + + return (err==0) ? 0:-EINVAL; +} + + +/* + * Print the information for /proc/toshiba + */ +#ifdef CONFIG_PROC_FS +static int proc_toshiba_show(struct seq_file *m, void *v) +{ + int key; + + key = tosh_fn_status(); + + /* Arguments + 0) Linux driver version (this will change if format changes) + 1) Machine ID + 2) SCI version + 3) BIOS version (major, minor) + 4) BIOS date (in SCI date format) + 5) Fn Key status + */ + seq_printf(m, "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x\n", + tosh_id, + (tosh_sci & 0xff00)>>8, + tosh_sci & 0xff, + (tosh_bios & 0xff00)>>8, + tosh_bios & 0xff, + tosh_date, + key); + return 0; +} + +static int proc_toshiba_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_toshiba_show, NULL); +} + +static const struct file_operations proc_toshiba_fops = { + .owner = THIS_MODULE, + .open = proc_toshiba_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + + +/* + * Determine which port to use for the Fn key status + */ +static void tosh_set_fn_port(void) +{ + switch (tosh_id) { + case 0xfc02: case 0xfc04: case 0xfc09: case 0xfc0a: case 0xfc10: + case 0xfc11: case 0xfc13: case 0xfc15: case 0xfc1a: case 0xfc1b: + case 0xfc5a: + tosh_fn = 0x62; + break; + case 0xfc08: case 0xfc17: case 0xfc1d: case 0xfcd1: case 0xfce0: + case 0xfce2: + tosh_fn = 0x68; + break; + default: + tosh_fn = 0x00; + break; + } + + return; +} + + +/* + * Get the machine identification number of the current model + */ +static int tosh_get_machine_id(void __iomem *bios) +{ + int id; + SMMRegisters regs; + unsigned short bx,cx; + unsigned long address; + + id = (0x100*(int) readb(bios+0xfffe))+((int) readb(bios+0xfffa)); + + /* do we have a SCTTable machine identication number on our hands */ + + if (id==0xfc2f) { + + /* start by getting a pointer into the BIOS */ + + regs.eax = 0xc000; + regs.ebx = 0x0000; + regs.ecx = 0x0000; + tosh_smm(®s); + bx = (unsigned short) (regs.ebx & 0xffff); + + /* At this point in the Toshiba routines under MS Windows + the bx register holds 0xe6f5. However my code is producing + a different value! For the time being I will just fudge the + value. 
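tosh_ioctl() above exposes the SMM call through a single TOSH_SMM ioctl carrying an SMMRegisters block in both directions. A user-space sketch; the device path and the function codes (0xfe00 for a read, 0x0004 for the fan) are inferred from the fan-emulation logic earlier, so treat them as assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/toshiba.h>

int main(void)
{
	SMMRegisters regs = { .eax = 0xfe00, .ebx = 0x0004, .ecx = 0 };
	int fd = open("/dev/toshiba", O_RDWR);

	if (fd < 0 || ioctl(fd, TOSH_SMM, &regs) < 0) {
		perror("TOSH_SMM");
		return 1;
	}
	printf("fan is %s\n", (regs.ecx & 1) ? "on" : "off");
	close(fd);
	return 0;
}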
This has been verified on a Satellite Pro 430CDT, + Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */ +#if TOSH_DEBUG + printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx); +#endif + bx = 0xe6f5; + + /* now twiddle with our pointer a bit */ + + address = bx; + cx = readw(bios + address); + address = 9+bx+cx; + cx = readw(bios + address); + address = 0xa+cx; + cx = readw(bios + address); + + /* now construct our machine identification number */ + + id = ((cx & 0xff)<<8)+((cx & 0xff00)>>8); + } + + return id; +} + + +/* + * Probe for the presence of a Toshiba laptop + * + * returns and non-zero if unable to detect the presence of a Toshiba + * laptop, otherwise zero and determines the Machine ID, BIOS version and + * date, and SCI version. + */ +static int tosh_probe(void) +{ + int i,major,minor,day,year,month,flag; + unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 }; + SMMRegisters regs; + void __iomem *bios = ioremap_cache(0xf0000, 0x10000); + + if (!bios) + return -ENOMEM; + + /* extra sanity check for the string "TOSHIBA" in the BIOS because + some machines that are not Toshiba's pass the next test */ + + for (i=0;i<7;i++) { + if (readb(bios+0xe010+i)!=signature[i]) { + printk("toshiba: not a supported Toshiba laptop\n"); + iounmap(bios); + return -ENODEV; + } + } + + /* call the Toshiba SCI support check routine */ + + regs.eax = 0xf0f0; + regs.ebx = 0x0000; + regs.ecx = 0x0000; + flag = tosh_smm(®s); + + /* if this is not a Toshiba laptop carry flag is set and ah=0x86 */ + + if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) { + printk("toshiba: not a supported Toshiba laptop\n"); + iounmap(bios); + return -ENODEV; + } + + /* if we get this far then we are running on a Toshiba (probably)! */ + + tosh_sci = regs.edx & 0xffff; + + /* next get the machine ID of the current laptop */ + + tosh_id = tosh_get_machine_id(bios); + + /* get the BIOS version */ + + major = readb(bios+0xe009)-'0'; + minor = ((readb(bios+0xe00b)-'0')*10)+(readb(bios+0xe00c)-'0'); + tosh_bios = (major*0x100)+minor; + + /* get the BIOS date */ + + day = ((readb(bios+0xfff5)-'0')*10)+(readb(bios+0xfff6)-'0'); + month = ((readb(bios+0xfff8)-'0')*10)+(readb(bios+0xfff9)-'0'); + year = ((readb(bios+0xfffb)-'0')*10)+(readb(bios+0xfffc)-'0'); + tosh_date = (((year-90) & 0x1f)<<10) | ((month & 0xf)<<6) + | ((day & 0x1f)<<1); + + + /* in theory we should check the ports we are going to use for the + fn key detection (and the fan on the Portage 610/Tecra700), and + then request them to stop other drivers using them. However as + the keyboard driver grabs 0x60-0x6f and the pic driver grabs + 0xa0-0xbf we can't. We just have to live dangerously and use the + ports anyway, oh boy! */ + + /* do we need to emulate the fan? 
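The BIOS date computed above is packed into tosh_date as year-since-1990 in bits 10-14, month in bits 6-9 and day in bits 1-5 (bit 0 is unused). A decoding sketch, the inverse of the packing expression:

static void demo_decode_bios_date(int date)
{
	int year  = ((date >> 10) & 0x1f) + 1990;
	int month = (date >> 6) & 0x0f;
	int day   = (date >> 1) & 0x1f;

	printk(KERN_INFO "BIOS date: %04d-%02d-%02d\n", year, month, day);
}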
*/ + + if ((tosh_id==0xfccb) || (tosh_id==0xfccc)) + tosh_fan = 1; + + iounmap(bios); + + return 0; +} + +static int __init toshiba_init(void) +{ + int retval; + /* are we running on a Toshiba laptop */ + + if (tosh_probe()) + return -ENODEV; + + printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n"); + + /* set the port to use for Fn status if not specified as a parameter */ + if (tosh_fn==0x00) + tosh_set_fn_port(); + + /* register the device file */ + retval = misc_register(&tosh_device); + if (retval < 0) + return retval; + +#ifdef CONFIG_PROC_FS + { + struct proc_dir_entry *pde; + + pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops); + if (!pde) { + misc_deregister(&tosh_device); + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void __exit toshiba_exit(void) +{ + remove_proc_entry("toshiba", NULL); + misc_deregister(&tosh_device); +} + +module_init(toshiba_init); +module_exit(toshiba_exit); + diff --git a/kernel/drivers/char/tpm/Kconfig b/kernel/drivers/char/tpm/Kconfig new file mode 100644 index 000000000..3b84a8b1b --- /dev/null +++ b/kernel/drivers/char/tpm/Kconfig @@ -0,0 +1,126 @@ +# +# TPM device configuration +# + +menuconfig TCG_TPM + tristate "TPM Hardware Support" + depends on HAS_IOMEM + select SECURITYFS + ---help--- + If you have a TPM security chip in your system, which + implements the Trusted Computing Group's specification, + say Yes and it will be accessible from within Linux. For + more information see . + An implementation of the Trusted Software Stack (TSS), the + userspace enablement piece of the specification, can be + obtained at: . To + compile this driver as a module, choose M here; the module + will be called tpm. If unsure, say N. + Notes: + 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI + and CONFIG_PNPACPI. + 2) Without ACPI enabled, the BIOS event log won't be accessible, + which is required to validate the PCR 0-7 values. + +if TCG_TPM + +config TCG_TIS + tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface" + depends on X86 + ---help--- + If you have a TPM security chip that is compliant with the + TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO + specification (TPM2.0) say Yes and it will be accessible from + within Linux. To compile this driver as a module, choose M here; + the module will be called tpm_tis. + +config TCG_TIS_I2C_ATMEL + tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" + depends on I2C + ---help--- + If you have an Atmel I2C TPM security chip say Yes and it will be + accessible from within Linux. + To compile this driver as a module, choose M here; the module will + be called tpm_tis_i2c_atmel. + +config TCG_TIS_I2C_INFINEON + tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)" + depends on I2C + ---help--- + If you have a TPM security chip that is compliant with the + TCG TIS 1.2 TPM specification and Infineon's I2C Protocol Stack + Specification 0.20 say Yes and it will be accessible from within + Linux. + To compile this driver as a module, choose M here; the module + will be called tpm_i2c_infineon. + +config TCG_TIS_I2C_NUVOTON + tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)" + depends on I2C + ---help--- + If you have a TPM security chip with an I2C interface from + Nuvoton Technology Corp. say Yes and it will be accessible + from within Linux. + To compile this driver as a module, choose M here; the module + will be called tpm_i2c_nuvoton. 
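Each tristate entry above yields a CONFIG_* symbol, and choosing M builds the named module. An illustrative .config fragment selecting the core plus two of the interfaces described here (one possible combination, not a recommendation):

CONFIG_TCG_TPM=m
CONFIG_TCG_TIS=m
CONFIG_TCG_TIS_I2C_INFINEON=m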
+ +config TCG_NSC + tristate "National Semiconductor TPM Interface" + depends on X86 + ---help--- + If you have a TPM security chip from National Semiconductor + say Yes and it will be accessible from within Linux. To + compile this driver as a module, choose M here; the module + will be called tpm_nsc. + +config TCG_ATMEL + tristate "Atmel TPM Interface" + depends on PPC64 || HAS_IOPORT_MAP + ---help--- + If you have a TPM security chip from Atmel say Yes and it + will be accessible from within Linux. To compile this driver + as a module, choose M here; the module will be called tpm_atmel. + +config TCG_INFINEON + tristate "Infineon Technologies TPM Interface" + depends on PNP + ---help--- + If you have a TPM security chip from Infineon Technologies + (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it + will be accessible from within Linux. + To compile this driver as a module, choose M here; the module + will be called tpm_infineon. + Further information on this driver and the supported hardware + can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ + +config TCG_IBMVTPM + tristate "IBM VTPM Interface" + depends on PPC_PSERIES + ---help--- + If you have IBM virtual TPM (VTPM) support say Yes and it + will be accessible from within Linux. To compile this driver + as a module, choose M here; the module will be called tpm_ibmvtpm. + +config TCG_XEN + tristate "XEN TPM Interface" + depends on TCG_TPM && XEN + select XEN_XENBUS_FRONTEND + ---help--- + If you want to make TPM support available to a Xen user domain, + say Yes and it will be accessible from within Linux. See + the manpages for xl, xl.conf, and docs/misc/vtpm.txt in + the Xen source repository for more details. + To compile this driver as a module, choose M here; the module + will be called xen-tpmfront. + +config TCG_CRB + tristate "TPM 2.0 CRB Interface" + depends on X86 && ACPI + ---help--- + If you have a TPM security chip that is compliant with the + TCG CRB 2.0 TPM specification say Yes and it will be accessible + from within Linux. To compile this driver as a module, choose + M here; the module will be called tpm_crb. + +source "drivers/char/tpm/st33zp24/Kconfig" +endif # TCG_TPM diff --git a/kernel/drivers/char/tpm/Makefile b/kernel/drivers/char/tpm/Makefile new file mode 100644 index 000000000..56e8f1f3d --- /dev/null +++ b/kernel/drivers/char/tpm/Makefile @@ -0,0 +1,25 @@ +# +# Makefile for the kernel tpm device drivers. 
+# +obj-$(CONFIG_TCG_TPM) += tpm.o +tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o +tpm-$(CONFIG_ACPI) += tpm_ppi.o + +ifdef CONFIG_ACPI + tpm-y += tpm_eventlog.o tpm_acpi.o +else +ifdef CONFIG_TCG_IBMVTPM + tpm-y += tpm_eventlog.o tpm_of.o +endif +endif +obj-$(CONFIG_TCG_TIS) += tpm_tis.o +obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o +obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o +obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o +obj-$(CONFIG_TCG_NSC) += tpm_nsc.o +obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o +obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o +obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o +obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/ +obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o +obj-$(CONFIG_TCG_CRB) += tpm_crb.o diff --git a/kernel/drivers/char/tpm/st33zp24/Kconfig b/kernel/drivers/char/tpm/st33zp24/Kconfig new file mode 100644 index 000000000..09cb72786 --- /dev/null +++ b/kernel/drivers/char/tpm/st33zp24/Kconfig @@ -0,0 +1,30 @@ +config TCG_TIS_ST33ZP24 + tristate "STMicroelectronics TPM Interface Specification 1.2 Interface" + depends on GPIOLIB + ---help--- + STMicroelectronics ST33ZP24 core driver. It implements the core + TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will + register against it. + + To compile this driver as a module, choose m here. The module will be called + tpm_st33zp24. + +config TCG_TIS_ST33ZP24_I2C + tristate "TPM 1.2 ST33ZP24 I2C support" + depends on TCG_TIS_ST33ZP24 + depends on I2C + ---help--- + This module adds support for the STMicroelectronics TPM security chip + ST33ZP24 with i2c interface. + To compile this driver as a module, choose M here; the module will be + called tpm_st33zp24_i2c. + +config TCG_TIS_ST33ZP24_SPI + tristate "TPM 1.2 ST33ZP24 SPI support" + depends on TCG_TIS_ST33ZP24 + depends on SPI + ---help--- + This module adds support for the STMicroelectronics TPM security chip + ST33ZP24 with spi interface. + To compile this driver as a module, choose M here; the module will be + called tpm_st33zp24_spi. diff --git a/kernel/drivers/char/tpm/st33zp24/Makefile b/kernel/drivers/char/tpm/st33zp24/Makefile new file mode 100644 index 000000000..74a722e5e --- /dev/null +++ b/kernel/drivers/char/tpm/st33zp24/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for ST33ZP24 TPM 1.2 driver +# + +tpm_st33zp24-objs = st33zp24.o +obj-$(CONFIG_TCG_TIS_ST33ZP24) += tpm_st33zp24.o + +tpm_st33zp24_i2c-objs = i2c.o +obj-$(CONFIG_TCG_TIS_ST33ZP24_I2C) += tpm_st33zp24_i2c.o + +tpm_st33zp24_spi-objs = spi.o +obj-$(CONFIG_TCG_TIS_ST33ZP24_SPI) += tpm_st33zp24_spi.o diff --git a/kernel/drivers/char/tpm/st33zp24/i2c.c b/kernel/drivers/char/tpm/st33zp24/i2c.c new file mode 100644 index 000000000..ad1ee180e --- /dev/null +++ b/kernel/drivers/char/tpm/st33zp24/i2c.c @@ -0,0 +1,276 @@ +/* + * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 + * Copyright (C) 2009 - 2015 STMicroelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
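Both Makefiles above use the kbuild composite-object idiom: the object named on an obj-$(CONFIG_...) line picks up a matching <name>-y or <name>-objs list, so a single module links several source files (tpm.ko from tpm-interface.o and friends, tpm_st33zp24.ko from st33zp24.o). A generic sketch with a made-up CONFIG_FOO:

obj-$(CONFIG_FOO) += foo.o		# foo.ko when CONFIG_FOO=m
foo-objs := foo_core.o foo_i2c.o	# objects linked into foo.o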
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "st33zp24.h" + +#define TPM_DUMMY_BYTE 0xAA + +struct st33zp24_i2c_phy { + struct i2c_client *client; + u8 buf[TPM_BUFSIZE + 1]; + int io_lpcpd; +}; + +/* + * write8_reg + * Send byte to the TIS register according to the ST33ZP24 I2C protocol. + * @param: tpm_register, the tpm tis register where the data should be written + * @param: tpm_data, the tpm_data to write inside the tpm_register + * @param: tpm_size, The length of the data + * @return: Returns negative errno, or else the number of bytes written. + */ +static int write8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) +{ + struct st33zp24_i2c_phy *phy = phy_id; + + phy->buf[0] = tpm_register; + memcpy(phy->buf + 1, tpm_data, tpm_size); + return i2c_master_send(phy->client, phy->buf, tpm_size + 1); +} /* write8_reg() */ + +/* + * read8_reg + * Recv byte from the TIS register according to the ST33ZP24 I2C protocol. + * @param: tpm_register, the tpm tis register where the data should be read + * @param: tpm_data, the TPM response + * @param: tpm_size, tpm TPM response size to read. + * @return: number of byte read successfully: should be one if success. + */ +static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) +{ + struct st33zp24_i2c_phy *phy = phy_id; + u8 status = 0; + u8 data; + + data = TPM_DUMMY_BYTE; + status = write8_reg(phy, tpm_register, &data, 1); + if (status == 2) + status = i2c_master_recv(phy->client, tpm_data, tpm_size); + return status; +} /* read8_reg() */ + +/* + * st33zp24_i2c_send + * Send byte to the TIS register according to the ST33ZP24 I2C protocol. + * @param: phy_id, the phy description + * @param: tpm_register, the tpm tis register where the data should be written + * @param: tpm_data, the tpm_data to write inside the tpm_register + * @param: tpm_size, the length of the data + * @return: number of byte written successfully: should be one if success. + */ +static int st33zp24_i2c_send(void *phy_id, u8 tpm_register, u8 *tpm_data, + int tpm_size) +{ + return write8_reg(phy_id, tpm_register | TPM_WRITE_DIRECTION, tpm_data, + tpm_size); +} + +/* + * st33zp24_i2c_recv + * Recv byte from the TIS register according to the ST33ZP24 I2C protocol. + * @param: phy_id, the phy description + * @param: tpm_register, the tpm tis register where the data should be read + * @param: tpm_data, the TPM response + * @param: tpm_size, tpm TPM response size to read. + * @return: number of byte read successfully: should be one if success. + */ +static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, + int tpm_size) +{ + return read8_reg(phy_id, tpm_register, tpm_data, tpm_size); +} + +static const struct st33zp24_phy_ops i2c_phy_ops = { + .send = st33zp24_i2c_send, + .recv = st33zp24_i2c_recv, +}; + +#ifdef CONFIG_OF +static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) +{ + struct device_node *pp; + struct i2c_client *client = phy->client; + int gpio; + int ret; + + pp = client->dev.of_node; + if (!pp) { + dev_err(&client->dev, "No platform data\n"); + return -ENODEV; + } + + /* Get GPIO from device tree */ + gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); + if (gpio < 0) { + dev_err(&client->dev, + "Failed to retrieve lpcpd-gpios from dts.\n"); + phy->io_lpcpd = -1; + /* + * lpcpd pin is not specified. This is not an issue as + * power management can be also managed by TPM specific + * commands. So leave with a success status code. 
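The two helpers above define the entire on-wire protocol for the I2C phy: a register write is the one-byte TIS register address followed by the payload, and a register read is a dummy write that merely latches the register address, followed by a plain i2c_master_recv(). The following is a minimal sketch (not part of the patch; the example_ name is illustrative) of what a single read of the one-byte TPM_STS register, offset 0x18 in the core driver's register map, comes down to:

/* Sketch: one TPM_STS read via the ST33ZP24 I2C protocol. */
static int example_read_sts(struct st33zp24_i2c_phy *phy, u8 *sts)
{
	u8 frame[2] = { 0x18, TPM_DUMMY_BYTE };	/* register, then dummy byte */
	int ret;

	/* Address cycle: register number plus one dummy payload byte. */
	ret = i2c_master_send(phy->client, frame, sizeof(frame));
	if (ret != sizeof(frame))
		return ret < 0 ? ret : -EIO;

	/* The chip now answers with the register content. */
	ret = i2c_master_recv(phy->client, sts, 1);
	return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
}

This is why read8_reg() above treats a return value of 2 from the write phase as success: the address byte plus one dummy byte must both have been acknowledged before the receive is attempted.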
+ */ + return 0; + } + /* GPIO request and configuration */ + ret = devm_gpio_request_one(&client->dev, gpio, + GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD"); + if (ret) { + dev_err(&client->dev, "Failed to request lpcpd pin\n"); + return -ENODEV; + } + phy->io_lpcpd = gpio; + + return 0; +} +#else +static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) +{ + return -ENODEV; +} +#endif + +static int st33zp24_i2c_request_resources(struct i2c_client *client, + struct st33zp24_i2c_phy *phy) +{ + struct st33zp24_platform_data *pdata; + int ret; + + pdata = client->dev.platform_data; + if (!pdata) { + dev_err(&client->dev, "No platform data\n"); + return -ENODEV; + } + + /* store for late use */ + phy->io_lpcpd = pdata->io_lpcpd; + + if (gpio_is_valid(pdata->io_lpcpd)) { + ret = devm_gpio_request_one(&client->dev, + pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH, + "TPM IO_LPCPD"); + if (ret) { + dev_err(&client->dev, "Failed to request lpcpd pin\n"); + return ret; + } + } + + return 0; +} + +/* + * st33zp24_i2c_probe initialize the TPM device + * @param: client, the i2c_client drescription (TPM I2C description). + * @param: id, the i2c_device_id struct. + * @return: 0 in case of success. + * -1 in other case. + */ +static int st33zp24_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct st33zp24_platform_data *pdata; + struct st33zp24_i2c_phy *phy; + + if (!client) { + pr_info("%s: i2c client is NULL. Device not accessible.\n", + __func__); + return -ENODEV; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_info(&client->dev, "client not i2c capable\n"); + return -ENODEV; + } + + phy = devm_kzalloc(&client->dev, sizeof(struct st33zp24_i2c_phy), + GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->client = client; + pdata = client->dev.platform_data; + if (!pdata && client->dev.of_node) { + ret = st33zp24_i2c_of_request_resources(phy); + if (ret) + return ret; + } else if (pdata) { + ret = st33zp24_i2c_request_resources(client, phy); + if (ret) + return ret; + } + + return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq, + phy->io_lpcpd); +} + +/* + * st33zp24_i2c_remove remove the TPM device + * @param: client, the i2c_client description (TPM I2C description). + * @return: 0 in case of success. 
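Since st33zp24_i2c_probe() accepts its resources either from devicetree or from legacy platform data, a board without DT or ACPI can still declare the device from board code. A hedged sketch, assuming the platform-data header location, with bus 0, address 0x13 and GPIO 12 purely as example values (all example_ names are illustrative):

#include <linux/i2c.h>
#include <linux/platform_data/st33zp24.h>	/* assumed header location */

static struct st33zp24_platform_data example_tpm_pdata = {
	.io_lpcpd = 12,				/* example lpcpd GPIO number */
};

static struct i2c_board_info example_tpm_info __initdata = {
	I2C_BOARD_INFO(TPM_ST33_I2C, 0x13),	/* example 7-bit address */
	.platform_data = &example_tpm_pdata,
};

static int __init example_board_init(void)
{
	/* Must run before the adapter registers; bus 0 is an example. */
	return i2c_register_board_info(0, &example_tpm_info, 1);
}

The pdata struct itself has to outlive init because only the board_info is copied, which is why it is not marked __initdata above.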
+ */ +static int st33zp24_i2c_remove(struct i2c_client *client) +{ + struct tpm_chip *chip = i2c_get_clientdata(client); + + return st33zp24_remove(chip); +} + +static const struct i2c_device_id st33zp24_i2c_id[] = { + {TPM_ST33_I2C, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); + +#ifdef CONFIG_OF +static const struct of_device_id of_st33zp24_i2c_match[] = { + { .compatible = "st,st33zp24-i2c", }, + {} +}; +MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match); +#endif + +static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend, + st33zp24_pm_resume); + +static struct i2c_driver st33zp24_i2c_driver = { + .driver = { + .owner = THIS_MODULE, + .name = TPM_ST33_I2C, + .pm = &st33zp24_i2c_ops, + .of_match_table = of_match_ptr(of_st33zp24_i2c_match), + }, + .probe = st33zp24_i2c_probe, + .remove = st33zp24_i2c_remove, + .id_table = st33zp24_i2c_id +}; + +module_i2c_driver(st33zp24_i2c_driver); + +MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); +MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver"); +MODULE_VERSION("1.3.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/st33zp24/spi.c b/kernel/drivers/char/tpm/st33zp24/spi.c new file mode 100644 index 000000000..f0184a1b0 --- /dev/null +++ b/kernel/drivers/char/tpm/st33zp24/spi.c @@ -0,0 +1,399 @@ +/* + * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24 + * Copyright (C) 2009 - 2015 STMicroelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "st33zp24.h" + +#define TPM_DATA_FIFO 0x24 +#define TPM_INTF_CAPABILITY 0x14 + +#define TPM_DUMMY_BYTE 0x00 + +#define MAX_SPI_LATENCY 15 +#define LOCALITY0 0 + +#define ST33ZP24_OK 0x5A +#define ST33ZP24_UNDEFINED_ERR 0x80 +#define ST33ZP24_BADLOCALITY 0x81 +#define ST33ZP24_TISREGISTER_UKNOWN 0x82 +#define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83 +#define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84 +#define ST33ZP24_BAD_COMMAND_ORDER 0x85 +#define ST33ZP24_INCORECT_RECEIVED_LENGTH 0x86 +#define ST33ZP24_TPM_FIFO_OVERFLOW 0x89 +#define ST33ZP24_UNEXPECTED_READ_FIFO 0x8A +#define ST33ZP24_UNEXPECTED_WRITE_FIFO 0x8B +#define ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END 0x90 +#define ST33ZP24_DUMMY_BYTES 0x00 + +/* + * TPM command can be up to 2048 byte, A TPM response can be up to + * 1024 byte. + * Between command and response, there are latency byte (up to 15 + * usually on st33zp24 2 are enough). + * + * Overall when sending a command and expecting an answer we need if + * worst case: + * 2048 (for the TPM command) + 1024 (for the TPM answer). We need + * some latency byte before the answer is available (max 15). + * We have 2048 + 1024 + 15. 
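The sizing comment above is easiest to check against the frame layout: the TX half of a transfer carries a two-byte pre-header (direction/locality byte plus the target TIS register), an optional 16-bit length for FIFO writes, the command itself and the trailing latency filler, while the response travels back in the RX half of the same full-duplex transfer. A sketch of the worst-case arithmetic (illustrative only, mirroring ST33ZP24_SPI_BUFFER_SIZE defined just below):

/* Sketch: the worst case the SPI buffers have to cover per direction. */
static size_t example_worst_case_len(void)
{
	size_t cmd = 2048;	/* TPM_BUFSIZE: largest TPM command        */
	size_t rsp = 1024;	/* TPM_BUFSIZE / 2: largest TPM response   */
	size_t lat = 15;	/* MAX_SPI_LATENCY: dummy bytes in between */

	return cmd + rsp + lat;	/* 3087 bytes */
}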
+ */ +#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\ + MAX_SPI_LATENCY) + + +struct st33zp24_spi_phy { + struct spi_device *spi_device; + struct spi_transfer spi_xfer; + u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE]; + u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE]; + + int io_lpcpd; + int latency; +}; + +static int st33zp24_status_to_errno(u8 code) +{ + switch (code) { + case ST33ZP24_OK: + return 0; + case ST33ZP24_UNDEFINED_ERR: + case ST33ZP24_BADLOCALITY: + case ST33ZP24_TISREGISTER_UKNOWN: + case ST33ZP24_LOCALITY_NOT_ACTIVATED: + case ST33ZP24_HASH_END_BEFORE_HASH_START: + case ST33ZP24_BAD_COMMAND_ORDER: + case ST33ZP24_UNEXPECTED_READ_FIFO: + case ST33ZP24_UNEXPECTED_WRITE_FIFO: + case ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END: + return -EPROTO; + case ST33ZP24_INCORECT_RECEIVED_LENGTH: + case ST33ZP24_TPM_FIFO_OVERFLOW: + return -EMSGSIZE; + case ST33ZP24_DUMMY_BYTES: + return -ENOSYS; + } + return code; +} + +/* + * st33zp24_spi_send + * Send byte to the TIS register according to the ST33ZP24 SPI protocol. + * @param: phy_id, the phy description + * @param: tpm_register, the tpm tis register where the data should be written + * @param: tpm_data, the tpm_data to write inside the tpm_register + * @param: tpm_size, The length of the data + * @return: should be zero if success else a negative error code. + */ +static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, + int tpm_size) +{ + u8 data = 0; + int total_length = 0, nbr_dummy_bytes = 0, ret = 0; + struct st33zp24_spi_phy *phy = phy_id; + struct spi_device *dev = phy->spi_device; + u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf; + u8 *rx_buf = phy->spi_xfer.rx_buf; + + /* Pre-Header */ + data = TPM_WRITE_DIRECTION | LOCALITY0; + memcpy(tx_buf + total_length, &data, sizeof(data)); + total_length++; + data = tpm_register; + memcpy(tx_buf + total_length, &data, sizeof(data)); + total_length++; + + if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) { + tx_buf[total_length++] = tpm_size >> 8; + tx_buf[total_length++] = tpm_size; + } + + memcpy(&tx_buf[total_length], tpm_data, tpm_size); + total_length += tpm_size; + + nbr_dummy_bytes = phy->latency; + memset(&tx_buf[total_length], TPM_DUMMY_BYTE, nbr_dummy_bytes); + + phy->spi_xfer.len = total_length + nbr_dummy_bytes; + + ret = spi_sync_transfer(dev, &phy->spi_xfer, 1); + if (ret == 0) + ret = rx_buf[total_length + nbr_dummy_bytes - 1]; + + return st33zp24_status_to_errno(ret); +} /* st33zp24_spi_send() */ + +/* + * read8_recv + * Recv byte from the TIS register according to the ST33ZP24 SPI protocol. + * @param: phy_id, the phy description + * @param: tpm_register, the tpm tis register where the data should be read + * @param: tpm_data, the TPM response + * @param: tpm_size, tpm TPM response size to read. + * @return: should be zero if success else a negative error code. 
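Because the bus is full duplex, st33zp24_spi_send() above recovers the chip's status code from the receive buffer at the index of the last transmitted dummy byte, rx_buf[total_length + nbr_dummy_bytes - 1], and then maps it through st33zp24_status_to_errno(), so ST33ZP24_OK becomes 0 and the protocol errors become negative errno values. A worked sketch of the index arithmetic (illustrative only):

/*
 * Sketch: RX index of the chip's status byte for a non-FIFO register
 * write. Example: payload_len = 1, latency = 2 -> 2 + 1 + 2 - 1 = 4,
 * so the status arrives in rx_buf[4].
 */
static int example_status_index(int payload_len, int latency)
{
	int total_length = 2 + payload_len;	/* pre-header + payload */

	return total_length + latency - 1;	/* last dummy byte position */
}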
+ */ +static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) +{ + u8 data = 0; + int total_length = 0, nbr_dummy_bytes, ret; + struct st33zp24_spi_phy *phy = phy_id; + struct spi_device *dev = phy->spi_device; + u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf; + u8 *rx_buf = phy->spi_xfer.rx_buf; + + /* Pre-Header */ + data = LOCALITY0; + memcpy(tx_buf + total_length, &data, sizeof(data)); + total_length++; + data = tpm_register; + memcpy(tx_buf + total_length, &data, sizeof(data)); + total_length++; + + nbr_dummy_bytes = phy->latency; + memset(&tx_buf[total_length], TPM_DUMMY_BYTE, + nbr_dummy_bytes + tpm_size); + + phy->spi_xfer.len = total_length + nbr_dummy_bytes + tpm_size; + + /* header + status byte + size of the data + status byte */ + ret = spi_sync_transfer(dev, &phy->spi_xfer, 1); + if (tpm_size > 0 && ret == 0) { + ret = rx_buf[total_length + nbr_dummy_bytes - 1]; + + memcpy(tpm_data, rx_buf + total_length + nbr_dummy_bytes, + tpm_size); + } + + return ret; +} /* read8_reg() */ + +/* + * st33zp24_spi_recv + * Recv byte from the TIS register according to the ST33ZP24 SPI protocol. + * @param: phy_id, the phy description + * @param: tpm_register, the tpm tis register where the data should be read + * @param: tpm_data, the TPM response + * @param: tpm_size, tpm TPM response size to read. + * @return: number of byte read successfully: should be one if success. + */ +static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, + int tpm_size) +{ + int ret; + + ret = read8_reg(phy_id, tpm_register, tpm_data, tpm_size); + if (!st33zp24_status_to_errno(ret)) + return tpm_size; + return ret; +} /* st33zp24_spi_recv() */ + +static int evaluate_latency(void *phy_id) +{ + struct st33zp24_spi_phy *phy = phy_id; + int latency = 1, status = 0; + u8 data = 0; + + while (!status && latency < MAX_SPI_LATENCY) { + phy->latency = latency; + status = read8_reg(phy_id, TPM_INTF_CAPABILITY, &data, 1); + latency++; + } + return latency - 1; +} /* evaluate_latency() */ + +static const struct st33zp24_phy_ops spi_phy_ops = { + .send = st33zp24_spi_send, + .recv = st33zp24_spi_recv, +}; + +#ifdef CONFIG_OF +static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) +{ + struct device_node *pp; + struct spi_device *dev = phy->spi_device; + int gpio; + int ret; + + pp = dev->dev.of_node; + if (!pp) { + dev_err(&dev->dev, "No platform data\n"); + return -ENODEV; + } + + /* Get GPIO from device tree */ + gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); + if (gpio < 0) { + dev_err(&dev->dev, + "Failed to retrieve lpcpd-gpios from dts.\n"); + phy->io_lpcpd = -1; + /* + * lpcpd pin is not specified. This is not an issue as + * power management can be also managed by TPM specific + * commands. So leave with a success status code. 
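read8_reg() above parses the mirror layout on the receive side: the first two RX bytes line up with the pre-header and are discarded, the next nbr_dummy_bytes cover the chip's latency with the status code in the last of them, and the register data follows. Reduced to a sketch (illustrative only, error handling elided):

/* Sketch: parsing the RX half of an n-byte register read. */
static int example_parse_read(const u8 *rx_buf, int latency, u8 *out, int n)
{
	int hdr = 2;				/* mirrors the two pre-header bytes */
	u8 status = rx_buf[hdr + latency - 1];	/* last latency byte = status code  */

	memcpy(out, rx_buf + hdr + latency, n);	/* register data follows the filler */
	return status;				/* caller maps via status_to_errno() */
}

evaluate_latency() above is what fixes the latency value per chip: it grows the dummy-byte count from one until a TPM_INTF_CAPABILITY read actually succeeds, and the probe rejects the device if no count up to MAX_SPI_LATENCY works.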
+ */ + return 0; + } + /* GPIO request and configuration */ + ret = devm_gpio_request_one(&dev->dev, gpio, + GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD"); + if (ret) { + dev_err(&dev->dev, "Failed to request lpcpd pin\n"); + return -ENODEV; + } + phy->io_lpcpd = gpio; + + return 0; +} +#else +static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) +{ + return -ENODEV; +} +#endif + +static int tpm_stm_spi_request_resources(struct spi_device *dev, + struct st33zp24_spi_phy *phy) +{ + struct st33zp24_platform_data *pdata; + int ret; + + pdata = dev->dev.platform_data; + if (!pdata) { + dev_err(&dev->dev, "No platform data\n"); + return -ENODEV; + } + + /* store for late use */ + phy->io_lpcpd = pdata->io_lpcpd; + + if (gpio_is_valid(pdata->io_lpcpd)) { + ret = devm_gpio_request_one(&dev->dev, + pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH, + "TPM IO_LPCPD"); + if (ret) { + dev_err(&dev->dev, "%s : reset gpio_request failed\n", + __FILE__); + return ret; + } + } + + return 0; +} + +/* + * tpm_st33_spi_probe initialize the TPM device + * @param: dev, the spi_device drescription (TPM SPI description). + * @return: 0 in case of success. + * or a negative value describing the error. + */ +static int +tpm_st33_spi_probe(struct spi_device *dev) +{ + int ret; + struct st33zp24_platform_data *pdata; + struct st33zp24_spi_phy *phy; + + /* Check SPI platform functionnalities */ + if (!dev) { + pr_info("%s: dev is NULL. Device is not accessible.\n", + __func__); + return -ENODEV; + } + + phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy), + GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->spi_device = dev; + pdata = dev->dev.platform_data; + if (!pdata && dev->dev.of_node) { + ret = tpm_stm_spi_of_request_resources(phy); + if (ret) + return ret; + } else if (pdata) { + ret = tpm_stm_spi_request_resources(dev, phy); + if (ret) + return ret; + } + + phy->spi_xfer.tx_buf = phy->tx_buf; + phy->spi_xfer.rx_buf = phy->rx_buf; + + phy->latency = evaluate_latency(phy); + if (phy->latency <= 0) + return -ENODEV; + + return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq, + phy->io_lpcpd); +} + +/* + * tpm_st33_spi_remove remove the TPM device + * @param: client, the spi_device drescription (TPM SPI description). + * @return: 0 in case of success. 
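Note how the probe above wires one reusable spi_transfer: tx_buf and rx_buf both point at the phy's fixed buffers, so every exchange is a single full-duplex spi_sync_transfer() call with only the length varied. In sketch form (the example_ name is illustrative):

/* Sketch: the single full-duplex transfer the phy reuses per exchange. */
static int example_xfer(struct st33zp24_spi_phy *phy, size_t frame_len)
{
	struct spi_transfer xfer = {
		.tx_buf	= phy->tx_buf,	/* pre-header, payload, dummy bytes */
		.rx_buf	= phy->rx_buf,	/* latency filler, status, response */
		.len	= frame_len,
	};

	/* One transfer; TX and RX bytes are clocked simultaneously. */
	return spi_sync_transfer(phy->spi_device, &xfer, 1);
}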
+ */ +static int tpm_st33_spi_remove(struct spi_device *dev) +{ + struct tpm_chip *chip = spi_get_drvdata(dev); + + return st33zp24_remove(chip); +} + +static const struct spi_device_id st33zp24_spi_id[] = { + {TPM_ST33_SPI, 0}, + {} +}; +MODULE_DEVICE_TABLE(spi, st33zp24_spi_id); + +#ifdef CONFIG_OF +static const struct of_device_id of_st33zp24_spi_match[] = { + { .compatible = "st,st33zp24-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match); +#endif + +static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend, + st33zp24_pm_resume); + +static struct spi_driver tpm_st33_spi_driver = { + .driver = { + .owner = THIS_MODULE, + .name = TPM_ST33_SPI, + .pm = &st33zp24_spi_ops, + .of_match_table = of_match_ptr(of_st33zp24_spi_match), + }, + .probe = tpm_st33_spi_probe, + .remove = tpm_st33_spi_remove, + .id_table = st33zp24_spi_id, +}; + +module_spi_driver(tpm_st33_spi_driver); + +MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); +MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver"); +MODULE_VERSION("1.3.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/st33zp24/st33zp24.c b/kernel/drivers/char/tpm/st33zp24/st33zp24.c new file mode 100644 index 000000000..8d626784c --- /dev/null +++ b/kernel/drivers/char/tpm/st33zp24/st33zp24.c @@ -0,0 +1,698 @@ +/* + * STMicroelectronics TPM Linux driver for TPM ST33ZP24 + * Copyright (C) 2009 - 2015 STMicroelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../tpm.h" +#include "st33zp24.h" + +#define TPM_ACCESS 0x0 +#define TPM_STS 0x18 +#define TPM_DATA_FIFO 0x24 +#define TPM_INTF_CAPABILITY 0x14 +#define TPM_INT_STATUS 0x10 +#define TPM_INT_ENABLE 0x08 + +#define LOCALITY0 0 + +enum st33zp24_access { + TPM_ACCESS_VALID = 0x80, + TPM_ACCESS_ACTIVE_LOCALITY = 0x20, + TPM_ACCESS_REQUEST_PENDING = 0x04, + TPM_ACCESS_REQUEST_USE = 0x02, +}; + +enum st33zp24_status { + TPM_STS_VALID = 0x80, + TPM_STS_COMMAND_READY = 0x40, + TPM_STS_GO = 0x20, + TPM_STS_DATA_AVAIL = 0x10, + TPM_STS_DATA_EXPECT = 0x08, +}; + +enum st33zp24_int_flags { + TPM_GLOBAL_INT_ENABLE = 0x80, + TPM_INTF_CMD_READY_INT = 0x080, + TPM_INTF_FIFO_AVALAIBLE_INT = 0x040, + TPM_INTF_WAKE_UP_READY_INT = 0x020, + TPM_INTF_LOCALITY_CHANGE_INT = 0x004, + TPM_INTF_STS_VALID_INT = 0x002, + TPM_INTF_DATA_AVAIL_INT = 0x001, +}; + +enum tis_defaults { + TIS_SHORT_TIMEOUT = 750, + TIS_LONG_TIMEOUT = 2000, +}; + +struct st33zp24_dev { + struct tpm_chip *chip; + void *phy_id; + const struct st33zp24_phy_ops *ops; + u32 intrs; + int io_lpcpd; +}; + +/* + * clear_interruption clear the pending interrupt. + * @param: tpm_dev, the tpm device device. + * @return: the interrupt status value. 
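The enums above are the standard TIS bit definitions, and the core polls them for essentially two conditions: "ready for a command" and "response available". In sketch form, assuming the TPM_STS byte has already been fetched (example_ names are illustrative):

/* Sketch: the two TPM_STS conditions the core driver cares about. */
static bool example_cmd_ready(u8 sts)
{
	return sts & TPM_STS_COMMAND_READY;
}

static bool example_response_ready(u8 sts)
{
	/* Same mask/value pair as st33zp24_tpm.req_complete_* below. */
	return (sts & (TPM_STS_DATA_AVAIL | TPM_STS_VALID)) ==
	       (TPM_STS_DATA_AVAIL | TPM_STS_VALID);
}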
+ */ +static u8 clear_interruption(struct st33zp24_dev *tpm_dev) +{ + u8 interrupt; + + tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1); + tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1); + return interrupt; +} /* clear_interruption() */ + +/* + * st33zp24_cancel, cancel the current command execution or + * set STS to COMMAND READY. + * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h + */ +static void st33zp24_cancel(struct tpm_chip *chip) +{ + struct st33zp24_dev *tpm_dev; + u8 data; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + data = TPM_STS_COMMAND_READY; + tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); +} /* st33zp24_cancel() */ + +/* + * st33zp24_status return the TPM_STS register + * @param: chip, the tpm chip description + * @return: the TPM_STS register value. + */ +static u8 st33zp24_status(struct tpm_chip *chip) +{ + struct st33zp24_dev *tpm_dev; + u8 data; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1); + return data; +} /* st33zp24_status() */ + +/* + * check_locality if the locality is active + * @param: chip, the tpm chip description + * @return: the active locality or -EACCESS. + */ +static int check_locality(struct tpm_chip *chip) +{ + struct st33zp24_dev *tpm_dev; + u8 data; + u8 status; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1); + if (status && (data & + (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == + (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) + return chip->vendor.locality; + + return -EACCES; +} /* check_locality() */ + +/* + * request_locality request the TPM locality + * @param: chip, the chip description + * @return: the active locality or negative value. + */ +static int request_locality(struct tpm_chip *chip) +{ + unsigned long stop; + long ret; + struct st33zp24_dev *tpm_dev; + u8 data; + + if (check_locality(chip) == chip->vendor.locality) + return chip->vendor.locality; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + data = TPM_ACCESS_REQUEST_USE; + ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); + if (ret < 0) + return ret; + + stop = jiffies + chip->vendor.timeout_a; + + /* Request locality is usually effective after the request */ + do { + if (check_locality(chip) >= 0) + return chip->vendor.locality; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + + /* could not get locality */ + return -EACCES; +} /* request_locality() */ + +/* + * release_locality release the active locality + * @param: chip, the tpm chip description. + */ +static void release_locality(struct tpm_chip *chip) +{ + struct st33zp24_dev *tpm_dev; + u8 data; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + data = TPM_ACCESS_ACTIVE_LOCALITY; + + tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); +} + +/* + * get_burstcount return the burstcount value + * @param: chip, the chip description + * return: the burstcount or negative value. 
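request_locality() and release_locality() above implement the TIS ownership handshake: write TPM_ACCESS_REQUEST_USE, poll until both TPM_ACCESS_ACTIVE_LOCALITY and TPM_ACCESS_VALID are set or timeout_a expires, and afterwards write TPM_ACCESS_ACTIVE_LOCALITY back to drop the grant. st33zp24_send() further down uses exactly this bracket around each command; reduced to a sketch (illustrative only):

/* Sketch: the locality bracket around one command exchange. */
static int example_command_bracket(struct tpm_chip *chip)
{
	int ret = request_locality(chip); /* LOCALITY0, or -EACCES on timeout */

	if (ret < 0)
		return ret;

	/* ... move the command through TPM_DATA_FIFO and fetch the reply ... */

	release_locality(chip);	/* writing ACTIVE_LOCALITY back drops the grant */
	return 0;
}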
+ */ +static int get_burstcount(struct tpm_chip *chip) +{ + unsigned long stop; + int burstcnt, status; + u8 tpm_reg, temp; + struct st33zp24_dev *tpm_dev; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + stop = jiffies + chip->vendor.timeout_d; + do { + tpm_reg = TPM_STS + 1; + status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1); + if (status < 0) + return -EBUSY; + + tpm_reg = TPM_STS + 2; + burstcnt = temp; + status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1); + if (status < 0) + return -EBUSY; + + burstcnt |= temp << 8; + if (burstcnt) + return burstcnt; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + return -EBUSY; +} /* get_burstcount() */ + + +/* + * wait_for_tpm_stat_cond + * @param: chip, chip description + * @param: mask, expected mask value + * @param: check_cancel, does the command expected to be canceled ? + * @param: canceled, did we received a cancel request ? + * @return: true if status == mask or if the command is canceled. + * false in other cases. + */ +static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, + bool check_cancel, bool *canceled) +{ + u8 status = chip->ops->status(chip); + + *canceled = false; + if ((status & mask) == mask) + return true; + if (check_cancel && chip->ops->req_canceled(chip, status)) { + *canceled = true; + return true; + } + return false; +} + +/* + * wait_for_stat wait for a TPM_STS value + * @param: chip, the tpm chip description + * @param: mask, the value mask to wait + * @param: timeout, the timeout + * @param: queue, the wait queue. + * @param: check_cancel, does the command can be cancelled ? + * @return: the tpm status, 0 if success, -ETIME if timeout is reached. + */ +static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, + wait_queue_head_t *queue, bool check_cancel) +{ + unsigned long stop; + int ret = 0; + bool canceled = false; + bool condition; + u32 cur_intrs; + u8 status; + struct st33zp24_dev *tpm_dev; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + /* check current status */ + status = st33zp24_status(chip); + if ((status & mask) == mask) + return 0; + + stop = jiffies + timeout; + + if (chip->vendor.irq) { + cur_intrs = tpm_dev->intrs; + clear_interruption(tpm_dev); + enable_irq(chip->vendor.irq); + + do { + if (ret == -ERESTARTSYS && freezing(current)) + clear_thread_flag(TIF_SIGPENDING); + + timeout = stop - jiffies; + if ((long) timeout <= 0) + return -1; + + ret = wait_event_interruptible_timeout(*queue, + cur_intrs != tpm_dev->intrs, + timeout); + clear_interruption(tpm_dev); + condition = wait_for_tpm_stat_cond(chip, mask, + check_cancel, &canceled); + if (ret >= 0 && condition) { + if (canceled) + return -ECANCELED; + return 0; + } + } while (ret == -ERESTARTSYS && freezing(current)); + + disable_irq_nosync(chip->vendor.irq); + + } else { + do { + msleep(TPM_TIMEOUT); + status = chip->ops->status(chip); + if ((status & mask) == mask) + return 0; + } while (time_before(jiffies, stop)); + } + + return -ETIME; +} /* wait_for_stat() */ + +/* + * recv_data receive data + * @param: chip, the tpm chip description + * @param: buf, the buffer where the data are received + * @param: count, the number of data to receive + * @return: the number of bytes read from TPM FIFO. 
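get_burstcount() above assembles the 16-bit burst count that TIS exposes at TPM_STS + 1 (low byte) and TPM_STS + 2 (high byte); it is the number of FIFO bytes that may be moved before the status register has to be polled again, and zero means "wait and retry". A worked sketch with error handling elided (the example_ name is illustrative):

/* Sketch: assembling the burst count, e.g. lo = 0x20, hi = 0x00 -> 32. */
static int example_burstcount(struct st33zp24_dev *tpm_dev)
{
	u8 lo, hi;

	tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1, &lo, 1);
	tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2, &hi, 1);
	return lo | (hi << 8);	/* zero means: sleep and poll again */
}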
+ */ +static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int size = 0, burstcnt, len, ret; + struct st33zp24_dev *tpm_dev; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + while (size < count && + wait_for_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + chip->vendor.timeout_c, + &chip->vendor.read_queue, true) == 0) { + burstcnt = get_burstcount(chip); + if (burstcnt < 0) + return burstcnt; + len = min_t(int, burstcnt, count - size); + ret = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_DATA_FIFO, + buf + size, len); + if (ret < 0) + return ret; + + size += len; + } + return size; +} + +/* + * tpm_ioserirq_handler the serirq irq handler + * @param: irq, the tpm chip description + * @param: dev_id, the description of the chip + * @return: the status of the handler. + */ +static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + struct st33zp24_dev *tpm_dev; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + tpm_dev->intrs++; + wake_up_interruptible(&chip->vendor.read_queue); + disable_irq_nosync(chip->vendor.irq); + + return IRQ_HANDLED; +} /* tpm_ioserirq_handler() */ + +/* + * st33zp24_send send TPM commands through the I2C bus. + * + * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h + * @param: buf, the buffer to send. + * @param: count, the number of bytes to send. + * @return: In case of success the number of bytes sent. + * In other case, a < 0 value describing the issue. + */ +static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, + size_t len) +{ + u32 status, i, size, ordinal; + int burstcnt = 0; + int ret; + u8 data; + struct st33zp24_dev *tpm_dev; + + if (!chip) + return -EBUSY; + if (len < TPM_HEADER_SIZE) + return -EBUSY; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + ret = request_locality(chip); + if (ret < 0) + return ret; + + status = st33zp24_status(chip); + if ((status & TPM_STS_COMMAND_READY) == 0) { + st33zp24_cancel(chip); + if (wait_for_stat + (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, + &chip->vendor.read_queue, false) < 0) { + ret = -ETIME; + goto out_err; + } + } + + for (i = 0; i < len - 1;) { + burstcnt = get_burstcount(chip); + if (burstcnt < 0) + return burstcnt; + size = min_t(int, len - i - 1, burstcnt); + ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO, + buf + i, size); + if (ret < 0) + goto out_err; + + i += size; + } + + status = st33zp24_status(chip); + if ((status & TPM_STS_DATA_EXPECT) == 0) { + ret = -EIO; + goto out_err; + } + + ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO, + buf + len - 1, 1); + if (ret < 0) + goto out_err; + + status = st33zp24_status(chip); + if ((status & TPM_STS_DATA_EXPECT) != 0) { + ret = -EIO; + goto out_err; + } + + data = TPM_STS_GO; + ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); + if (ret < 0) + goto out_err; + + if (chip->vendor.irq) { + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + + ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, + tpm_calc_ordinal_duration(chip, ordinal), + &chip->vendor.read_queue, false); + if (ret < 0) + goto out_err; + } + + return len; +out_err: + st33zp24_cancel(chip); + release_locality(chip); + return ret; +} + +/* + * st33zp24_recv received TPM response through TPM phy. + * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h. + * @param: buf, the buffer to store datas. + * @param: count, the number of bytes to send. + * @return: In case of success the number of bytes received. 
+ * In other case, a < 0 value describing the issue. + */ +static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, + size_t count) +{ + int size = 0; + int expected; + + if (!chip) + return -EBUSY; + + if (count < TPM_HEADER_SIZE) { + size = -EIO; + goto out; + } + + size = recv_data(chip, buf, TPM_HEADER_SIZE); + if (size < TPM_HEADER_SIZE) { + dev_err(&chip->dev, "Unable to read header\n"); + goto out; + } + + expected = be32_to_cpu(*(__be32 *)(buf + 2)); + if (expected > count) { + size = -EIO; + goto out; + } + + size += recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE); + if (size < expected) { + dev_err(&chip->dev, "Unable to read remainder of result\n"); + size = -ETIME; + } + +out: + st33zp24_cancel(chip); + release_locality(chip); + return size; +} + +/* + * st33zp24_req_canceled + * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h. + * @param: status, the TPM status. + * @return: Does TPM ready to compute a new command ? true. + */ +static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == TPM_STS_COMMAND_READY); +} + +static const struct tpm_class_ops st33zp24_tpm = { + .send = st33zp24_send, + .recv = st33zp24_recv, + .cancel = st33zp24_cancel, + .status = st33zp24_status, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = st33zp24_req_canceled, +}; + +/* + * st33zp24_probe initialize the TPM device + * @param: client, the i2c_client drescription (TPM I2C description). + * @param: id, the i2c_device_id struct. + * @return: 0 in case of success. + * -1 in other case. + */ +int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, + struct device *dev, int irq, int io_lpcpd) +{ + int ret; + u8 intmask = 0; + struct tpm_chip *chip; + struct st33zp24_dev *tpm_dev; + + chip = tpmm_chip_alloc(dev, &st33zp24_tpm); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev), + GFP_KERNEL); + if (!tpm_dev) + return -ENOMEM; + + TPM_VPRIV(chip) = tpm_dev; + tpm_dev->phy_id = phy_id; + tpm_dev->ops = ops; + + chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + + chip->vendor.locality = LOCALITY0; + + if (irq) { + /* INTERRUPT Setup */ + init_waitqueue_head(&chip->vendor.read_queue); + tpm_dev->intrs = 0; + + if (request_locality(chip) != LOCALITY0) { + ret = -ENODEV; + goto _tpm_clean_answer; + } + + clear_interruption(tpm_dev); + ret = devm_request_irq(dev, irq, tpm_ioserirq_handler, + IRQF_TRIGGER_HIGH, "TPM SERIRQ management", + chip); + if (ret < 0) { + dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n", + irq); + goto _tpm_clean_answer; + } + + intmask |= TPM_INTF_CMD_READY_INT + | TPM_INTF_STS_VALID_INT + | TPM_INTF_DATA_AVAIL_INT; + + ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE, + &intmask, 1); + if (ret < 0) + goto _tpm_clean_answer; + + intmask = TPM_GLOBAL_INT_ENABLE; + ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3), + &intmask, 1); + if (ret < 0) + goto _tpm_clean_answer; + + chip->vendor.irq = irq; + + disable_irq_nosync(chip->vendor.irq); + + tpm_gen_interrupt(chip); + } + + tpm_get_timeouts(chip); + tpm_do_selftest(chip); + + return tpm_chip_register(chip); +_tpm_clean_answer: + dev_info(&chip->dev, "TPM 
initialization fail\n"); + return ret; +} +EXPORT_SYMBOL(st33zp24_probe); + +/* + * st33zp24_remove remove the TPM device + * @param: tpm_data, the tpm phy. + * @return: 0 in case of success. + */ +int st33zp24_remove(struct tpm_chip *chip) +{ + tpm_chip_unregister(chip); + return 0; +} +EXPORT_SYMBOL(st33zp24_remove); + +#ifdef CONFIG_PM_SLEEP +/* + * st33zp24_pm_suspend suspend the TPM device + * @param: tpm_data, the tpm phy. + * @param: mesg, the power management message. + * @return: 0 in case of success. + */ +int st33zp24_pm_suspend(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + struct st33zp24_dev *tpm_dev; + int ret = 0; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + if (gpio_is_valid(tpm_dev->io_lpcpd)) + gpio_set_value(tpm_dev->io_lpcpd, 0); + else + ret = tpm_pm_suspend(dev); + + return ret; +} /* st33zp24_pm_suspend() */ +EXPORT_SYMBOL(st33zp24_pm_suspend); + +/* + * st33zp24_pm_resume resume the TPM device + * @param: tpm_data, the tpm phy. + * @return: 0 in case of success. + */ +int st33zp24_pm_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + struct st33zp24_dev *tpm_dev; + int ret = 0; + + tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + + if (gpio_is_valid(tpm_dev->io_lpcpd)) { + gpio_set_value(tpm_dev->io_lpcpd, 1); + ret = wait_for_stat(chip, + TPM_STS_VALID, chip->vendor.timeout_b, + &chip->vendor.read_queue, false); + } else { + ret = tpm_pm_resume(dev); + if (!ret) + tpm_do_selftest(chip); + } + return ret; +} /* st33zp24_pm_resume() */ +EXPORT_SYMBOL(st33zp24_pm_resume); +#endif + +MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); +MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver"); +MODULE_VERSION("1.3.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/st33zp24/st33zp24.h b/kernel/drivers/char/tpm/st33zp24/st33zp24.h new file mode 100644 index 000000000..c207cebf6 --- /dev/null +++ b/kernel/drivers/char/tpm/st33zp24/st33zp24.h @@ -0,0 +1,37 @@ +/* + * STMicroelectronics TPM Linux driver for TPM ST33ZP24 + * Copyright (C) 2009 - 2015 STMicroelectronics + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
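Both phy front ends hand exactly this suspend/resume pair to SIMPLE_DEV_PM_OPS, so when the lpcpd line is wired, suspend is a cheap GPIO toggle that powers the chip down, and otherwise the code falls back to the generic tpm_pm_suspend()/tpm_pm_resume() path. In sketch form, as i2c.c and spi.c above do (the example_ name is illustrative):

/* Sketch: wiring the shared PM hooks into a bus driver. */
static SIMPLE_DEV_PM_OPS(example_st33zp24_pm_ops, st33zp24_pm_suspend,
			 st33zp24_pm_resume);

/* ...and in the driver definition: .driver = { .pm = &example_st33zp24_pm_ops } */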
+ */ + +#ifndef __LOCAL_ST33ZP24_H__ +#define __LOCAL_ST33ZP24_H__ + +#define TPM_WRITE_DIRECTION 0x80 +#define TPM_BUFSIZE 2048 + +struct st33zp24_phy_ops { + int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); + int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); +}; + +#ifdef CONFIG_PM_SLEEP +int st33zp24_pm_suspend(struct device *dev); +int st33zp24_pm_resume(struct device *dev); +#endif + +int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, + struct device *dev, int irq, int io_lpcpd); +int st33zp24_remove(struct tpm_chip *chip); +#endif /* __LOCAL_ST33ZP24_H__ */ diff --git a/kernel/drivers/char/tpm/tpm-chip.c b/kernel/drivers/char/tpm/tpm-chip.c new file mode 100644 index 000000000..283f00a7f --- /dev/null +++ b/kernel/drivers/char/tpm/tpm-chip.c @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2004 IBM Corporation + * Copyright (C) 2014 Intel Corporation + * + * Authors: + * Jarkko Sakkinen + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * TPM chip management routines. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ + +#include +#include +#include +#include +#include +#include +#include "tpm.h" +#include "tpm_eventlog.h" + +static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); +static LIST_HEAD(tpm_chip_list); +static DEFINE_SPINLOCK(driver_lock); + +struct class *tpm_class; +dev_t tpm_devt; + +/* + * tpm_chip_find_get - return tpm_chip for a given chip number + * @chip_num the device number for the chip + */ +struct tpm_chip *tpm_chip_find_get(int chip_num) +{ + struct tpm_chip *pos, *chip = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(pos, &tpm_chip_list, list) { + if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) + continue; + + if (try_module_get(pos->pdev->driver->owner)) { + chip = pos; + break; + } + } + rcu_read_unlock(); + return chip; +} + +/** + * tpm_dev_release() - free chip memory and the device number + * @dev: the character device for the TPM chip + * + * This is used as the release function for the character device. + */ +static void tpm_dev_release(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); + + spin_lock(&driver_lock); + clear_bit(chip->dev_num, dev_mask); + spin_unlock(&driver_lock); + kfree(chip); +} + +/** + * tpmm_chip_alloc() - allocate a new struct tpm_chip instance + * @dev: device to which the chip is associated + * @ops: struct tpm_class_ops instance + * + * Allocates a new struct tpm_chip instance and assigns a free + * device number for it. Caller does not have to worry about + * freeing the allocated resources. When the devices is removed + * devres calls tpmm_chip_remove() to do the job. 
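tpm_chip_find_get() above is the entry point for in-kernel users: it resolves a chip number (or TPM_ANY_NUM) under RCU and pins the owning module, so every caller must balance it with tpm_chip_put(), as tpm_pcr_read() does later in this patch. In sketch form (the example_ name is illustrative):

/* Sketch: the in-kernel lookup/use/put pattern. */
static int example_use_tpm(void)
{
	struct tpm_chip *chip = tpm_chip_find_get(TPM_ANY_NUM);

	if (!chip)
		return -ENODEV;	/* no registered chip, or module going away */

	/* ... issue commands through the exported tpm_* helpers ... */

	tpm_chip_put(chip);	/* drops the module reference taken above */
	return 0;
}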
+ */ +struct tpm_chip *tpmm_chip_alloc(struct device *dev, + const struct tpm_class_ops *ops) +{ + struct tpm_chip *chip; + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (chip == NULL) + return ERR_PTR(-ENOMEM); + + mutex_init(&chip->tpm_mutex); + INIT_LIST_HEAD(&chip->list); + + chip->ops = ops; + + spin_lock(&driver_lock); + chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES); + spin_unlock(&driver_lock); + + if (chip->dev_num >= TPM_NUM_DEVICES) { + dev_err(dev, "No available tpm device numbers\n"); + kfree(chip); + return ERR_PTR(-ENOMEM); + } + + set_bit(chip->dev_num, dev_mask); + + scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); + + chip->pdev = dev; + + dev_set_drvdata(dev, chip); + + chip->dev.class = tpm_class; + chip->dev.release = tpm_dev_release; + chip->dev.parent = chip->pdev; + + if (chip->dev_num == 0) + chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); + else + chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); + + dev_set_name(&chip->dev, "%s", chip->devname); + + device_initialize(&chip->dev); + + chip->cdev.owner = chip->pdev->driver->owner; + cdev_init(&chip->cdev, &tpm_fops); + + return chip; +} +EXPORT_SYMBOL_GPL(tpmm_chip_alloc); + +static int tpm_dev_add_device(struct tpm_chip *chip) +{ + int rc; + + rc = cdev_add(&chip->cdev, chip->dev.devt, 1); + if (rc) { + dev_err(&chip->dev, + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + chip->devname, MAJOR(chip->dev.devt), + MINOR(chip->dev.devt), rc); + + device_unregister(&chip->dev); + return rc; + } + + rc = device_add(&chip->dev); + if (rc) { + dev_err(&chip->dev, + "unable to device_register() %s, major %d, minor %d, err=%d\n", + chip->devname, MAJOR(chip->dev.devt), + MINOR(chip->dev.devt), rc); + + return rc; + } + + return rc; +} + +static void tpm_dev_del_device(struct tpm_chip *chip) +{ + cdev_del(&chip->cdev); + device_unregister(&chip->dev); +} + +static int tpm1_chip_register(struct tpm_chip *chip) +{ + int rc; + + if (chip->flags & TPM_CHIP_FLAG_TPM2) + return 0; + + rc = tpm_sysfs_add_device(chip); + if (rc) + return rc; + + rc = tpm_add_ppi(chip); + if (rc) { + tpm_sysfs_del_device(chip); + return rc; + } + + chip->bios_dir = tpm_bios_log_setup(chip->devname); + + return 0; +} + +static void tpm1_chip_unregister(struct tpm_chip *chip) +{ + if (chip->flags & TPM_CHIP_FLAG_TPM2) + return; + + if (chip->bios_dir) + tpm_bios_log_teardown(chip->bios_dir); + + tpm_remove_ppi(chip); + + tpm_sysfs_del_device(chip); +} + +/* + * tpm_chip_register() - create a character device for the TPM chip + * @chip: TPM chip to use. + * + * Creates a character device for the TPM chip and adds sysfs attributes for + * the device. As the last step this function adds the chip to the list of TPM + * chips available for in-kernel use. + * + * This function should be only called after the chip initialization is + * complete. + */ +int tpm_chip_register(struct tpm_chip *chip) +{ + int rc; + + rc = tpm1_chip_register(chip); + if (rc) + return rc; + + rc = tpm_dev_add_device(chip); + if (rc) + goto out_err; + + /* Make the chip available. */ + spin_lock(&driver_lock); + list_add_rcu(&chip->list, &tpm_chip_list); + spin_unlock(&driver_lock); + + chip->flags |= TPM_CHIP_FLAG_REGISTERED; + + return 0; +out_err: + tpm1_chip_unregister(chip); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_chip_register); + +/* + * tpm_chip_unregister() - release the TPM driver + * @chip: TPM chip to use. 
+ * + * Takes the chip first away from the list of available TPM chips and then + * cleans up all the resources reserved by tpm_chip_register(). + * + * NOTE: This function should be only called before deinitializing chip + * resources. + */ +void tpm_chip_unregister(struct tpm_chip *chip) +{ + if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED)) + return; + + spin_lock(&driver_lock); + list_del_rcu(&chip->list); + spin_unlock(&driver_lock); + synchronize_rcu(); + + tpm1_chip_unregister(chip); + tpm_dev_del_device(chip); +} +EXPORT_SYMBOL_GPL(tpm_chip_unregister); diff --git a/kernel/drivers/char/tpm/tpm-dev.c b/kernel/drivers/char/tpm/tpm-dev.c new file mode 100644 index 000000000..de0337ebd --- /dev/null +++ b/kernel/drivers/char/tpm/tpm-dev.c @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2004 IBM Corporation + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Copyright (C) 2013 Obsidian Research Corp + * Jason Gunthorpe + * + * Device file system interface to the TPM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ +#include +#include +#include "tpm.h" + +struct file_priv { + struct tpm_chip *chip; + + /* Data passed to and from the tpm via the read/write calls */ + atomic_t data_pending; + struct mutex buffer_mutex; + + struct timer_list user_read_timer; /* user needs to claim result */ + struct work_struct work; + + u8 data_buffer[TPM_BUFSIZE]; +}; + +static void user_reader_timeout(unsigned long ptr) +{ + struct file_priv *priv = (struct file_priv *)ptr; + + schedule_work(&priv->work); +} + +static void timeout_work(struct work_struct *work) +{ + struct file_priv *priv = container_of(work, struct file_priv, work); + + mutex_lock(&priv->buffer_mutex); + atomic_set(&priv->data_pending, 0); + memset(priv->data_buffer, 0, sizeof(priv->data_buffer)); + mutex_unlock(&priv->buffer_mutex); +} + +static int tpm_open(struct inode *inode, struct file *file) +{ + struct tpm_chip *chip = + container_of(inode->i_cdev, struct tpm_chip, cdev); + struct file_priv *priv; + + /* It's assured that the chip will be opened just once, + * by the check of is_open variable, which is protected + * by driver_lock. 
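tpm_chip_register() and tpm_chip_unregister() above complete the devres-based lifecycle that tpmm_chip_alloc() starts: a driver never frees the chip itself, it only brackets register/unregister in its probe and remove paths, as st33zp24_probe() earlier in this patch does. Reduced to a sketch (example_ names are illustrative):

/* Sketch: minimal driver-side chip lifecycle against this API. */
static int example_tpm_probe(struct device *dev,
			     const struct tpm_class_ops *ops)
{
	struct tpm_chip *chip = tpmm_chip_alloc(dev, ops);	/* devres managed */

	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* ... fill chip->vendor timeouts, set up interrupts, self-test ... */

	return tpm_chip_register(chip);	/* creates /dev/tpmN and sysfs */
}

static void example_tpm_remove(struct tpm_chip *chip)
{
	tpm_chip_unregister(chip);	/* devres frees the chip afterwards */
}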
*/ + if (test_and_set_bit(0, &chip->is_open)) { + dev_dbg(chip->pdev, "Another process owns this TPM\n"); + return -EBUSY; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (priv == NULL) { + clear_bit(0, &chip->is_open); + return -ENOMEM; + } + + priv->chip = chip; + atomic_set(&priv->data_pending, 0); + mutex_init(&priv->buffer_mutex); + setup_timer(&priv->user_read_timer, user_reader_timeout, + (unsigned long)priv); + INIT_WORK(&priv->work, timeout_work); + + file->private_data = priv; + get_device(chip->pdev); + return 0; +} + +static ssize_t tpm_read(struct file *file, char __user *buf, + size_t size, loff_t *off) +{ + struct file_priv *priv = file->private_data; + ssize_t ret_size; + int rc; + + del_singleshot_timer_sync(&priv->user_read_timer); + flush_work(&priv->work); + ret_size = atomic_read(&priv->data_pending); + if (ret_size > 0) { /* relay data */ + ssize_t orig_ret_size = ret_size; + if (size < ret_size) + ret_size = size; + + mutex_lock(&priv->buffer_mutex); + rc = copy_to_user(buf, priv->data_buffer, ret_size); + memset(priv->data_buffer, 0, orig_ret_size); + if (rc) + ret_size = -EFAULT; + + mutex_unlock(&priv->buffer_mutex); + } + + atomic_set(&priv->data_pending, 0); + + return ret_size; +} + +static ssize_t tpm_write(struct file *file, const char __user *buf, + size_t size, loff_t *off) +{ + struct file_priv *priv = file->private_data; + size_t in_size = size; + ssize_t out_size; + + /* cannot perform a write until the read has cleared + either via tpm_read or a user_read_timer timeout. + This also prevents splitted buffered writes from blocking here. + */ + if (atomic_read(&priv->data_pending) != 0) + return -EBUSY; + + if (in_size > TPM_BUFSIZE) + return -E2BIG; + + mutex_lock(&priv->buffer_mutex); + + if (copy_from_user + (priv->data_buffer, (void __user *) buf, in_size)) { + mutex_unlock(&priv->buffer_mutex); + return -EFAULT; + } + + /* atomic tpm command send and result receive */ + out_size = tpm_transmit(priv->chip, priv->data_buffer, + sizeof(priv->data_buffer)); + if (out_size < 0) { + mutex_unlock(&priv->buffer_mutex); + return out_size; + } + + atomic_set(&priv->data_pending, out_size); + mutex_unlock(&priv->buffer_mutex); + + /* Set a timeout by which the reader must come claim the result */ + mod_timer(&priv->user_read_timer, jiffies + (60 * HZ)); + + return in_size; +} + +/* + * Called on file close + */ +static int tpm_release(struct inode *inode, struct file *file) +{ + struct file_priv *priv = file->private_data; + + del_singleshot_timer_sync(&priv->user_read_timer); + flush_work(&priv->work); + file->private_data = NULL; + atomic_set(&priv->data_pending, 0); + clear_bit(0, &priv->chip->is_open); + put_device(priv->chip->pdev); + kfree(priv); + return 0; +} + +const struct file_operations tpm_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + + diff --git a/kernel/drivers/char/tpm/tpm-interface.c b/kernel/drivers/char/tpm/tpm-interface.c new file mode 100644 index 000000000..e85d3416d --- /dev/null +++ b/kernel/drivers/char/tpm/tpm-interface.c @@ -0,0 +1,1056 @@ +/* + * Copyright (C) 2004 IBM Corporation + * Copyright (C) 2014 Intel Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). 
+ * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * Note, the TPM chip is not interrupt driven (only polling) + * and can have very long timeouts (minutes!). Hence the unusual + * calls to msleep. + * + */ + +#include +#include +#include +#include +#include + +#include "tpm.h" +#include "tpm_eventlog.h" + +#define TPM_MAX_ORDINAL 243 +#define TSC_MAX_ORDINAL 12 +#define TPM_PROTECTED_COMMAND 0x00 +#define TPM_CONNECTION_COMMAND 0x40 + +/* + * Bug workaround - some TPM's don't flush the most + * recently changed pcr on suspend, so force the flush + * with an extend to the selected _unused_ non-volatile pcr. + */ +static int tpm_suspend_pcr; +module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644); +MODULE_PARM_DESC(suspend_pcr, + "PCR to use for dummy writes to faciltate flush on suspend."); + +/* + * Array with one entry per ordinal defining the maximum amount + * of time the chip could take to return the result. The ordinal + * designation of short, medium or long is defined in a table in + * TCG Specification TPM Main Part 2 TPM Structures Section 17. The + * values of the SHORT, MEDIUM, and LONG durations are retrieved + * from the chip during initialization with a call to tpm_get_timeouts. + */ +static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = { + TPM_UNDEFINED, /* 0 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 5 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 10 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_LONG, + TPM_LONG, + TPM_MEDIUM, /* 15 */ + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, + TPM_LONG, + TPM_SHORT, /* 20 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_SHORT, /* 25 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, /* 30 */ + TPM_LONG, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 35 */ + TPM_MEDIUM, + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 40 */ + TPM_LONG, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 45 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_LONG, + TPM_MEDIUM, /* 50 */ + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 55 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 60 */ + TPM_MEDIUM, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, /* 65 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 70 */ + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 75 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_LONG, /* 80 */ + TPM_UNDEFINED, + TPM_MEDIUM, + TPM_LONG, + TPM_SHORT, + TPM_UNDEFINED, /* 85 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 90 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, /* 95 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 100 */ + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 105 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 110 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 115 */ + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_LONG, /* 120 */ + TPM_LONG, + TPM_MEDIUM, + 
TPM_UNDEFINED, + TPM_SHORT, + TPM_SHORT, /* 125 */ + TPM_SHORT, + TPM_LONG, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 130 */ + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_SHORT, + TPM_MEDIUM, + TPM_UNDEFINED, /* 135 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 140 */ + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 145 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 150 */ + TPM_MEDIUM, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, /* 155 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 160 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 165 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_LONG, /* 170 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 175 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 180 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, /* 185 */ + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 190 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 195 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 200 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, + TPM_SHORT, /* 205 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, /* 210 */ + TPM_UNDEFINED, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_UNDEFINED, /* 215 */ + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, + TPM_SHORT, /* 220 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, /* 225 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 230 */ + TPM_LONG, + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 235 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 240 */ + TPM_UNDEFINED, + TPM_MEDIUM, +}; + +/* + * Returns max number of jiffies to wait + */ +unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, + u32 ordinal) +{ + int duration_idx = TPM_UNDEFINED; + int duration = 0; + u8 category = (ordinal >> 24) & 0xFF; + + if ((category == TPM_PROTECTED_COMMAND && ordinal < TPM_MAX_ORDINAL) || + (category == TPM_CONNECTION_COMMAND && ordinal < TSC_MAX_ORDINAL)) + duration_idx = tpm_ordinal_duration[ordinal]; + + if (duration_idx != TPM_UNDEFINED) + duration = chip->vendor.duration[duration_idx]; + if (duration <= 0) + return 2 * 60 * HZ; + else + return duration; +} +EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); + +/* + * Internal kernel interface to transmit TPM commands + */ +ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, + size_t bufsiz) +{ + ssize_t rc; + u32 count, ordinal; + unsigned long stop; + + if (bufsiz > TPM_BUFSIZE) + bufsiz = TPM_BUFSIZE; + + count = be32_to_cpu(*((__be32 *) (buf + 2))); + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + if (count == 0) + return -ENODATA; + if (count > bufsiz) { + dev_err(chip->pdev, + "invalid count value %x %zx\n", count, bufsiz); + return -E2BIG; + } + + mutex_lock(&chip->tpm_mutex); + + rc = chip->ops->send(chip, (u8 *) buf, count); + if (rc < 0) { + dev_err(chip->pdev, + "tpm_transmit: tpm_send: error %zd\n", rc); + goto out; + } + + if (chip->vendor.irq) + goto out_recv; + + if (chip->flags & TPM_CHIP_FLAG_TPM2) + stop = jiffies + tpm2_calc_ordinal_duration(chip, 
ordinal); + else + stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); + do { + u8 status = chip->ops->status(chip); + if ((status & chip->ops->req_complete_mask) == + chip->ops->req_complete_val) + goto out_recv; + + if (chip->ops->req_canceled(chip, status)) { + dev_err(chip->pdev, "Operation Canceled\n"); + rc = -ECANCELED; + goto out; + } + + msleep(TPM_TIMEOUT); /* CHECK */ + rmb(); + } while (time_before(jiffies, stop)); + + chip->ops->cancel(chip); + dev_err(chip->pdev, "Operation Timed out\n"); + rc = -ETIME; + goto out; + +out_recv: + rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); + if (rc < 0) + dev_err(chip->pdev, + "tpm_transmit: tpm_recv: error %zd\n", rc); +out: + mutex_unlock(&chip->tpm_mutex); + return rc; +} + +#define TPM_DIGEST_SIZE 20 +#define TPM_RET_CODE_IDX 6 + +ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, + int len, const char *desc) +{ + struct tpm_output_header *header; + int err; + + len = tpm_transmit(chip, (u8 *) cmd, len); + if (len < 0) + return len; + else if (len < TPM_HEADER_SIZE) + return -EFAULT; + + header = cmd; + + err = be32_to_cpu(header->return_code); + if (err != 0 && desc) + dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err, + desc); + + return err; +} + +#define TPM_INTERNAL_RESULT_SIZE 200 +#define TPM_ORD_GET_CAP cpu_to_be32(101) +#define TPM_ORD_GET_RANDOM cpu_to_be32(70) + +static const struct tpm_input_header tpm_getcap_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(22), + .ordinal = TPM_ORD_GET_CAP +}; + +ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap, + const char *desc) +{ + struct tpm_cmd_t tpm_cmd; + int rc; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_cmd.header.in = tpm_getcap_header; + if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) { + tpm_cmd.params.getcap_in.cap = subcap_id; + /*subcap field not necessary */ + tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0); + tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32)); + } else { + if (subcap_id == TPM_CAP_FLAG_PERM || + subcap_id == TPM_CAP_FLAG_VOL) + tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG; + else + tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; + tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); + tpm_cmd.params.getcap_in.subcap = subcap_id; + } + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc); + if (!rc) + *cap = tpm_cmd.params.getcap_out.cap; + return rc; +} + +void tpm_gen_interrupt(struct tpm_chip *chip) +{ + struct tpm_cmd_t tpm_cmd; + ssize_t rc; + + tpm_cmd.header.in = tpm_getcap_header; + tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; + tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); + tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; + + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, + "attempting to determine the timeouts"); +} +EXPORT_SYMBOL_GPL(tpm_gen_interrupt); + +#define TPM_ORD_STARTUP cpu_to_be32(153) +#define TPM_ST_CLEAR cpu_to_be16(1) +#define TPM_ST_STATE cpu_to_be16(2) +#define TPM_ST_DEACTIVATED cpu_to_be16(3) +static const struct tpm_input_header tpm_startup_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(12), + .ordinal = TPM_ORD_STARTUP +}; + +static int tpm_startup(struct tpm_chip *chip, __be16 startup_type) +{ + struct tpm_cmd_t start_cmd; + start_cmd.header.in = tpm_startup_header; + + start_cmd.params.startup_in.startup_type = startup_type; + return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE, + "attempting to start the TPM"); +} + +int 
tpm_get_timeouts(struct tpm_chip *chip) +{ + struct tpm_cmd_t tpm_cmd; + unsigned long new_timeout[4]; + unsigned long old_timeout[4]; + struct duration_t *duration_cap; + ssize_t rc; + + tpm_cmd.header.in = tpm_getcap_header; + tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; + tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); + tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL); + + if (rc == TPM_ERR_INVALID_POSTINIT) { + /* The TPM is not started; we are the first to talk to it. Execute a startup command. */ + dev_info(chip->pdev, "Issuing TPM_STARTUP"); + if (tpm_startup(chip, TPM_ST_CLEAR)) + return rc; + + tpm_cmd.header.in = tpm_getcap_header; + tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; + tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); + tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, + NULL); + } + if (rc) { + dev_err(chip->pdev, + "A TPM error (%zd) occurred attempting to determine the timeouts\n", + rc); + goto duration; + } + + if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || + be32_to_cpu(tpm_cmd.header.out.length) + != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32)) + return -EINVAL; + + old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a); + old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b); + old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c); + old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d); + memcpy(new_timeout, old_timeout, sizeof(new_timeout)); + + /* + * Provide ability for vendor overrides of timeout values in case + * of misreporting. + */ + if (chip->ops->update_timeouts != NULL) + chip->vendor.timeout_adjusted = + chip->ops->update_timeouts(chip, new_timeout); + + if (!chip->vendor.timeout_adjusted) { + /* Don't overwrite default if value is 0 */ + if (new_timeout[0] != 0 && new_timeout[0] < 1000) { + int i; + + /* timeouts in msec rather than usec */ + for (i = 0; i != ARRAY_SIZE(new_timeout); i++) + new_timeout[i] *= 1000; + chip->vendor.timeout_adjusted = true; + } + } + + /* Report adjusted timeouts */ + if (chip->vendor.timeout_adjusted) { + dev_info(chip->pdev, + HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", + old_timeout[0], new_timeout[0], + old_timeout[1], new_timeout[1], + old_timeout[2], new_timeout[2], + old_timeout[3], new_timeout[3]); + } + + chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]); + chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]); + chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]); + chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]); + +duration: + tpm_cmd.header.in = tpm_getcap_header; + tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; + tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); + tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION; + + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, + "attempting to determine the durations"); + if (rc) + return rc; + + if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || + be32_to_cpu(tpm_cmd.header.out.length) + != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) + return -EINVAL; + + duration_cap = &tpm_cmd.params.getcap_out.cap.duration; + chip->vendor.duration[TPM_SHORT] = + usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); + chip->vendor.duration[TPM_MEDIUM] = +
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); + chip->vendor.duration[TPM_LONG] = + usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); + + /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above + * value wrong and apparently reports msecs rather than usecs. So we + * fix up the resulting too-small TPM_SHORT value to make things work. + * We also scale the TPM_MEDIUM and -_LONG values by 1000. + */ + if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) { + chip->vendor.duration[TPM_SHORT] = HZ; + chip->vendor.duration[TPM_MEDIUM] *= 1000; + chip->vendor.duration[TPM_LONG] *= 1000; + chip->vendor.duration_adjusted = true; + dev_info(chip->pdev, "Adjusting TPM timeout parameters."); + } + return 0; +} +EXPORT_SYMBOL_GPL(tpm_get_timeouts); + +#define TPM_ORD_CONTINUE_SELFTEST 83 +#define CONTINUE_SELFTEST_RESULT_SIZE 10 + +static struct tpm_input_header continue_selftest_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(10), + .ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST), +}; + +/** + * tpm_continue_selftest -- run TPM's selftest + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing + * a TPM error code. + */ +static int tpm_continue_selftest(struct tpm_chip *chip) +{ + int rc; + struct tpm_cmd_t cmd; + + cmd.header.in = continue_selftest_header; + rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, + "continue selftest"); + return rc; +} + +#define TPM_ORDINAL_PCRREAD cpu_to_be32(21) +#define READ_PCR_RESULT_SIZE 30 +static struct tpm_input_header pcrread_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(14), + .ordinal = TPM_ORDINAL_PCRREAD +}; + +int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) +{ + int rc; + struct tpm_cmd_t cmd; + + cmd.header.in = pcrread_header; + cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); + rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, + "attempting to read a pcr value"); + + if (rc == 0) + memcpy(res_buf, cmd.params.pcrread_out.pcr_result, + TPM_DIGEST_SIZE); + return rc; +} + +/** + * tpm_pcr_read - read a pcr value + * @chip_num: tpm idx # or ANY + * @pcr_idx: pcr idx to retrieve + * @res_buf: TPM_PCR value + * size of res_buf is 20 bytes (or NULL if you don't care) + * + * The TPM driver should be built-in, but in case it isn't, protect + * against the chip disappearing by incrementing the module usage count. + */ +int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) +{ + struct tpm_chip *chip; + int rc; + + chip = tpm_chip_find_get(chip_num); + if (chip == NULL) + return -ENODEV; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + rc = tpm2_pcr_read(chip, pcr_idx, res_buf); + else + rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); + tpm_chip_put(chip); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_pcr_read); + +/** + * tpm_pcr_extend - extend pcr value with hash + * @chip_num: tpm idx # or ANY + * @pcr_idx: pcr idx to extend + * @hash: hash value used to extend pcr value + * + * The TPM driver should be built-in, but in case it isn't, protect + * against the chip disappearing by incrementing + * the module usage count.
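For in-kernel consumers, the two exported helpers above are the whole TPM 1.2 PCR surface. A hypothetical consumer module might look like the sketch below; the module name, the choice of PCR 10, and the error mapping are illustrative only, and the only thing taken from this patch is the tpm_pcr_read()/tpm_pcr_extend() prototypes:

#include <linux/module.h>
#include <linux/tpm.h>

static int __init pcr_demo_init(void)
{
	u8 digest[20];	/* TPM 1.2 PCRs hold a SHA-1 sized digest */
	int rc;

	rc = tpm_pcr_read(TPM_ANY_NUM, 10, digest);
	if (rc)
		return rc < 0 ? rc : -EIO;	/* > 0 is a TPM status code */

	/* Fold the digest back into PCR 10; an extend cannot be undone. */
	rc = tpm_pcr_extend(TPM_ANY_NUM, 10, digest);
	return rc < 0 ? rc : (rc ? -EIO : 0);
}

static void __exit pcr_demo_exit(void)
{
}

module_init(pcr_demo_init);
module_exit(pcr_demo_exit);
MODULE_LICENSE("GPL");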
+ */ +#define TPM_ORD_PCR_EXTEND cpu_to_be32(20) +#define EXTEND_PCR_RESULT_SIZE 34 +static struct tpm_input_header pcrextend_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(34), + .ordinal = TPM_ORD_PCR_EXTEND +}; + +int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) +{ + struct tpm_cmd_t cmd; + int rc; + struct tpm_chip *chip; + + chip = tpm_chip_find_get(chip_num); + if (chip == NULL) + return -ENODEV; + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + rc = tpm2_pcr_extend(chip, pcr_idx, hash); + tpm_chip_put(chip); + return rc; + } + + cmd.header.in = pcrextend_header; + cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); + memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE); + rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, + "attempting to extend a PCR value"); + + tpm_chip_put(chip); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_pcr_extend); + +/** + * tpm_do_selftest - have the TPM continue its selftest and wait until it + * can receive further commands + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing + * a TPM error code. + */ +int tpm_do_selftest(struct tpm_chip *chip) +{ + int rc; + unsigned int loops; + unsigned int delay_msec = 100; + unsigned long duration; + struct tpm_cmd_t cmd; + + duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); + + loops = jiffies_to_msecs(duration) / delay_msec; + + rc = tpm_continue_selftest(chip); + /* This may fail if there was no TPM driver during a suspend/resume + * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST) + */ + if (rc) + return rc; + + do { + /* Attempt to read a PCR value */ + cmd.header.in = pcrread_header; + cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0); + rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE); + /* Some buggy TPMs will not respond to tpm_tis_ready() for + * around 300ms while the self test is ongoing; keep trying + * until the self test duration expires.
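tpm_transmit() above and the selftest loop below share one pattern: poll a status predicate at a fixed period until a deadline derived from the ordinal duration expires. A rough userspace model of that loop, with a stand-in for chip->ops->status(chip) and invented timings:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_PERIOD_MS 5	/* mirrors TPM_TIMEOUT */

static bool status_complete(int iteration)
{
	return iteration >= 40;	/* pretend completion after ~200 ms */
}

static int wait_for_completion_ms(long budget_ms)
{
	long waited = 0;
	int i = 0;

	while (waited < budget_ms) {
		if (status_complete(i++))
			return 0;
		usleep(POLL_PERIOD_MS * 1000);
		waited += POLL_PERIOD_MS;
	}
	return -1;	/* the driver returns -ETIME and cancels the command */
}

int main(void)
{
	printf("result: %d\n", wait_for_completion_ms(2 * 60 * 1000));
	return 0;
}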
*/ + if (rc == -ETIME) { + dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test"); + msleep(delay_msec); + continue; + } + + if (rc < TPM_HEADER_SIZE) + return -EFAULT; + + rc = be32_to_cpu(cmd.header.out.return_code); + if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { + dev_info(chip->pdev, + "TPM is disabled/deactivated (0x%X)\n", rc); + /* TPM is disabled and/or deactivated; driver can + * proceed and TPM does handle commands for + * suspend/resume correctly + */ + return 0; + } + if (rc != TPM_WARN_DOING_SELFTEST) + return rc; + msleep(delay_msec); + } while (--loops > 0); + + return rc; +} +EXPORT_SYMBOL_GPL(tpm_do_selftest); + +int tpm_send(u32 chip_num, void *cmd, size_t buflen) +{ + struct tpm_chip *chip; + int rc; + + chip = tpm_chip_find_get(chip_num); + if (chip == NULL) + return -ENODEV; + + rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd"); + + tpm_chip_put(chip); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_send); + +static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, + bool check_cancel, bool *canceled) +{ + u8 status = chip->ops->status(chip); + + *canceled = false; + if ((status & mask) == mask) + return true; + if (check_cancel && chip->ops->req_canceled(chip, status)) { + *canceled = true; + return true; + } + return false; +} + +int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, + wait_queue_head_t *queue, bool check_cancel) +{ + unsigned long stop; + long rc; + u8 status; + bool canceled = false; + + /* check current status */ + status = chip->ops->status(chip); + if ((status & mask) == mask) + return 0; + + stop = jiffies + timeout; + + if (chip->vendor.irq) { +again: + timeout = stop - jiffies; + if ((long)timeout <= 0) + return -ETIME; + rc = wait_event_interruptible_timeout(*queue, + wait_for_tpm_stat_cond(chip, mask, check_cancel, + &canceled), + timeout); + if (rc > 0) { + if (canceled) + return -ECANCELED; + return 0; + } + if (rc == -ERESTARTSYS && freezing(current)) { + clear_thread_flag(TIF_SIGPENDING); + goto again; + } + } else { + do { + msleep(TPM_TIMEOUT); + status = chip->ops->status(chip); + if ((status & mask) == mask) + return 0; + } while (time_before(jiffies, stop)); + } + return -ETIME; +} +EXPORT_SYMBOL_GPL(wait_for_tpm_stat); + +#define TPM_ORD_SAVESTATE cpu_to_be32(152) +#define SAVESTATE_RESULT_SIZE 10 + +static struct tpm_input_header savestate_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(10), + .ordinal = TPM_ORD_SAVESTATE +}; + +/* + * We are about to suspend. Save the TPM state + * so that it can be restored. 
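The suspend handler below retries TPM_SaveState while the chip answers TPM_WARN_RETRY. A self-contained sketch of just that retry budget; the stub pretends the chip is busy for the first few tries, and the constants mirror the driver's:

#include <stdio.h>
#include <unistd.h>

#define TPM_WARN_RETRY       0x800
#define TPM_RETRY            50
#define TPM_TIMEOUT_RETRY_MS 100

static int send_savestate(int attempt)
{
	return attempt < 3 ? TPM_WARN_RETRY : 0;	/* busy at first */
}

int main(void)
{
	int rc = 0, try;

	for (try = 0; try < TPM_RETRY; try++) {
		rc = send_savestate(try);
		if (rc != TPM_WARN_RETRY)
			break;
		usleep(TPM_TIMEOUT_RETRY_MS * 1000);
	}
	printf("rc=%d after %d tries (~%d ms waited)\n",
	       rc, try + 1, try * TPM_TIMEOUT_RETRY_MS);
	return 0;
}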
+ */ +int tpm_pm_suspend(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_cmd_t cmd; + int rc, try; + + u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; + + if (chip == NULL) + return -ENODEV; + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + tpm2_shutdown(chip, TPM2_SU_STATE); + return 0; + } + + /* for buggy tpm, flush pcrs with extend to selected dummy */ + if (tpm_suspend_pcr) { + cmd.header.in = pcrextend_header; + cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr); + memcpy(cmd.params.pcrextend_in.hash, dummy_hash, + TPM_DIGEST_SIZE); + rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, + "extending dummy pcr before suspend"); + } + + /* now do the actual savestate */ + for (try = 0; try < TPM_RETRY; try++) { + cmd.header.in = savestate_header; + rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL); + + /* + * If the TPM indicates that it is too busy to respond to + * this command then retry before giving up. It can take + * several seconds for this TPM to be ready. + * + * This can happen if the TPM has already been sent the + * SaveState command before the driver has loaded. TCG 1.2 + * specification states that any communication after SaveState + * may cause the TPM to invalidate previously saved state. + */ + if (rc != TPM_WARN_RETRY) + break; + msleep(TPM_TIMEOUT_RETRY); + } + + if (rc) + dev_err(chip->pdev, + "Error (%d) sending savestate before suspend\n", rc); + else if (try > 0) + dev_warn(chip->pdev, "TPM savestate took %dms\n", + try * TPM_TIMEOUT_RETRY); + + return rc; +} +EXPORT_SYMBOL_GPL(tpm_pm_suspend); + +/* + * Resume from a power save. The BIOS already restored + * the TPM state. + */ +int tpm_pm_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + return 0; +} +EXPORT_SYMBOL_GPL(tpm_pm_resume); + +#define TPM_GETRANDOM_RESULT_SIZE 18 +static struct tpm_input_header tpm_getrandom_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(14), + .ordinal = TPM_ORD_GET_RANDOM +}; + +/** + * tpm_get_random() - Get random bytes from the tpm's RNG + * @chip_num: A specific chip number for the request or TPM_ANY_NUM + * @out: destination buffer for the random bytes + * @max: the max number of bytes to write to @out + * + * Returns < 0 on error and the number of bytes read on success + */ +int tpm_get_random(u32 chip_num, u8 *out, size_t max) +{ + struct tpm_chip *chip; + struct tpm_cmd_t tpm_cmd; + u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA); + int err, total = 0, retries = 5; + u8 *dest = out; + + if (!out || !num_bytes || max > TPM_MAX_RNG_DATA) + return -EINVAL; + + chip = tpm_chip_find_get(chip_num); + if (chip == NULL) + return -ENODEV; + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + err = tpm2_get_random(chip, out, max); + tpm_chip_put(chip); + return err; + } + + do { + tpm_cmd.header.in = tpm_getrandom_header; + tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes); + + err = tpm_transmit_cmd(chip, &tpm_cmd, + TPM_GETRANDOM_RESULT_SIZE + num_bytes, + "attempting to get random"); + if (err) + break; + + recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); + memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd); + + dest += recd; + total += recd; + num_bytes -= recd; + } while (retries-- && total < max); + + tpm_chip_put(chip); + return total ?
total : -EIO; +} +EXPORT_SYMBOL_GPL(tpm_get_random); + +static int __init tpm_init(void) +{ + int rc; + + tpm_class = class_create(THIS_MODULE, "tpm"); + if (IS_ERR(tpm_class)) { + pr_err("couldn't create tpm class\n"); + return PTR_ERR(tpm_class); + } + + rc = alloc_chrdev_region(&tpm_devt, 0, TPM_NUM_DEVICES, "tpm"); + if (rc < 0) { + pr_err("tpm: failed to allocate char dev region\n"); + class_destroy(tpm_class); + return rc; + } + + return 0; +} + +static void __exit tpm_exit(void) +{ + class_destroy(tpm_class); + unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES); +} + +subsys_initcall(tpm_init); +module_exit(tpm_exit); + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm-sysfs.c b/kernel/drivers/char/tpm/tpm-sysfs.c new file mode 100644 index 000000000..ee66fd467 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm-sysfs.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2004 IBM Corporation + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Copyright (C) 2013 Obsidian Research Corp + * Jason Gunthorpe + * + * sysfs filesystem inspection interface to the TPM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ +#include +#include "tpm.h" + +#define READ_PUBEK_RESULT_SIZE 314 +#define TPM_ORD_READPUBEK cpu_to_be32(124) +static struct tpm_input_header tpm_readpubek_header = { + .tag = TPM_TAG_RQU_COMMAND, + .length = cpu_to_be32(30), + .ordinal = TPM_ORD_READPUBEK +}; +static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 *data; + struct tpm_cmd_t tpm_cmd; + ssize_t err; + int i, rc; + char *str = buf; + + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_cmd.header.in = tpm_readpubek_header; + err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, + "attempting to read the PUBEK"); + if (err) + goto out; + + /* + ignore header 10 bytes + algorithm 32 bits (1 == RSA ) + encscheme 16 bits + sigscheme 16 bits + parameters (RSA 12->bytes: keybit, #primes, expbit) + keylenbytes 32 bits + 256 byte modulus + ignore checksum 20 bytes + */ + data = tpm_cmd.params.readpubek_out_buffer; + str += + sprintf(str, + "Algorithm: %02X %02X %02X %02X\n" + "Encscheme: %02X %02X\n" + "Sigscheme: %02X %02X\n" + "Parameters: %02X %02X %02X %02X " + "%02X %02X %02X %02X " + "%02X %02X %02X %02X\n" + "Modulus length: %d\n" + "Modulus:\n", + data[0], data[1], data[2], data[3], + data[4], data[5], + data[6], data[7], + data[12], data[13], data[14], data[15], + data[16], data[17], data[18], data[19], + data[20], data[21], data[22], data[23], + be32_to_cpu(*((__be32 *) (data + 24)))); + + for (i = 0; i < 256; i++) { + str += sprintf(str, "%02X ", data[i + 28]); + if ((i + 1) % 16 == 0) + str += sprintf(str, "\n"); + } +out: + rc = str - buf; + return rc; +} +static DEVICE_ATTR_RO(pubek); + +static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + cap_t cap; + u8 digest[TPM_DIGEST_SIZE]; + ssize_t rc; + int i, j, num_pcrs; + char *str = buf; + struct tpm_chip *chip = dev_get_drvdata(dev); + + rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap, + "attempting to determine the number of PCRS"); + if (rc) + return 0; + + num_pcrs = be32_to_cpu(cap.num_pcrs); + for (i = 0; i < num_pcrs; i++) { + rc = 
tpm_pcr_read_dev(chip, i, digest); + if (rc) + break; + str += sprintf(str, "PCR-%02d: ", i); + for (j = 0; j < TPM_DIGEST_SIZE; j++) + str += sprintf(str, "%02X ", digest[j]); + str += sprintf(str, "\n"); + } + return str - buf; +} +static DEVICE_ATTR_RO(pcrs); + +static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + cap_t cap; + ssize_t rc; + + rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent enabled state"); + if (rc) + return 0; + + rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); + return rc; +} +static DEVICE_ATTR_RO(enabled); + +static ssize_t active_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + cap_t cap; + ssize_t rc; + + rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent active state"); + if (rc) + return 0; + + rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated); + return rc; +} +static DEVICE_ATTR_RO(active); + +static ssize_t owned_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + cap_t cap; + ssize_t rc; + + rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap, + "attempting to determine the owner state"); + if (rc) + return 0; + + rc = sprintf(buf, "%d\n", cap.owned); + return rc; +} +static DEVICE_ATTR_RO(owned); + +static ssize_t temp_deactivated_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + cap_t cap; + ssize_t rc; + + rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap, + "attempting to determine the temporary state"); + if (rc) + return 0; + + rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated); + return rc; +} +static DEVICE_ATTR_RO(temp_deactivated); + +static ssize_t caps_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + cap_t cap; + ssize_t rc; + char *str = buf; + + rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, + "attempting to determine the manufacturer"); + if (rc) + return 0; + str += sprintf(str, "Manufacturer: 0x%x\n", + be32_to_cpu(cap.manufacturer_id)); + + /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ + rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, + "attempting to determine the 1.2 version"); + if (!rc) { + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version_1_2.Major, + cap.tpm_version_1_2.Minor, + cap.tpm_version_1_2.revMajor, + cap.tpm_version_1_2.revMinor); + } else { + /* Otherwise just use TPM_STRUCT_VER */ + rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version"); + if (rc) + return 0; + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version.Major, + cap.tpm_version.Minor, + cap.tpm_version.revMajor, + cap.tpm_version.revMinor); + } + + return str - buf; +} +static DEVICE_ATTR_RO(caps); + +static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + if (chip == NULL) + return 0; + + chip->ops->cancel(chip); + return count; +} +static DEVICE_ATTR_WO(cancel); + +static ssize_t durations_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip->vendor.duration[TPM_LONG] == 0) + return 0; + + return sprintf(buf, "%d %d %d [%s]\n", + jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), + jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), + jiffies_to_usecs(chip->vendor.duration[TPM_LONG]), + chip->vendor.duration_adjusted + ? 
"adjusted" : "original"); +} +static DEVICE_ATTR_RO(durations); + +static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d %d %d %d [%s]\n", + jiffies_to_usecs(chip->vendor.timeout_a), + jiffies_to_usecs(chip->vendor.timeout_b), + jiffies_to_usecs(chip->vendor.timeout_c), + jiffies_to_usecs(chip->vendor.timeout_d), + chip->vendor.timeout_adjusted + ? "adjusted" : "original"); +} +static DEVICE_ATTR_RO(timeouts); + +static struct attribute *tpm_dev_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static const struct attribute_group tpm_dev_group = { + .attrs = tpm_dev_attrs, +}; + +int tpm_sysfs_add_device(struct tpm_chip *chip) +{ + int err; + err = sysfs_create_group(&chip->pdev->kobj, + &tpm_dev_group); + + if (err) + dev_err(chip->pdev, + "failed to create sysfs attributes, %d\n", err); + return err; +} + +void tpm_sysfs_del_device(struct tpm_chip *chip) +{ + sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group); +} diff --git a/kernel/drivers/char/tpm/tpm.h b/kernel/drivers/char/tpm/tpm.h new file mode 100644 index 000000000..f8319a086 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm.h @@ -0,0 +1,439 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum tpm_const { + TPM_MINOR = 224, /* officially assigned */ + TPM_BUFSIZE = 4096, + TPM_NUM_DEVICES = 256, + TPM_RETRY = 50, /* 5 seconds */ +}; + +enum tpm_timeout { + TPM_TIMEOUT = 5, /* msecs */ + TPM_TIMEOUT_RETRY = 100 /* msecs */ +}; + +/* TPM addresses */ +enum tpm_addr { + TPM_SUPERIO_ADDR = 0x2E, + TPM_ADDR = 0x4E, +}; + +/* Indexes the duration array */ +enum tpm_duration { + TPM_SHORT = 0, + TPM_MEDIUM = 1, + TPM_LONG = 2, + TPM_UNDEFINED, +}; + +#define TPM_WARN_RETRY 0x800 +#define TPM_WARN_DOING_SELFTEST 0x802 +#define TPM_ERR_DEACTIVATED 0x6 +#define TPM_ERR_DISABLED 0x7 +#define TPM_ERR_INVALID_POSTINIT 38 + +#define TPM_HEADER_SIZE 10 + +enum tpm2_const { + TPM2_PLATFORM_PCR = 24, + TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8), + TPM2_TIMEOUT_A = 750, + TPM2_TIMEOUT_B = 2000, + TPM2_TIMEOUT_C = 200, + TPM2_TIMEOUT_D = 30, + TPM2_DURATION_SHORT = 20, + TPM2_DURATION_MEDIUM = 750, + TPM2_DURATION_LONG = 2000, +}; + +enum tpm2_structures { + TPM2_ST_NO_SESSIONS = 0x8001, + TPM2_ST_SESSIONS = 0x8002, +}; + +enum tpm2_return_codes { + TPM2_RC_INITIALIZE = 0x0100, + TPM2_RC_TESTING = 0x090A, + TPM2_RC_DISABLED = 0x0120, +}; + +enum tpm2_algorithms { + TPM2_ALG_SHA1 = 0x0004, +}; + +enum tpm2_command_codes { + TPM2_CC_FIRST = 0x011F, + TPM2_CC_SELF_TEST = 0x0143, + TPM2_CC_STARTUP = 0x0144, + TPM2_CC_SHUTDOWN = 0x0145, + TPM2_CC_GET_CAPABILITY = 0x017A, + TPM2_CC_GET_RANDOM = 0x017B, + TPM2_CC_PCR_READ = 0x017E, + TPM2_CC_PCR_EXTEND = 0x0182, + TPM2_CC_LAST = 0x018F, +}; + +enum tpm2_permanent_handles { + TPM2_RS_PW = 0x40000009, +}; + +enum tpm2_capabilities { + TPM2_CAP_TPM_PROPERTIES = 6, +}; + +enum tpm2_startup_types { + TPM2_SU_CLEAR = 0x0000, + TPM2_SU_STATE = 0x0001, +}; + +struct tpm_chip; + +struct tpm_vendor_specific { + void __iomem *iobase; /* ioremapped address */ + unsigned long base; /* TPM base address */ + + int irq; + int probed_irq; + + int region_size; + int have_region; + + struct list_head list; + int locality; + unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */ + bool timeout_adjusted; + unsigned long duration[3]; /* jiffies */ + bool duration_adjusted; + void *priv; + + wait_queue_head_t read_queue; + wait_queue_head_t int_queue; + + u16 manufacturer_id; +}; + +#define TPM_VPRIV(c) ((c)->vendor.priv) + +#define TPM_VID_INTEL 0x8086 +#define TPM_VID_WINBOND 0x1050 +#define TPM_VID_STM 0x104A + +#define TPM_PPI_VERSION_LEN 3 + +enum tpm_chip_flags { + TPM_CHIP_FLAG_REGISTERED = BIT(0), + TPM_CHIP_FLAG_PPI = BIT(1), + TPM_CHIP_FLAG_TPM2 = BIT(2), +}; + +struct tpm_chip { + struct device *pdev; /* Device stuff */ + struct device dev; + struct cdev cdev; + + const struct tpm_class_ops *ops; + unsigned int flags; + + int dev_num; /* /dev/tpm# */ + char devname[7]; + unsigned long is_open; /* only one allowed */ + int time_expired; + + struct mutex tpm_mutex; /* tpm is processing */ + + struct tpm_vendor_specific vendor; + + struct dentry **bios_dir; + +#ifdef CONFIG_ACPI + acpi_handle acpi_dev_handle; + char ppi_version[TPM_PPI_VERSION_LEN + 1]; +#endif /* CONFIG_ACPI */ + + struct list_head list; +}; + +#define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor) + +static inline void tpm_chip_put(struct tpm_chip *chip) +{ + module_put(chip->pdev->driver->owner); +} + +static inline int tpm_read_index(int base, int index) +{ + outb(index, base); + return inb(base+1) & 0xFF; +} + +static inline void 
tpm_write_index(int base, int index, int value) +{ + outb(index, base); + outb(value & 0xFF, base+1); +} +struct tpm_input_header { + __be16 tag; + __be32 length; + __be32 ordinal; +} __packed; + +struct tpm_output_header { + __be16 tag; + __be32 length; + __be32 return_code; +} __packed; + +#define TPM_TAG_RQU_COMMAND cpu_to_be16(193) + +struct stclear_flags_t { + __be16 tag; + u8 deactivated; + u8 disableForceClear; + u8 physicalPresence; + u8 physicalPresenceLock; + u8 bGlobalLock; +} __packed; + +struct tpm_version_t { + u8 Major; + u8 Minor; + u8 revMajor; + u8 revMinor; +} __packed; + +struct tpm_version_1_2_t { + __be16 tag; + u8 Major; + u8 Minor; + u8 revMajor; + u8 revMinor; +} __packed; + +struct timeout_t { + __be32 a; + __be32 b; + __be32 c; + __be32 d; +} __packed; + +struct duration_t { + __be32 tpm_short; + __be32 tpm_medium; + __be32 tpm_long; +} __packed; + +struct permanent_flags_t { + __be16 tag; + u8 disable; + u8 ownership; + u8 deactivated; + u8 readPubek; + u8 disableOwnerClear; + u8 allowMaintenance; + u8 physicalPresenceLifetimeLock; + u8 physicalPresenceHWEnable; + u8 physicalPresenceCMDEnable; + u8 CEKPUsed; + u8 TPMpost; + u8 TPMpostLock; + u8 FIPS; + u8 operator; + u8 enableRevokeEK; + u8 nvLocked; + u8 readSRKPub; + u8 tpmEstablished; + u8 maintenanceDone; + u8 disableFullDALogicInfo; +} __packed; + +typedef union { + struct permanent_flags_t perm_flags; + struct stclear_flags_t stclear_flags; + bool owned; + __be32 num_pcrs; + struct tpm_version_t tpm_version; + struct tpm_version_1_2_t tpm_version_1_2; + __be32 manufacturer_id; + struct timeout_t timeout; + struct duration_t duration; +} cap_t; + +enum tpm_capabilities { + TPM_CAP_FLAG = cpu_to_be32(4), + TPM_CAP_PROP = cpu_to_be32(5), + CAP_VERSION_1_1 = cpu_to_be32(0x06), + CAP_VERSION_1_2 = cpu_to_be32(0x1A) +}; + +enum tpm_sub_capabilities { + TPM_CAP_PROP_PCR = cpu_to_be32(0x101), + TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103), + TPM_CAP_FLAG_PERM = cpu_to_be32(0x108), + TPM_CAP_FLAG_VOL = cpu_to_be32(0x109), + TPM_CAP_PROP_OWNER = cpu_to_be32(0x111), + TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115), + TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120), + +}; + +struct tpm_getcap_params_in { + __be32 cap; + __be32 subcap_size; + __be32 subcap; +} __packed; + +struct tpm_getcap_params_out { + __be32 cap_size; + cap_t cap; +} __packed; + +struct tpm_readpubek_params_out { + u8 algorithm[4]; + u8 encscheme[2]; + u8 sigscheme[2]; + __be32 paramsize; + u8 parameters[12]; /*assuming RSA*/ + __be32 keysize; + u8 modulus[256]; + u8 checksum[20]; +} __packed; + +typedef union { + struct tpm_input_header in; + struct tpm_output_header out; +} tpm_cmd_header; + +struct tpm_pcrread_out { + u8 pcr_result[TPM_DIGEST_SIZE]; +} __packed; + +struct tpm_pcrread_in { + __be32 pcr_idx; +} __packed; + +struct tpm_pcrextend_in { + __be32 pcr_idx; + u8 hash[TPM_DIGEST_SIZE]; +} __packed; + +/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18 + * bytes, but 128 is still a relatively large number of random bytes and + * anything much bigger causes users of struct tpm_cmd_t to start getting + * compiler warnings about stack frame size. 
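Every TPM 1.2 command opens with the 10-byte big-endian header described by struct tpm_input_header above; tpm_transmit() reads the length back out of bytes 2-5 and the ordinal out of bytes 6-9. A standalone illustration that packs a TPM_GetRandom request the same way, with userspace byte-order helpers standing in for cpu_to_be16()/cpu_to_be32():

#include <arpa/inet.h>	/* htons, htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TPM_TAG_RQU_COMMAND 193
#define TPM_ORD_GET_RANDOM   70

int main(void)
{
	uint8_t buf[14];
	uint16_t tag = htons(TPM_TAG_RQU_COMMAND);
	uint32_t len = htonl(sizeof(buf));	/* whole command length */
	uint32_t ord = htonl(TPM_ORD_GET_RANDOM);
	uint32_t num_bytes = htonl(16);		/* TPM_GetRandom body */

	memcpy(buf, &tag, 2);
	memcpy(buf + 2, &len, 4);
	memcpy(buf + 6, &ord, 4);
	memcpy(buf + 10, &num_bytes, 4);

	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* 00 c1 00 00 00 0e 00 00 00 46 00 00 00 10 */
	return 0;
}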
*/ +#define TPM_MAX_RNG_DATA 128 + +struct tpm_getrandom_out { + __be32 rng_data_len; + u8 rng_data[TPM_MAX_RNG_DATA]; +} __packed; + +struct tpm_getrandom_in { + __be32 num_bytes; +} __packed; + +struct tpm_startup_in { + __be16 startup_type; +} __packed; + +typedef union { + struct tpm_getcap_params_out getcap_out; + struct tpm_readpubek_params_out readpubek_out; + u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)]; + struct tpm_getcap_params_in getcap_in; + struct tpm_pcrread_in pcrread_in; + struct tpm_pcrread_out pcrread_out; + struct tpm_pcrextend_in pcrextend_in; + struct tpm_getrandom_in getrandom_in; + struct tpm_getrandom_out getrandom_out; + struct tpm_startup_in startup_in; +} tpm_cmd_params; + +struct tpm_cmd_t { + tpm_cmd_header header; + tpm_cmd_params params; +} __packed; + +extern struct class *tpm_class; +extern dev_t tpm_devt; +extern const struct file_operations tpm_fops; + +ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *); +ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, + size_t bufsiz); +ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len, + const char *desc); +extern int tpm_get_timeouts(struct tpm_chip *); +extern void tpm_gen_interrupt(struct tpm_chip *); +extern int tpm_do_selftest(struct tpm_chip *); +extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32); +extern int tpm_pm_suspend(struct device *); +extern int tpm_pm_resume(struct device *); +extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, + wait_queue_head_t *, bool); + +struct tpm_chip *tpm_chip_find_get(int chip_num); +extern struct tpm_chip *tpmm_chip_alloc(struct device *dev, + const struct tpm_class_ops *ops); +extern int tpm_chip_register(struct tpm_chip *chip); +extern void tpm_chip_unregister(struct tpm_chip *chip); + +int tpm_sysfs_add_device(struct tpm_chip *chip); +void tpm_sysfs_del_device(struct tpm_chip *chip); + +int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); + +#ifdef CONFIG_ACPI +extern int tpm_add_ppi(struct tpm_chip *chip); +extern void tpm_remove_ppi(struct tpm_chip *chip); +#else +static inline int tpm_add_ppi(struct tpm_chip *chip) +{ + return 0; +} + +static inline void tpm_remove_ppi(struct tpm_chip *chip) +{ +} +#endif + +int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); +int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash); +int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max); +ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, + u32 *value, const char *desc); + +extern int tpm2_startup(struct tpm_chip *chip, u16 startup_type); +extern void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); +extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32); +extern int tpm2_do_selftest(struct tpm_chip *chip); +extern int tpm2_gen_interrupt(struct tpm_chip *chip); +extern int tpm2_probe(struct tpm_chip *chip); diff --git a/kernel/drivers/char/tpm/tpm2-cmd.c b/kernel/drivers/char/tpm/tpm2-cmd.c new file mode 100644 index 000000000..011909a9b --- /dev/null +++ b/kernel/drivers/char/tpm/tpm2-cmd.c @@ -0,0 +1,646 @@ +/* + * Copyright (C) 2014 Intel Corporation + * + * Authors: + * Jarkko Sakkinen + * + * Maintained by: + * + * This file contains TPM2 protocol implementations of the commands + * used by the kernel internally. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include "tpm.h" + +struct tpm2_startup_in { + __be16 startup_type; +} __packed; + +struct tpm2_self_test_in { + u8 full_test; +} __packed; + +struct tpm2_pcr_read_in { + __be32 pcr_selects_cnt; + __be16 hash_alg; + u8 pcr_select_size; + u8 pcr_select[TPM2_PCR_SELECT_MIN]; +} __packed; + +struct tpm2_pcr_read_out { + __be32 update_cnt; + __be32 pcr_selects_cnt; + __be16 hash_alg; + u8 pcr_select_size; + u8 pcr_select[TPM2_PCR_SELECT_MIN]; + __be32 digests_cnt; + __be16 digest_size; + u8 digest[TPM_DIGEST_SIZE]; +} __packed; + +struct tpm2_null_auth_area { + __be32 handle; + __be16 nonce_size; + u8 attributes; + __be16 auth_size; +} __packed; + +struct tpm2_pcr_extend_in { + __be32 pcr_idx; + __be32 auth_area_size; + struct tpm2_null_auth_area auth_area; + __be32 digest_cnt; + __be16 hash_alg; + u8 digest[TPM_DIGEST_SIZE]; +} __packed; + +struct tpm2_get_tpm_pt_in { + __be32 cap_id; + __be32 property_id; + __be32 property_cnt; +} __packed; + +struct tpm2_get_tpm_pt_out { + u8 more_data; + __be32 subcap_id; + __be32 property_cnt; + __be32 property_id; + __be32 value; +} __packed; + +struct tpm2_get_random_in { + __be16 size; +} __packed; + +struct tpm2_get_random_out { + __be16 size; + u8 buffer[TPM_MAX_RNG_DATA]; +} __packed; + +union tpm2_cmd_params { + struct tpm2_startup_in startup_in; + struct tpm2_self_test_in selftest_in; + struct tpm2_pcr_read_in pcrread_in; + struct tpm2_pcr_read_out pcrread_out; + struct tpm2_pcr_extend_in pcrextend_in; + struct tpm2_get_tpm_pt_in get_tpm_pt_in; + struct tpm2_get_tpm_pt_out get_tpm_pt_out; + struct tpm2_get_random_in getrandom_in; + struct tpm2_get_random_out getrandom_out; +}; + +struct tpm2_cmd { + tpm_cmd_header header; + union tpm2_cmd_params params; +} __packed; + +/* + * Array with one entry per ordinal defining the maximum amount + * of time the chip could take to return the result. The values + * of the SHORT, MEDIUM, and LONG durations are taken from the + * PC Client Profile (PTP) specification. 
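The table below is indexed by rebasing the command code against TPM2_CC_FIRST. A compressed model of that lookup; designated initializers are used here only for brevity, while the real table spells out every slot and defaults to TPM_UNDEFINED rather than 0:

#include <stdio.h>

enum { TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_UNDEFINED };

#define TPM2_CC_FIRST 0x011F
#define TPM2_CC_LAST  0x018F

static const unsigned char demo_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = {
	[0x0143 - TPM2_CC_FIRST] = TPM_LONG,	/* TPM2_CC_SELF_TEST */
	[0x017B - TPM2_CC_FIRST] = TPM_LONG,	/* TPM2_CC_GET_RANDOM */
};

int main(void)
{
	unsigned int ordinal = 0x0143;
	int idx = TPM_UNDEFINED;

	if (ordinal >= TPM2_CC_FIRST && ordinal <= TPM2_CC_LAST)
		idx = demo_duration[ordinal - TPM2_CC_FIRST];

	printf("duration class %d (0=short 1=medium 2=long)\n", idx);
	return 0;
}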
+ */ +static const u8 tpm2_ordinal_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = { + TPM_UNDEFINED, /* 11F */ + TPM_UNDEFINED, /* 120 */ + TPM_LONG, /* 121 */ + TPM_UNDEFINED, /* 122 */ + TPM_UNDEFINED, /* 123 */ + TPM_UNDEFINED, /* 124 */ + TPM_UNDEFINED, /* 125 */ + TPM_UNDEFINED, /* 126 */ + TPM_UNDEFINED, /* 127 */ + TPM_UNDEFINED, /* 128 */ + TPM_LONG, /* 129 */ + TPM_UNDEFINED, /* 12a */ + TPM_UNDEFINED, /* 12b */ + TPM_UNDEFINED, /* 12c */ + TPM_UNDEFINED, /* 12d */ + TPM_UNDEFINED, /* 12e */ + TPM_UNDEFINED, /* 12f */ + TPM_UNDEFINED, /* 130 */ + TPM_UNDEFINED, /* 131 */ + TPM_UNDEFINED, /* 132 */ + TPM_UNDEFINED, /* 133 */ + TPM_UNDEFINED, /* 134 */ + TPM_UNDEFINED, /* 135 */ + TPM_UNDEFINED, /* 136 */ + TPM_UNDEFINED, /* 137 */ + TPM_UNDEFINED, /* 138 */ + TPM_UNDEFINED, /* 139 */ + TPM_UNDEFINED, /* 13a */ + TPM_UNDEFINED, /* 13b */ + TPM_UNDEFINED, /* 13c */ + TPM_UNDEFINED, /* 13d */ + TPM_MEDIUM, /* 13e */ + TPM_UNDEFINED, /* 13f */ + TPM_UNDEFINED, /* 140 */ + TPM_UNDEFINED, /* 141 */ + TPM_UNDEFINED, /* 142 */ + TPM_LONG, /* 143 */ + TPM_MEDIUM, /* 144 */ + TPM_UNDEFINED, /* 145 */ + TPM_UNDEFINED, /* 146 */ + TPM_UNDEFINED, /* 147 */ + TPM_UNDEFINED, /* 148 */ + TPM_UNDEFINED, /* 149 */ + TPM_UNDEFINED, /* 14a */ + TPM_UNDEFINED, /* 14b */ + TPM_UNDEFINED, /* 14c */ + TPM_UNDEFINED, /* 14d */ + TPM_LONG, /* 14e */ + TPM_UNDEFINED, /* 14f */ + TPM_UNDEFINED, /* 150 */ + TPM_UNDEFINED, /* 151 */ + TPM_UNDEFINED, /* 152 */ + TPM_UNDEFINED, /* 153 */ + TPM_UNDEFINED, /* 154 */ + TPM_UNDEFINED, /* 155 */ + TPM_UNDEFINED, /* 156 */ + TPM_UNDEFINED, /* 157 */ + TPM_UNDEFINED, /* 158 */ + TPM_UNDEFINED, /* 159 */ + TPM_UNDEFINED, /* 15a */ + TPM_UNDEFINED, /* 15b */ + TPM_MEDIUM, /* 15c */ + TPM_UNDEFINED, /* 15d */ + TPM_UNDEFINED, /* 15e */ + TPM_UNDEFINED, /* 15f */ + TPM_UNDEFINED, /* 160 */ + TPM_UNDEFINED, /* 161 */ + TPM_UNDEFINED, /* 162 */ + TPM_UNDEFINED, /* 163 */ + TPM_UNDEFINED, /* 164 */ + TPM_UNDEFINED, /* 165 */ + TPM_UNDEFINED, /* 166 */ + TPM_UNDEFINED, /* 167 */ + TPM_UNDEFINED, /* 168 */ + TPM_UNDEFINED, /* 169 */ + TPM_UNDEFINED, /* 16a */ + TPM_UNDEFINED, /* 16b */ + TPM_UNDEFINED, /* 16c */ + TPM_UNDEFINED, /* 16d */ + TPM_UNDEFINED, /* 16e */ + TPM_UNDEFINED, /* 16f */ + TPM_UNDEFINED, /* 170 */ + TPM_UNDEFINED, /* 171 */ + TPM_UNDEFINED, /* 172 */ + TPM_UNDEFINED, /* 173 */ + TPM_UNDEFINED, /* 174 */ + TPM_UNDEFINED, /* 175 */ + TPM_UNDEFINED, /* 176 */ + TPM_LONG, /* 177 */ + TPM_UNDEFINED, /* 178 */ + TPM_UNDEFINED, /* 179 */ + TPM_MEDIUM, /* 17a */ + TPM_LONG, /* 17b */ + TPM_UNDEFINED, /* 17c */ + TPM_UNDEFINED, /* 17d */ + TPM_UNDEFINED, /* 17e */ + TPM_UNDEFINED, /* 17f */ + TPM_UNDEFINED, /* 180 */ + TPM_UNDEFINED, /* 181 */ + TPM_MEDIUM, /* 182 */ + TPM_UNDEFINED, /* 183 */ + TPM_UNDEFINED, /* 184 */ + TPM_MEDIUM, /* 185 */ + TPM_MEDIUM, /* 186 */ + TPM_UNDEFINED, /* 187 */ + TPM_UNDEFINED, /* 188 */ + TPM_UNDEFINED, /* 189 */ + TPM_UNDEFINED, /* 18a */ + TPM_UNDEFINED, /* 18b */ + TPM_UNDEFINED, /* 18c */ + TPM_UNDEFINED, /* 18d */ + TPM_UNDEFINED, /* 18e */ + TPM_UNDEFINED /* 18f */ +}; + +#define TPM2_PCR_READ_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_pcr_read_in)) + +static const struct tpm_input_header tpm2_pcrread_header = { + .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS), + .length = cpu_to_be32(TPM2_PCR_READ_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_PCR_READ) +}; + +/** + * tpm2_pcr_read() - read a PCR value + * @chip: TPM chip to use. + * @pcr_idx: index of the PCR to read. 
+ * @res_buf: buffer to store the resulting hash. + * + * 0 is returned when the operation is successful. If a negative number is + * returned it denotes a POSIX error code. If a positive number is returned + * it denotes a TPM error. + */ +int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) +{ + int rc; + struct tpm2_cmd cmd; + u8 *buf; + + if (pcr_idx >= TPM2_PLATFORM_PCR) + return -EINVAL; + + cmd.header.in = tpm2_pcrread_header; + cmd.params.pcrread_in.pcr_selects_cnt = cpu_to_be32(1); + cmd.params.pcrread_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1); + cmd.params.pcrread_in.pcr_select_size = TPM2_PCR_SELECT_MIN; + + memset(cmd.params.pcrread_in.pcr_select, 0, + sizeof(cmd.params.pcrread_in.pcr_select)); + cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7); + + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + "attempting to read a pcr value"); + if (rc == 0) { + buf = cmd.params.pcrread_out.digest; + memcpy(res_buf, buf, TPM_DIGEST_SIZE); + } + + return rc; +} + +#define TPM2_GET_PCREXTEND_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_pcr_extend_in)) + +static const struct tpm_input_header tpm2_pcrextend_header = { + .tag = cpu_to_be16(TPM2_ST_SESSIONS), + .length = cpu_to_be32(TPM2_GET_PCREXTEND_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_PCR_EXTEND) +}; + +/** + * tpm2_pcr_extend() - extend a PCR value + * @chip: TPM chip to use. + * @pcr_idx: index of the PCR. + * @hash: hash value to use for the extend operation. + * + * 0 is returned when the operation is successful. If a negative number is + * returned it denotes a POSIX error code. If a positive number is returned + * it denotes a TPM error. + */ +int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash) +{ + struct tpm2_cmd cmd; + int rc; + + cmd.header.in = tpm2_pcrextend_header; + cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); + cmd.params.pcrextend_in.auth_area_size = + cpu_to_be32(sizeof(struct tpm2_null_auth_area)); + cmd.params.pcrextend_in.auth_area.handle = + cpu_to_be32(TPM2_RS_PW); + cmd.params.pcrextend_in.auth_area.nonce_size = 0; + cmd.params.pcrextend_in.auth_area.attributes = 0; + cmd.params.pcrextend_in.auth_area.auth_size = 0; + cmd.params.pcrextend_in.digest_cnt = cpu_to_be32(1); + cmd.params.pcrextend_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1); + memcpy(cmd.params.pcrextend_in.digest, hash, TPM_DIGEST_SIZE); + + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + "attempting to extend a PCR value"); + + return rc; +} + +#define TPM2_GETRANDOM_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_get_random_in)) + +static const struct tpm_input_header tpm2_getrandom_header = { + .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS), + .length = cpu_to_be32(TPM2_GETRANDOM_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_GET_RANDOM) +}; + +/** + * tpm2_get_random() - get random bytes from the TPM RNG + * @chip: TPM chip to use + * @out: destination buffer for the random bytes + * @max: the max number of bytes to write to @out + * + * On success the number of bytes read is returned. If a negative number is + * returned it denotes a POSIX error code.
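tpm2_pcr_read() above selects the PCR through a small bitmap: byte pcr_idx / 8, bit pcr_idx % 8, across TPM2_PCR_SELECT_MIN bytes. The same arithmetic in isolation:

#include <stdio.h>

#define TPM2_PCR_SELECT_MIN 3	/* (24 platform PCRs + 7) / 8 */

int main(void)
{
	unsigned char sel[TPM2_PCR_SELECT_MIN] = { 0 };
	int pcr_idx = 10;

	sel[pcr_idx >> 3] |= 1 << (pcr_idx & 0x7);

	printf("%02x %02x %02x\n", sel[0], sel[1], sel[2]);	/* 00 04 00 */
	return 0;
}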
+ */ +int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max) +{ + struct tpm2_cmd cmd; + u32 recd; + u32 num_bytes; + int err; + int total = 0; + int retries = 5; + u8 *dest = out; + + num_bytes = min_t(u32, max, sizeof(cmd.params.getrandom_out.buffer)); + + if (!out || !num_bytes || + max > sizeof(cmd.params.getrandom_out.buffer)) + return -EINVAL; + + do { + cmd.header.in = tpm2_getrandom_header; + cmd.params.getrandom_in.size = cpu_to_be16(num_bytes); + + err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + "attempting to get random"); + if (err) + break; + + recd = min_t(u32, be16_to_cpu(cmd.params.getrandom_out.size), + num_bytes); + memcpy(dest, cmd.params.getrandom_out.buffer, recd); + + dest += recd; + total += recd; + num_bytes -= recd; + } while (retries-- && total < max); + + return total ? total : -EIO; +} + +#define TPM2_GET_TPM_PT_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_get_tpm_pt_in)) + +static const struct tpm_input_header tpm2_get_tpm_pt_header = { + .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS), + .length = cpu_to_be32(TPM2_GET_TPM_PT_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_GET_CAPABILITY) +}; + +/** + * tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property + * @chip: TPM chip to use. + * @property_id: property ID. + * @value: output variable. + * @desc: passed to tpm_transmit_cmd() + * + * 0 is returned when the operation is successful. If a negative number is + * returned it denotes a POSIX error code. If a positive number is returned + * it denotes a TPM error. + */ +ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, + const char *desc) +{ + struct tpm2_cmd cmd; + int rc; + + cmd.header.in = tpm2_get_tpm_pt_header; + cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES); + cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id); + cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1); + + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc); + if (!rc) + *value = cmd.params.get_tpm_pt_out.value; + + return rc; +} + +#define TPM2_STARTUP_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_startup_in)) + +static const struct tpm_input_header tpm2_startup_header = { + .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS), + .length = cpu_to_be32(TPM2_STARTUP_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_STARTUP) +}; + +/** + * tpm2_startup() - send startup command to the TPM chip + * @chip: TPM chip to use. + * @startup_type: startup type. The value is either + * TPM2_SU_CLEAR or TPM2_SU_STATE. + * + * 0 is returned when the operation is successful. If a negative number is + * returned it denotes a POSIX error code. If a positive number is returned + * it denotes a TPM error. + */ +int tpm2_startup(struct tpm_chip *chip, u16 startup_type) +{ + struct tpm2_cmd cmd; + + cmd.header.in = tpm2_startup_header; + + cmd.params.startup_in.startup_type = cpu_to_be16(startup_type); + return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + "attempting to start the TPM"); +} +EXPORT_SYMBOL_GPL(tpm2_startup); + +#define TPM2_SHUTDOWN_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_startup_in)) + +static const struct tpm_input_header tpm2_shutdown_header = { + .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS), + .length = cpu_to_be32(TPM2_SHUTDOWN_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_SHUTDOWN) +}; + +/** + * tpm2_shutdown() - send shutdown command to the TPM chip + * @chip: TPM chip to use. + * @shutdown_type: shutdown type. The value is either + * TPM2_SU_CLEAR or TPM2_SU_STATE.
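tpm2_get_random() above loops because the chip may hand back fewer bytes than requested per command. A self-contained model of the accumulation loop; the stub fakes short reads of 13 bytes, and the chunk limit is an arbitrary stand-in for the response buffer size:

#include <stdio.h>
#include <string.h>

#define CHUNK_MAX 32

static size_t chip_get_random(unsigned char *dst, size_t want)
{
	size_t got = want > 13 ? 13 : want;	/* simulate short reads */

	memset(dst, 0xAB, got);
	return got;
}

int main(void)
{
	unsigned char out[48];
	size_t total = 0, max = sizeof(out);
	int retries = 5;

	do {
		size_t want = max - total;
		size_t recd;

		if (want > CHUNK_MAX)
			want = CHUNK_MAX;
		recd = chip_get_random(out + total, want);
		if (!recd)
			break;
		total += recd;
	} while (retries-- && total < max);

	printf("collected %zu of %zu bytes\n", total, max);
	return 0;
}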
+ */ +void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) +{ + struct tpm2_cmd cmd; + int rc; + + cmd.header.in = tpm2_shutdown_header; + cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type); + + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), "stopping the TPM"); + + /* In places where the shutdown command is sent there's not much we can + * do except print the error code on a system failure. + */ + if (rc < 0) + dev_warn(chip->pdev, "transmit returned %d while stopping the TPM", + rc); +} +EXPORT_SYMBOL_GPL(tpm2_shutdown); + +/* + * tpm2_calc_ordinal_duration() - maximum duration for a command + * @chip: TPM chip to use. + * @ordinal: command code number. + * + * Returns the maximum number of jiffies to wait for the command to complete; + * ordinals with an undefined duration fall back to a two-minute default. + */ +unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) +{ + int index = TPM_UNDEFINED; + int duration = 0; + + if (ordinal >= TPM2_CC_FIRST && ordinal <= TPM2_CC_LAST) + index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST]; + + if (index != TPM_UNDEFINED) + duration = chip->vendor.duration[index]; + + if (duration <= 0) + duration = 2 * 60 * HZ; + + return duration; +} +EXPORT_SYMBOL_GPL(tpm2_calc_ordinal_duration); + +#define TPM2_SELF_TEST_IN_SIZE \ + (sizeof(struct tpm_input_header) + \ + sizeof(struct tpm2_self_test_in)) + +static const struct tpm_input_header tpm2_selftest_header = { + .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS), + .length = cpu_to_be32(TPM2_SELF_TEST_IN_SIZE), + .ordinal = cpu_to_be32(TPM2_CC_SELF_TEST) +}; + +/** + * tpm2_start_selftest() - start a self test + * @chip: TPM chip to use + * @full: test all commands instead of testing only those that were not + * previously tested. + * + * 0 is returned when the operation is successful. If a negative number is + * returned it denotes a POSIX error code. If a positive number is returned + * it denotes a TPM error. + */ +static int tpm2_start_selftest(struct tpm_chip *chip, bool full) +{ + int rc; + struct tpm2_cmd cmd; + + cmd.header.in = tpm2_selftest_header; + cmd.params.selftest_in.full_test = full; + + rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, + "continue selftest"); + + /* At least some prototype chips seem to give RC_TESTING error + * immediately. This is a workaround for that. + */ + if (rc == TPM2_RC_TESTING) { + dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n"); + rc = 0; + } + + return rc; +} + +/** + * tpm2_do_selftest() - run a full self test + * @chip: TPM chip to use + * + * During the self test TPM2 commands return with the error code RC_TESTING. + * Waiting is done by issuing PCR read until it executes successfully. + * + * 0 is returned when the operation is successful. If a negative number is + * returned it denotes a POSIX error code. If a positive number is returned + * it denotes a TPM error.
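tpm2_do_selftest() below waits out the test by reading PCR 0 until the chip stops answering TPM2_RC_TESTING. A standalone model of that polling budget with invented timings:

#include <stdio.h>
#include <unistd.h>

#define TPM2_RC_TESTING 0x090A
#define DELAY_MSEC      100

static unsigned int pcr_read_rc(int call)
{
	return call < 4 ? TPM2_RC_TESTING : 0;	/* done after ~400 ms */
}

int main(void)
{
	unsigned int rc = TPM2_RC_TESTING;
	int i, loops = 2000 / DELAY_MSEC;	/* e.g. a 2 s duration budget */

	for (i = 0; i < loops && rc == TPM2_RC_TESTING; i++) {
		rc = pcr_read_rc(i);
		if (rc == TPM2_RC_TESTING)
			usleep(DELAY_MSEC * 1000);
	}
	printf("rc=0x%x after %d polls\n", rc, i);
	return 0;
}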
+ */ +int tpm2_do_selftest(struct tpm_chip *chip) +{ + int rc; + unsigned int loops; + unsigned int delay_msec = 100; + unsigned long duration; + struct tpm2_cmd cmd; + int i; + + duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST); + + loops = jiffies_to_msecs(duration) / delay_msec; + + rc = tpm2_start_selftest(chip, true); + if (rc) + return rc; + + for (i = 0; i < loops; i++) { + /* Attempt to read a PCR value */ + cmd.header.in = tpm2_pcrread_header; + cmd.params.pcrread_in.pcr_selects_cnt = cpu_to_be32(1); + cmd.params.pcrread_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1); + cmd.params.pcrread_in.pcr_select_size = TPM2_PCR_SELECT_MIN; + cmd.params.pcrread_in.pcr_select[0] = 0x01; + cmd.params.pcrread_in.pcr_select[1] = 0x00; + cmd.params.pcrread_in.pcr_select[2] = 0x00; + + rc = tpm_transmit_cmd(chip, (u8 *) &cmd, sizeof(cmd), NULL); + if (rc < 0) + break; + + rc = be32_to_cpu(cmd.header.out.return_code); + if (rc != TPM2_RC_TESTING) + break; + + msleep(delay_msec); + } + + return rc; +} +EXPORT_SYMBOL_GPL(tpm2_do_selftest); + +/** + * tpm2_gen_interrupt() - generate an interrupt + * @chip: TPM chip to use + * + * 0 is returned when the operation is successful. If a negative number is + * returned it remarks a POSIX error code. If a positive number is returned + * it remarks a TPM error. + */ +int tpm2_gen_interrupt(struct tpm_chip *chip) +{ + u32 dummy; + + return tpm2_get_tpm_pt(chip, 0x100, &dummy, + "attempting to generate an interrupt"); +} +EXPORT_SYMBOL_GPL(tpm2_gen_interrupt); + +/** + * tpm2_probe() - probe TPM 2.0 + * @chip: TPM chip to use + * + * Send idempotent TPM 2.0 command and see whether TPM 2.0 chip replied based on + * the reply tag. + */ +int tpm2_probe(struct tpm_chip *chip) +{ + struct tpm2_cmd cmd; + int rc; + + cmd.header.in = tpm2_get_tpm_pt_header; + cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES); + cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100); + cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1); + + rc = tpm_transmit(chip, (const char *) &cmd, sizeof(cmd)); + if (rc < 0) + return rc; + else if (rc < TPM_HEADER_SIZE) + return -EFAULT; + + if (be16_to_cpu(cmd.header.out.tag) == TPM2_ST_NO_SESSIONS) + chip->flags |= TPM_CHIP_FLAG_TPM2; + + return 0; +} +EXPORT_SYMBOL_GPL(tpm2_probe); diff --git a/kernel/drivers/char/tpm/tpm_acpi.c b/kernel/drivers/char/tpm/tpm_acpi.c new file mode 100644 index 000000000..565a9478c --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_acpi.c @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2005 IBM Corporation + * + * Authors: + * Seiji Munetoh + * Stefan Berger + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Access to the eventlog extended by the TCG BIOS of PC platform + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include "tpm.h" +#include "tpm_eventlog.h" + +struct acpi_tcpa { + struct acpi_table_header hdr; + u16 platform_class; + union { + struct client_hdr { + u32 log_max_len __packed; + u64 log_start_addr __packed; + } client; + struct server_hdr { + u16 reserved; + u64 log_max_len __packed; + u64 log_start_addr __packed; + } server; + }; +}; + +/* read binary bios log */ +int read_log(struct tpm_bios_log *log) +{ + struct acpi_tcpa *buff; + acpi_status status; + void __iomem *virt; + u64 len, start; + + if (log->bios_event_log != NULL) { + printk(KERN_ERR + "%s: ERROR - Eventlog already initialized\n", + __func__); + return -EFAULT; + } + + /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ + status = acpi_get_table(ACPI_SIG_TCPA, 1, + (struct acpi_table_header **)&buff); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n", + __func__); + return -EIO; + } + + switch(buff->platform_class) { + case BIOS_SERVER: + len = buff->server.log_max_len; + start = buff->server.log_start_addr; + break; + case BIOS_CLIENT: + default: + len = buff->client.log_max_len; + start = buff->client.log_start_addr; + break; + } + if (!len) { + printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__); + return -EIO; + } + + /* malloc EventLog space */ + log->bios_event_log = kmalloc(len, GFP_KERNEL); + if (!log->bios_event_log) { + printk("%s: ERROR - Not enough Memory for BIOS measurements\n", + __func__); + return -ENOMEM; + } + + log->bios_event_log_end = log->bios_event_log + len; + + virt = acpi_os_map_iomem(start, len); + if (!virt) { + kfree(log->bios_event_log); + printk("%s: ERROR - Unable to map memory\n", __func__); + return -EIO; + } + + memcpy_fromio(log->bios_event_log, virt, len); + + acpi_os_unmap_iomem(virt, len); + return 0; +} diff --git a/kernel/drivers/char/tpm/tpm_atmel.c b/kernel/drivers/char/tpm/tpm_atmel.c new file mode 100644 index 000000000..dfadad091 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_atmel.c @@ -0,0 +1,227 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
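read_log() above picks the event log window from one of two layouts keyed on platform_class. A standalone sketch of the two packed bodies and their sizes; the offsets come from the union in this file, not re-derived from the ACPI spec:

#include <stdint.h>
#include <stdio.h>

struct tcpa_client {
	uint32_t log_max_len;
	uint64_t log_start_addr;
} __attribute__((packed));

struct tcpa_server {
	uint16_t reserved;
	uint64_t log_max_len;
	uint64_t log_start_addr;
} __attribute__((packed));

int main(void)
{
	/* 12 vs 18 bytes after the common ACPI header; the driver then
	 * maps log_start_addr..+log_max_len and memcpy_fromio()s it out. */
	printf("client body: %zu bytes, server body: %zu bytes\n",
	       sizeof(struct tcpa_client), sizeof(struct tcpa_server));
	return 0;
}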
+ * + */ + +#include "tpm.h" +#include "tpm_atmel.h" + +/* write status bits */ +enum tpm_atmel_write_status { + ATML_STATUS_ABORT = 0x01, + ATML_STATUS_LASTBYTE = 0x04 +}; +/* read status bits */ +enum tpm_atmel_read_status { + ATML_STATUS_BUSY = 0x01, + ATML_STATUS_DATA_AVAIL = 0x02, + ATML_STATUS_REWRITE = 0x04, + ATML_STATUS_READY = 0x08 +}; + +static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + u8 status, *hdr = buf; + u32 size; + int i; + __be32 *native_size; + + /* start reading header */ + if (count < 6) + return -EIO; + + for (i = 0; i < 6; i++) { + status = ioread8(chip->vendor.iobase + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { + dev_err(chip->pdev, "error reading header\n"); + return -EIO; + } + *buf++ = ioread8(chip->vendor.iobase); + } + + /* size of the data received */ + native_size = (__force __be32 *) (hdr + 2); + size = be32_to_cpu(*native_size); + + if (count < size) { + dev_err(chip->pdev, + "Recv size(%d) less than available space\n", size); + for (; i < size; i++) { /* clear the waiting data anyway */ + status = ioread8(chip->vendor.iobase + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { + dev_err(chip->pdev, "error reading data\n"); + return -EIO; + } + } + return -EIO; + } + + /* read all the data available */ + for (; i < size; i++) { + status = ioread8(chip->vendor.iobase + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { + dev_err(chip->pdev, "error reading data\n"); + return -EIO; + } + *buf++ = ioread8(chip->vendor.iobase); + } + + /* make sure data available is gone */ + status = ioread8(chip->vendor.iobase + 1); + + if (status & ATML_STATUS_DATA_AVAIL) { + dev_err(chip->pdev, "data available is stuck\n"); + return -EIO; + } + + return size; +} + +static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int i; + + dev_dbg(chip->pdev, "tpm_atml_send:\n"); + for (i = 0; i < count; i++) { + dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); + iowrite8(buf[i], chip->vendor.iobase); + } + + return count; +} + +static void tpm_atml_cancel(struct tpm_chip *chip) +{ + iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1); +} + +static u8 tpm_atml_status(struct tpm_chip *chip) +{ + return ioread8(chip->vendor.iobase + 1); +} + +static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == ATML_STATUS_READY); +} + +static const struct tpm_class_ops tpm_atmel = { + .recv = tpm_atml_recv, + .send = tpm_atml_send, + .cancel = tpm_atml_cancel, + .status = tpm_atml_status, + .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL, + .req_complete_val = ATML_STATUS_DATA_AVAIL, + .req_canceled = tpm_atml_req_canceled, +}; + +static struct platform_device *pdev; + +static void atml_plat_remove(void) +{ + struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); + + if (chip) { + tpm_chip_unregister(chip); + if (chip->vendor.have_region) + atmel_release_region(chip->vendor.base, + chip->vendor.region_size); + atmel_put_base_addr(chip->vendor.iobase); + platform_device_unregister(pdev); + } +} + +static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume); + +static struct platform_driver atml_drv = { + .driver = { + .name = "tpm_atmel", + .pm = &tpm_atml_pm, + }, +}; + +static int __init init_atmel(void) +{ + int rc = 0; + void __iomem *iobase = NULL; + int have_region, region_size; + unsigned long base; + struct tpm_chip *chip; + + rc = platform_driver_register(&atml_drv); + if (rc) + return rc; + + if ((iobase = atmel_get_base_addr(&base, ®ion_size)) == NULL) { + rc = 
-ENODEV;
+		goto err_unreg_drv;
+	}
+
+	have_region =
+	    (atmel_request_region
+	     (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+
+	pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		rc = PTR_ERR(pdev);
+		goto err_rel_reg;
+	}
+
+	chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel);
+	if (IS_ERR(chip)) {
+		rc = PTR_ERR(chip);
+		goto err_unreg_dev;
+	}
+
+	chip->vendor.iobase = iobase;
+	chip->vendor.base = base;
+	chip->vendor.have_region = have_region;
+	chip->vendor.region_size = region_size;
+
+	rc = tpm_chip_register(chip);
+	if (rc)
+		goto err_unreg_dev;
+
+	return 0;
+
+err_unreg_dev:
+	platform_device_unregister(pdev);
+err_rel_reg:
+	atmel_put_base_addr(iobase);
+	if (have_region)
+		atmel_release_region(base,
+				     region_size);
+err_unreg_drv:
+	platform_driver_unregister(&atml_drv);
+	return rc;
+}
+
+static void __exit cleanup_atmel(void)
+{
+	platform_driver_unregister(&atml_drv);
+	atml_plat_remove();
+}
+
+module_init(init_atmel);
+module_exit(cleanup_atmel);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/char/tpm/tpm_atmel.h b/kernel/drivers/char/tpm/tpm_atmel.h
new file mode 100644
index 000000000..6c831f946
--- /dev/null
+++ b/kernel/drivers/char/tpm/tpm_atmel.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall
+ *
+ * Maintained by:
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * These differences are required on power because the device must be
+ * discovered through the device tree and iomap must be used to get
+ * around the need for holes in the io_page_mask. This does not happen
+ * automatically because the tpm is not a normal pci device and lives
+ * under the root node.
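+ * On PPC64 the base address and region size are parsed from the "reg"
+ * property of the device-tree node compatible with "AT97SC3201" and
+ * mapped with ioremap(); elsewhere the address is read back from the
+ * TPM index/data ports and a two-byte region is mapped via ioport_map().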
+ * + */ + +#ifdef CONFIG_PPC64 + +#include + +#define atmel_getb(chip, offset) readb(chip->vendor->iobase + offset); +#define atmel_putb(val, chip, offset) writeb(val, chip->vendor->iobase + offset) +#define atmel_request_region request_mem_region +#define atmel_release_region release_mem_region + +static inline void atmel_put_base_addr(void __iomem *iobase) +{ + iounmap(iobase); +} + +static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) +{ + struct device_node *dn; + unsigned long address, size; + const unsigned int *reg; + int reglen; + int naddrc; + int nsizec; + + dn = of_find_node_by_name(NULL, "tpm"); + + if (!dn) + return NULL; + + if (!of_device_is_compatible(dn, "AT97SC3201")) { + of_node_put(dn); + return NULL; + } + + reg = of_get_property(dn, "reg", ®len); + naddrc = of_n_addr_cells(dn); + nsizec = of_n_size_cells(dn); + + of_node_put(dn); + + + if (naddrc == 2) + address = ((unsigned long) reg[0] << 32) | reg[1]; + else + address = reg[0]; + + if (nsizec == 2) + size = + ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1]; + else + size = reg[naddrc]; + + *base = address; + *region_size = size; + return ioremap(*base, *region_size); +} +#else +#define atmel_getb(chip, offset) inb(chip->vendor->base + offset) +#define atmel_putb(val, chip, offset) outb(val, chip->vendor->base + offset) +#define atmel_request_region request_region +#define atmel_release_region release_region +/* Atmel definitions */ +enum tpm_atmel_addr { + TPM_ATMEL_BASE_ADDR_LO = 0x08, + TPM_ATMEL_BASE_ADDR_HI = 0x09 +}; + +/* Verify this is a 1.1 Atmel TPM */ +static int atmel_verify_tpm11(void) +{ + + /* verify that it is an Atmel part */ + if (tpm_read_index(TPM_ADDR, 4) != 'A' || + tpm_read_index(TPM_ADDR, 5) != 'T' || + tpm_read_index(TPM_ADDR, 6) != 'M' || + tpm_read_index(TPM_ADDR, 7) != 'L') + return 1; + + /* query chip for its version number */ + if (tpm_read_index(TPM_ADDR, 0x00) != 1 || + tpm_read_index(TPM_ADDR, 0x01) != 1) + return 1; + + /* This is an atmel supported part */ + return 0; +} + +static inline void atmel_put_base_addr(void __iomem *iobase) +{ +} + +/* Determine where to talk to device */ +static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) +{ + int lo, hi; + + if (atmel_verify_tpm11() != 0) + return NULL; + + lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); + hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); + + *base = (hi << 8) | lo; + *region_size = 2; + + return ioport_map(*base, *region_size); +} +#endif diff --git a/kernel/drivers/char/tpm/tpm_crb.c b/kernel/drivers/char/tpm/tpm_crb.c new file mode 100644 index 000000000..b26ceee35 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_crb.c @@ -0,0 +1,344 @@ +/* + * Copyright (C) 2014 Intel Corporation + * + * Authors: + * Jarkko Sakkinen + * + * Maintained by: + * + * This device driver implements the TPM interface as defined in + * the TCG CRB 2.0 TPM specification. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ + +#include +#include +#include +#include +#include +#include "tpm.h" + +#define ACPI_SIG_TPM2 "TPM2" + +static const u8 CRB_ACPI_START_UUID[] = { + /* 0000 */ 0xAB, 0x6C, 0xBF, 0x6B, 0x63, 0x54, 0x14, 0x47, + /* 0008 */ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4 +}; + +enum crb_defaults { + CRB_ACPI_START_REVISION_ID = 1, + CRB_ACPI_START_INDEX = 1, +}; + +enum crb_start_method { + CRB_SM_ACPI_START = 2, + CRB_SM_CRB = 7, + CRB_SM_CRB_WITH_ACPI_START = 8, +}; + +struct acpi_tpm2 { + struct acpi_table_header hdr; + u16 platform_class; + u16 reserved; + u64 control_area_pa; + u32 start_method; +} __packed; + +enum crb_ca_request { + CRB_CA_REQ_GO_IDLE = BIT(0), + CRB_CA_REQ_CMD_READY = BIT(1), +}; + +enum crb_ca_status { + CRB_CA_STS_ERROR = BIT(0), + CRB_CA_STS_TPM_IDLE = BIT(1), +}; + +enum crb_start { + CRB_START_INVOKE = BIT(0), +}; + +enum crb_cancel { + CRB_CANCEL_INVOKE = BIT(0), +}; + +struct crb_control_area { + u32 req; + u32 sts; + u32 cancel; + u32 start; + u32 int_enable; + u32 int_sts; + u32 cmd_size; + u64 cmd_pa; + u32 rsp_size; + u64 rsp_pa; +} __packed; + +enum crb_status { + CRB_STS_COMPLETE = BIT(0), +}; + +enum crb_flags { + CRB_FL_ACPI_START = BIT(0), + CRB_FL_CRB_START = BIT(1), +}; + +struct crb_priv { + unsigned int flags; + struct crb_control_area __iomem *cca; + u8 __iomem *cmd; + u8 __iomem *rsp; +}; + +static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume); + +static u8 crb_status(struct tpm_chip *chip) +{ + struct crb_priv *priv = chip->vendor.priv; + u8 sts = 0; + + if ((le32_to_cpu(ioread32(&priv->cca->start)) & CRB_START_INVOKE) != + CRB_START_INVOKE) + sts |= CRB_STS_COMPLETE; + + return sts; +} + +static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct crb_priv *priv = chip->vendor.priv; + unsigned int expected; + + /* sanity check */ + if (count < 6) + return -EIO; + + if (le32_to_cpu(ioread32(&priv->cca->sts)) & CRB_CA_STS_ERROR) + return -EIO; + + memcpy_fromio(buf, priv->rsp, 6); + expected = be32_to_cpup((__be32 *) &buf[2]); + + if (expected > count) + return -EIO; + + memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6); + + return expected; +} + +static int crb_do_acpi_start(struct tpm_chip *chip) +{ + union acpi_object *obj; + int rc; + + obj = acpi_evaluate_dsm(chip->acpi_dev_handle, + CRB_ACPI_START_UUID, + CRB_ACPI_START_REVISION_ID, + CRB_ACPI_START_INDEX, + NULL); + if (!obj) + return -ENXIO; + rc = obj->integer.value == 0 ? 0 : -ENXIO; + ACPI_FREE(obj); + return rc; +} + +static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct crb_priv *priv = chip->vendor.priv; + int rc = 0; + + if (len > le32_to_cpu(ioread32(&priv->cca->cmd_size))) { + dev_err(&chip->dev, + "invalid command count value %x %zx\n", + (unsigned int) len, + (size_t) le32_to_cpu(ioread32(&priv->cca->cmd_size))); + return -E2BIG; + } + + memcpy_toio(priv->cmd, buf, len); + + /* Make sure that cmd is populated before issuing start. */ + wmb(); + + if (priv->flags & CRB_FL_CRB_START) + iowrite32(cpu_to_le32(CRB_START_INVOKE), &priv->cca->start); + + if (priv->flags & CRB_FL_ACPI_START) + rc = crb_do_acpi_start(chip); + + return rc; +} + +static void crb_cancel(struct tpm_chip *chip) +{ + struct crb_priv *priv = chip->vendor.priv; + + iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel); + + /* Make sure that cmd is populated before issuing cancel. 
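+	 * (This mirrors the barrier in crb_send(): the wmb() below keeps
+	 * earlier buffer writes ordered ahead of the cancel handshake and
+	 * the optional ACPI start call that follows.)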
*/ + wmb(); + + if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip)) + dev_err(&chip->dev, "ACPI Start failed\n"); + + iowrite32(0, &priv->cca->cancel); +} + +static bool crb_req_canceled(struct tpm_chip *chip, u8 status) +{ + struct crb_priv *priv = chip->vendor.priv; + u32 cancel = le32_to_cpu(ioread32(&priv->cca->cancel)); + + return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; +} + +static const struct tpm_class_ops tpm_crb = { + .status = crb_status, + .recv = crb_recv, + .send = crb_send, + .cancel = crb_cancel, + .req_canceled = crb_req_canceled, + .req_complete_mask = CRB_STS_COMPLETE, + .req_complete_val = CRB_STS_COMPLETE, +}; + +static int crb_acpi_add(struct acpi_device *device) +{ + struct tpm_chip *chip; + struct acpi_tpm2 *buf; + struct crb_priv *priv; + struct device *dev = &device->dev; + acpi_status status; + u32 sm; + u64 pa; + int rc; + + chip = tpmm_chip_alloc(dev, &tpm_crb); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + chip->flags = TPM_CHIP_FLAG_TPM2; + + status = acpi_get_table(ACPI_SIG_TPM2, 1, + (struct acpi_table_header **) &buf); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to get TPM2 ACPI table\n"); + return -ENODEV; + } + + if (buf->hdr.length < sizeof(struct acpi_tpm2)) { + dev_err(dev, "TPM2 ACPI table has wrong size"); + return -EINVAL; + } + + priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv), + GFP_KERNEL); + if (!priv) { + dev_err(dev, "failed to devm_kzalloc for private data\n"); + return -ENOMEM; + } + + sm = le32_to_cpu(buf->start_method); + + /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs + * report only ACPI start but in practice seems to require both + * ACPI start and CRB start. + */ + if (sm == CRB_SM_CRB || sm == CRB_SM_CRB_WITH_ACPI_START || + !strcmp(acpi_device_hid(device), "MSFT0101")) + priv->flags |= CRB_FL_CRB_START; + + if (sm == CRB_SM_ACPI_START || sm == CRB_SM_CRB_WITH_ACPI_START) + priv->flags |= CRB_FL_ACPI_START; + + priv->cca = (struct crb_control_area __iomem *) + devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000); + if (!priv->cca) { + dev_err(dev, "ioremap of the control area failed\n"); + return -ENOMEM; + } + + memcpy_fromio(&pa, &priv->cca->cmd_pa, 8); + pa = le64_to_cpu(pa); + priv->cmd = devm_ioremap_nocache(dev, le64_to_cpu(pa), + ioread32(&priv->cca->cmd_size)); + if (!priv->cmd) { + dev_err(dev, "ioremap of the command buffer failed\n"); + return -ENOMEM; + } + + memcpy_fromio(&pa, &priv->cca->rsp_pa, 8); + pa = le64_to_cpu(pa); + priv->rsp = devm_ioremap_nocache(dev, le64_to_cpu(pa), + ioread32(&priv->cca->rsp_size)); + if (!priv->rsp) { + dev_err(dev, "ioremap of the response buffer failed\n"); + return -ENOMEM; + } + + chip->vendor.priv = priv; + + /* Default timeouts and durations */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); + chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); + chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); + chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); + chip->vendor.duration[TPM_SHORT] = + msecs_to_jiffies(TPM2_DURATION_SHORT); + chip->vendor.duration[TPM_MEDIUM] = + msecs_to_jiffies(TPM2_DURATION_MEDIUM); + chip->vendor.duration[TPM_LONG] = + msecs_to_jiffies(TPM2_DURATION_LONG); + + chip->acpi_dev_handle = device->handle; + + rc = tpm2_do_selftest(chip); + if (rc) + return rc; + + return tpm_chip_register(chip); +} + +static int crb_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + 
tpm_chip_unregister(chip); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_shutdown(chip, TPM2_SU_CLEAR); + + return 0; +} + +static struct acpi_device_id crb_device_ids[] = { + {"MSFT0101", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, crb_device_ids); + +static struct acpi_driver crb_acpi_driver = { + .name = "tpm_crb", + .ids = crb_device_ids, + .ops = { + .add = crb_acpi_add, + .remove = crb_acpi_remove, + }, + .drv = { + .pm = &crb_pm, + }, +}; + +module_acpi_driver(crb_acpi_driver); +MODULE_AUTHOR("Jarkko Sakkinen "); +MODULE_DESCRIPTION("TPM2 Driver"); +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_eventlog.c b/kernel/drivers/char/tpm/tpm_eventlog.c new file mode 100644 index 000000000..3a56a1315 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_eventlog.c @@ -0,0 +1,414 @@ +/* + * Copyright (C) 2005, 2012 IBM Corporation + * + * Authors: + * Kent Yoder + * Seiji Munetoh + * Stefan Berger + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Access to the eventlog created by a system's firmware / BIOS + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include + +#include "tpm.h" +#include "tpm_eventlog.h" + + +static const char* tcpa_event_type_strings[] = { + "PREBOOT", + "POST CODE", + "", + "NO ACTION", + "SEPARATOR", + "ACTION", + "EVENT TAG", + "S-CRTM Contents", + "S-CRTM Version", + "CPU Microcode", + "Platform Config Flags", + "Table of Devices", + "Compact Hash", + "IPL", + "IPL Partition Data", + "Non-Host Code", + "Non-Host Config", + "Non-Host Info" +}; + +static const char* tcpa_pc_event_id_strings[] = { + "", + "SMBIOS", + "BIS Certificate", + "POST BIOS ", + "ESCD ", + "CMOS", + "NVRAM", + "Option ROM", + "Option ROM config", + "", + "Option ROM microcode ", + "S-CRTM Version", + "S-CRTM Contents ", + "POST Contents ", + "Table of Devices", +}; + +/* returns pointer to start of pos. 
entry of tcg log */ +static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) +{ + loff_t i; + struct tpm_bios_log *log = m->private; + void *addr = log->bios_event_log; + void *limit = log->bios_event_log_end; + struct tcpa_event *event; + + /* read over *pos measurements */ + for (i = 0; i < *pos; i++) { + event = addr; + + if ((addr + sizeof(struct tcpa_event)) < limit) { + if (event->event_type == 0 && event->event_size == 0) + return NULL; + addr += sizeof(struct tcpa_event) + event->event_size; + } + } + + /* now check if current entry is valid */ + if ((addr + sizeof(struct tcpa_event)) >= limit) + return NULL; + + event = addr; + + if ((event->event_type == 0 && event->event_size == 0) || + ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) + return NULL; + + return addr; +} + +static void *tpm_bios_measurements_next(struct seq_file *m, void *v, + loff_t *pos) +{ + struct tcpa_event *event = v; + struct tpm_bios_log *log = m->private; + void *limit = log->bios_event_log_end; + + v += sizeof(struct tcpa_event) + event->event_size; + + /* now check if current entry is valid */ + if ((v + sizeof(struct tcpa_event)) >= limit) + return NULL; + + event = v; + + if (event->event_type == 0 && event->event_size == 0) + return NULL; + + if ((event->event_type == 0 && event->event_size == 0) || + ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) + return NULL; + + (*pos)++; + return v; +} + +static void tpm_bios_measurements_stop(struct seq_file *m, void *v) +{ +} + +static int get_event_name(char *dest, struct tcpa_event *event, + unsigned char * event_entry) +{ + const char *name = ""; + /* 41 so there is room for 40 data and 1 nul */ + char data[41] = ""; + int i, n_len = 0, d_len = 0; + struct tcpa_pc_event *pc_event; + + switch(event->event_type) { + case PREBOOT: + case POST_CODE: + case UNUSED: + case NO_ACTION: + case SCRTM_CONTENTS: + case SCRTM_VERSION: + case CPU_MICROCODE: + case PLATFORM_CONFIG_FLAGS: + case TABLE_OF_DEVICES: + case COMPACT_HASH: + case IPL: + case IPL_PARTITION_DATA: + case NONHOST_CODE: + case NONHOST_CONFIG: + case NONHOST_INFO: + name = tcpa_event_type_strings[event->event_type]; + n_len = strlen(name); + break; + case SEPARATOR: + case ACTION: + if (MAX_TEXT_EVENT > event->event_size) { + name = event_entry; + n_len = event->event_size; + } + break; + case EVENT_TAG: + pc_event = (struct tcpa_pc_event *)event_entry; + + /* ToDo Row data -> Base64 */ + + switch (pc_event->event_id) { + case SMBIOS: + case BIS_CERT: + case CMOS: + case NVRAM: + case OPTION_ROM_EXEC: + case OPTION_ROM_CONFIG: + case S_CRTM_VERSION: + name = tcpa_pc_event_id_strings[pc_event->event_id]; + n_len = strlen(name); + break; + /* hash data */ + case POST_BIOS_ROM: + case ESCD: + case OPTION_ROM_MICROCODE: + case S_CRTM_CONTENTS: + case POST_CONTENTS: + name = tcpa_pc_event_id_strings[pc_event->event_id]; + n_len = strlen(name); + for (i = 0; i < 20; i++) + d_len += sprintf(&data[2*i], "%02x", + pc_event->event_data[i]); + break; + default: + break; + } + default: + break; + } + + return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]", + n_len, name, d_len, data); + +} + +static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) +{ + struct tcpa_event *event = v; + char *data = v; + int i; + + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) + seq_putc(m, data[i]); + + return 0; +} + +static int tpm_bios_measurements_release(struct inode *inode, + struct file *file) +{ + struct seq_file *seq = 
file->private_data; + struct tpm_bios_log *log = seq->private; + + if (log) { + kfree(log->bios_event_log); + kfree(log); + } + + return seq_release(inode, file); +} + +static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v) +{ + int len = 0; + char *eventname; + struct tcpa_event *event = v; + unsigned char *event_entry = + (unsigned char *) (v + sizeof(struct tcpa_event)); + + eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); + if (!eventname) { + printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", + __func__); + return -EFAULT; + } + + seq_printf(m, "%2d ", event->pcr_index); + + /* 2nd: SHA1 */ + seq_printf(m, "%20phN", event->pcr_value); + + /* 3rd: event type identifier */ + seq_printf(m, " %02x", event->event_type); + + len += get_event_name(eventname, event, event_entry); + + /* 4th: eventname <= max + \'0' delimiter */ + seq_printf(m, " %s\n", eventname); + + kfree(eventname); + return 0; +} + +static const struct seq_operations tpm_ascii_b_measurments_seqops = { + .start = tpm_bios_measurements_start, + .next = tpm_bios_measurements_next, + .stop = tpm_bios_measurements_stop, + .show = tpm_ascii_bios_measurements_show, +}; + +static const struct seq_operations tpm_binary_b_measurments_seqops = { + .start = tpm_bios_measurements_start, + .next = tpm_bios_measurements_next, + .stop = tpm_bios_measurements_stop, + .show = tpm_binary_bios_measurements_show, +}; + +static int tpm_ascii_bios_measurements_open(struct inode *inode, + struct file *file) +{ + int err; + struct tpm_bios_log *log; + struct seq_file *seq; + + log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL); + if (!log) + return -ENOMEM; + + if ((err = read_log(log))) + goto out_free; + + /* now register seq file */ + err = seq_open(file, &tpm_ascii_b_measurments_seqops); + if (!err) { + seq = file->private_data; + seq->private = log; + } else { + goto out_free; + } + +out: + return err; +out_free: + kfree(log->bios_event_log); + kfree(log); + goto out; +} + +static const struct file_operations tpm_ascii_bios_measurements_ops = { + .open = tpm_ascii_bios_measurements_open, + .read = seq_read, + .llseek = seq_lseek, + .release = tpm_bios_measurements_release, +}; + +static int tpm_binary_bios_measurements_open(struct inode *inode, + struct file *file) +{ + int err; + struct tpm_bios_log *log; + struct seq_file *seq; + + log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL); + if (!log) + return -ENOMEM; + + if ((err = read_log(log))) + goto out_free; + + /* now register seq file */ + err = seq_open(file, &tpm_binary_b_measurments_seqops); + if (!err) { + seq = file->private_data; + seq->private = log; + } else { + goto out_free; + } + +out: + return err; +out_free: + kfree(log->bios_event_log); + kfree(log); + goto out; +} + +static const struct file_operations tpm_binary_bios_measurements_ops = { + .open = tpm_binary_bios_measurements_open, + .read = seq_read, + .llseek = seq_lseek, + .release = tpm_bios_measurements_release, +}; + +static int is_bad(void *p) +{ + if (!p) + return 1; + if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV)) + return 1; + return 0; +} + +struct dentry **tpm_bios_log_setup(char *name) +{ + struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file; + + tpm_dir = securityfs_create_dir(name, NULL); + if (is_bad(tpm_dir)) + goto out; + + bin_file = + securityfs_create_file("binary_bios_measurements", + S_IRUSR | S_IRGRP, tpm_dir, NULL, + &tpm_binary_bios_measurements_ops); + if (is_bad(bin_file)) + goto out_tpm; + + ascii_file = + securityfs_create_file("ascii_bios_measurements", 
+ S_IRUSR | S_IRGRP, tpm_dir, NULL, + &tpm_ascii_bios_measurements_ops); + if (is_bad(ascii_file)) + goto out_bin; + + ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL); + if (!ret) + goto out_ascii; + + ret[0] = ascii_file; + ret[1] = bin_file; + ret[2] = tpm_dir; + + return ret; + +out_ascii: + securityfs_remove(ascii_file); +out_bin: + securityfs_remove(bin_file); +out_tpm: + securityfs_remove(tpm_dir); +out: + return NULL; +} + +void tpm_bios_log_teardown(struct dentry **lst) +{ + int i; + + for (i = 0; i < 3; i++) + securityfs_remove(lst[i]); +} diff --git a/kernel/drivers/char/tpm/tpm_eventlog.h b/kernel/drivers/char/tpm/tpm_eventlog.h new file mode 100644 index 000000000..e7da086d6 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_eventlog.h @@ -0,0 +1,86 @@ + +#ifndef __TPM_EVENTLOG_H__ +#define __TPM_EVENTLOG_H__ + +#define TCG_EVENT_NAME_LEN_MAX 255 +#define MAX_TEXT_EVENT 1000 /* Max event string length */ +#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ + +enum bios_platform_class { + BIOS_CLIENT = 0x00, + BIOS_SERVER = 0x01, +}; + +struct tpm_bios_log { + void *bios_event_log; + void *bios_event_log_end; +}; + +struct tcpa_event { + u32 pcr_index; + u32 event_type; + u8 pcr_value[20]; /* SHA1 */ + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_event_types { + PREBOOT = 0, + POST_CODE, + UNUSED, + NO_ACTION, + SEPARATOR, + ACTION, + EVENT_TAG, + SCRTM_CONTENTS, + SCRTM_VERSION, + CPU_MICROCODE, + PLATFORM_CONFIG_FLAGS, + TABLE_OF_DEVICES, + COMPACT_HASH, + IPL, + IPL_PARTITION_DATA, + NONHOST_CODE, + NONHOST_CONFIG, + NONHOST_INFO, +}; + +struct tcpa_pc_event { + u32 event_id; + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_pc_event_ids { + SMBIOS = 1, + BIS_CERT, + POST_BIOS_ROM, + ESCD, + CMOS, + NVRAM, + OPTION_ROM_EXEC, + OPTION_ROM_CONFIG, + OPTION_ROM_MICROCODE = 10, + S_CRTM_VERSION, + S_CRTM_CONTENTS, + POST_CONTENTS, + HOST_TABLE_OF_DEVICES, +}; + +int read_log(struct tpm_bios_log *log); + +#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \ + defined(CONFIG_ACPI) +extern struct dentry **tpm_bios_log_setup(char *); +extern void tpm_bios_log_teardown(struct dentry **); +#else +static inline struct dentry **tpm_bios_log_setup(char *name) +{ + return NULL; +} +static inline void tpm_bios_log_teardown(struct dentry **dir) +{ +} +#endif + +#endif diff --git a/kernel/drivers/char/tpm/tpm_i2c_atmel.c b/kernel/drivers/char/tpm/tpm_i2c_atmel.c new file mode 100644 index 000000000..7a0ca78ad --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_i2c_atmel.c @@ -0,0 +1,230 @@ +/* + * ATMEL I2C TPM AT97SC3204T + * + * Copyright (C) 2012 V Lab Technologies + * Teddy Reed + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe + * Device driver for ATMEL I2C TPMs. + * + * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM + * devices the raw TCG formatted TPM command data is written via I2C and then + * raw TCG formatted TPM command data is returned via I2C. + * + * TGC status/locality/etc functions seen in the LPC implementation do not + * seem to be present. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + */ +#include +#include +#include +#include +#include +#include "tpm.h" + +#define I2C_DRIVER_NAME "tpm_i2c_atmel" + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +#define ATMEL_STS_OK 1 + +struct priv_data { + size_t len; + /* This is the amount we read on the first try. 25 was chosen to fit a + * fair number of read responses in the buffer so a 2nd retry can be + * avoided in small message cases. */ + u8 buffer[sizeof(struct tpm_output_header) + 25]; +}; + +static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->pdev); + s32 status; + + priv->len = 0; + + if (len <= 2) + return -EIO; + + status = i2c_master_send(client, buf, len); + + dev_dbg(chip->pdev, + "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, + (int)min_t(size_t, 64, len), buf, len, status); + return status; +} + +static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->pdev); + struct tpm_output_header *hdr = + (struct tpm_output_header *)priv->buffer; + u32 expected_len; + int rc; + + if (priv->len == 0) + return -EIO; + + /* Get the message size from the message header, if we didn't get the + * whole message in read_status then we need to re-read the + * message. */ + expected_len = be32_to_cpu(hdr->length); + if (expected_len > count) + return -ENOMEM; + + if (priv->len >= expected_len) { + dev_dbg(chip->pdev, + "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + memcpy(buf, priv->buffer, expected_len); + return expected_len; + } + + rc = i2c_master_recv(client, buf, expected_len); + dev_dbg(chip->pdev, + "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + return rc; +} + +static void i2c_atmel_cancel(struct tpm_chip *chip) +{ + dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported"); +} + +static u8 i2c_atmel_read_status(struct tpm_chip *chip) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->pdev); + int rc; + + /* The TPM fails the I2C read until it is ready, so we do the entire + * transfer here and buffer it locally. This way the common code can + * properly handle the timeouts. */ + priv->len = 0; + memset(priv->buffer, 0, sizeof(priv->buffer)); + + + /* Once the TPM has completed the command the command remains readable + * until another command is issued. 
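+	 * Reading sizeof(priv->buffer) bytes here therefore buffers the
+	 * response header (and small replies in full), so i2c_atmel_recv()
+	 * can often skip a second bus transaction.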
*/ + rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); + dev_dbg(chip->pdev, + "%s: sts=%d", __func__, rc); + if (rc <= 0) + return 0; + + priv->len = rc; + + return ATMEL_STS_OK; +} + +static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) +{ + return false; +} + +static const struct tpm_class_ops i2c_atmel = { + .status = i2c_atmel_read_status, + .recv = i2c_atmel_recv, + .send = i2c_atmel_send, + .cancel = i2c_atmel_cancel, + .req_complete_mask = ATMEL_STS_OK, + .req_complete_val = ATMEL_STS_OK, + .req_canceled = i2c_atmel_req_canceled, +}; + +static int i2c_atmel_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct tpm_chip *chip; + struct device *dev = &client->dev; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + chip = tpmm_chip_alloc(dev, &i2c_atmel); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + if (!chip->vendor.priv) + return -ENOMEM; + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.irq = 0; + + /* There is no known way to probe for this device, and all version + * information seems to be read via TPM commands. Thus we rely on the + * TPM startup process in the common code to detect the device. */ + if (tpm_get_timeouts(chip)) + return -ENODEV; + + if (tpm_do_selftest(chip)) + return -ENODEV; + + return tpm_chip_register(chip); +} + +static int i2c_atmel_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + tpm_chip_unregister(chip); + return 0; +} + +static const struct i2c_device_id i2c_atmel_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_atmel_of_match[] = { + {.compatible = "atmel,at97sc3204t"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_atmel_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_atmel_driver = { + .id_table = i2c_atmel_id, + .probe = i2c_atmel_probe, + .remove = i2c_atmel_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_atmel_pm_ops, + .of_match_table = of_match_ptr(i2c_atmel_of_match), + }, +}; + +module_i2c_driver(i2c_atmel_driver); + +MODULE_AUTHOR("Jason Gunthorpe "); +MODULE_DESCRIPTION("Atmel TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_i2c_infineon.c b/kernel/drivers/char/tpm/tpm_i2c_infineon.c new file mode 100644 index 000000000..33c5f360a --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_i2c_infineon.c @@ -0,0 +1,724 @@ +/* + * Copyright (C) 2012,2013 Infineon Technologies + * + * Authors: + * Peter Huewe + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This device driver implements the TPM interface as defined in + * the TCG TPM Interface Spec version 1.2, revision 1.0 and the + * Infineon I2C Protocol Stack Specification v0.20. + * + * It is based on the original tpm_tis device driver from Leendert van + * Dorn and Kyleen Hall. 
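+ * Unlike the memory-mapped TIS driver, all register accesses are
+ * funnelled through iic_tpm_read()/iic_tpm_write(), which issue raw
+ * i2c_msg transfers and observe the guard time the chips require.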
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * + */ +#include +#include +#include +#include "tpm.h" + +/* max. buffer size supported by our TPM */ +#define TPM_BUFSIZE 1260 + +/* max. number of iterations after I2C NAK */ +#define MAX_COUNT 3 + +#define SLEEP_DURATION_LOW 55 +#define SLEEP_DURATION_HI 65 + +/* max. number of iterations after I2C NAK for 'long' commands + * we need this especially for sending TPM_READY, since the cleanup after the + * transtion to the ready state may take some time, but it is unpredictable + * how long it will take. + */ +#define MAX_COUNT_LONG 50 + +#define SLEEP_DURATION_LONG_LOW 200 +#define SLEEP_DURATION_LONG_HI 220 + +/* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */ +#define SLEEP_DURATION_RESET_LOW 2400 +#define SLEEP_DURATION_RESET_HI 2600 + +/* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */ +#define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000) +#define TPM_TIMEOUT_US_HI (TPM_TIMEOUT_US_LOW + 2000) + +/* expected value for DIDVID register */ +#define TPM_TIS_I2C_DID_VID_9635 0xd1150b00L +#define TPM_TIS_I2C_DID_VID_9645 0x001a15d1L + +enum i2c_chip_type { + SLB9635, + SLB9645, + UNKNOWN, +}; + +/* Structure to store I2C TPM specific stuff */ +struct tpm_inf_dev { + struct i2c_client *client; + u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */ + struct tpm_chip *chip; + enum i2c_chip_type chip_type; +}; + +static struct tpm_inf_dev tpm_dev; + +/* + * iic_tpm_read() - read from TPM register + * @addr: register address to read from + * @buffer: provided by caller + * @len: number of bytes to read + * + * Read len bytes from TPM register and put them into + * buffer (little-endian format, i.e. first byte is put into buffer[0]). + * + * NOTE: TPM is big-endian for multi-byte values. Multi-byte + * values have to be swapped. + * + * NOTE: We can't unfortunately use the combined read/write functions + * provided by the i2c core as the TPM currently does not support the + * repeated start condition and due to it's special requirements. + * The i2c_smbus* functions do not work for this chip. + * + * Return -EIO on error, 0 on success. + */ +static int iic_tpm_read(u8 addr, u8 *buffer, size_t len) +{ + + struct i2c_msg msg1 = { + .addr = tpm_dev.client->addr, + .len = 1, + .buf = &addr + }; + struct i2c_msg msg2 = { + .addr = tpm_dev.client->addr, + .flags = I2C_M_RD, + .len = len, + .buf = buffer + }; + struct i2c_msg msgs[] = {msg1, msg2}; + + int rc = 0; + int count; + + /* Lock the adapter for the duration of the whole sequence. */ + if (!tpm_dev.client->adapter->algo->master_xfer) + return -EOPNOTSUPP; + i2c_lock_adapter(tpm_dev.client->adapter); + + if (tpm_dev.chip_type == SLB9645) { + /* use a combined read for newer chips + * unfortunately the smbus functions are not suitable due to + * the 32 byte limit of the smbus. + * retries should usually not be needed, but are kept just to + * be on the safe side. 
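+		 * (each failed attempt sleeps for SLEEP_DURATION_LOW..HI
+		 * microseconds, and at most MAX_COUNT attempts are made
+		 * per register access)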
+ */ + for (count = 0; count < MAX_COUNT; count++) { + rc = __i2c_transfer(tpm_dev.client->adapter, msgs, 2); + if (rc > 0) + break; /* break here to skip sleep */ + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); + } + } else { + /* slb9635 protocol should work in all cases */ + for (count = 0; count < MAX_COUNT; count++) { + rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1); + if (rc > 0) + break; /* break here to skip sleep */ + + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); + } + + if (rc <= 0) + goto out; + + /* After the TPM has successfully received the register address + * it needs some time, thus we're sleeping here again, before + * retrieving the data + */ + for (count = 0; count < MAX_COUNT; count++) { + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); + rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1); + if (rc > 0) + break; + } + } + +out: + i2c_unlock_adapter(tpm_dev.client->adapter); + /* take care of 'guard time' */ + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); + + /* __i2c_transfer returns the number of successfully transferred + * messages. + * So rc should be greater than 0 here otherwise we have an error. + */ + if (rc <= 0) + return -EIO; + + return 0; +} + +static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len, + unsigned int sleep_low, + unsigned int sleep_hi, u8 max_count) +{ + int rc = -EIO; + int count; + + struct i2c_msg msg1 = { + .addr = tpm_dev.client->addr, + .len = len + 1, + .buf = tpm_dev.buf + }; + + if (len > TPM_BUFSIZE) + return -EINVAL; + + if (!tpm_dev.client->adapter->algo->master_xfer) + return -EOPNOTSUPP; + i2c_lock_adapter(tpm_dev.client->adapter); + + /* prepend the 'register address' to the buffer */ + tpm_dev.buf[0] = addr; + memcpy(&(tpm_dev.buf[1]), buffer, len); + + /* + * NOTE: We have to use these special mechanisms here and unfortunately + * cannot rely on the standard behavior of i2c_transfer. + * Even for newer chips the smbus functions are not + * suitable due to the 32 byte limit of the smbus. + */ + for (count = 0; count < max_count; count++) { + rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1); + if (rc > 0) + break; + usleep_range(sleep_low, sleep_hi); + } + + i2c_unlock_adapter(tpm_dev.client->adapter); + /* take care of 'guard time' */ + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); + + /* __i2c_transfer returns the number of successfully transferred + * messages. + * So rc should be greater than 0 here otherwise we have an error. + */ + if (rc <= 0) + return -EIO; + + return 0; +} + +/* + * iic_tpm_write() - write to TPM register + * @addr: register address to write to + * @buffer: containing data to be written + * @len: number of bytes to write + * + * Write len bytes from provided buffer to TPM register (little + * endian format, i.e. buffer[0] is written as first byte). + * + * NOTE: TPM is big-endian for multi-byte values. Multi-byte + * values have to be swapped. + * + * NOTE: use this function instead of the iic_tpm_write_generic function. 
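+ * It wraps iic_tpm_write_generic() with the default short sleep range
+ * (SLEEP_DURATION_LOW/_HI) and retry limit (MAX_COUNT).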
+ * + * Return -EIO on error, 0 on success + */ +static int iic_tpm_write(u8 addr, u8 *buffer, size_t len) +{ + return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW, + SLEEP_DURATION_HI, MAX_COUNT); +} + +/* + * This function is needed especially for the cleanup situation after + * sending TPM_READY + * */ +static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len) +{ + return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW, + SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG); +} + +enum tis_access { + TPM_ACCESS_VALID = 0x80, + TPM_ACCESS_ACTIVE_LOCALITY = 0x20, + TPM_ACCESS_REQUEST_PENDING = 0x04, + TPM_ACCESS_REQUEST_USE = 0x02, +}; + +enum tis_status { + TPM_STS_VALID = 0x80, + TPM_STS_COMMAND_READY = 0x40, + TPM_STS_GO = 0x20, + TPM_STS_DATA_AVAIL = 0x10, + TPM_STS_DATA_EXPECT = 0x08, +}; + +enum tis_defaults { + TIS_SHORT_TIMEOUT = 750, /* ms */ + TIS_LONG_TIMEOUT = 2000, /* 2 sec */ +}; + +#define TPM_ACCESS(l) (0x0000 | ((l) << 4)) +#define TPM_STS(l) (0x0001 | ((l) << 4)) +#define TPM_DATA_FIFO(l) (0x0005 | ((l) << 4)) +#define TPM_DID_VID(l) (0x0006 | ((l) << 4)) + +static int check_locality(struct tpm_chip *chip, int loc) +{ + u8 buf; + int rc; + + rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1); + if (rc < 0) + return rc; + + if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == + (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { + chip->vendor.locality = loc; + return loc; + } + + return -EIO; +} + +/* implementation similar to tpm_tis */ +static void release_locality(struct tpm_chip *chip, int loc, int force) +{ + u8 buf; + if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0) + return; + + if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == + (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) { + buf = TPM_ACCESS_ACTIVE_LOCALITY; + iic_tpm_write(TPM_ACCESS(loc), &buf, 1); + } +} + +static int request_locality(struct tpm_chip *chip, int loc) +{ + unsigned long stop; + u8 buf = TPM_ACCESS_REQUEST_USE; + + if (check_locality(chip, loc) >= 0) + return loc; + + iic_tpm_write(TPM_ACCESS(loc), &buf, 1); + + /* wait for burstcount */ + stop = jiffies + chip->vendor.timeout_a; + do { + if (check_locality(chip, loc) >= 0) + return loc; + usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI); + } while (time_before(jiffies, stop)); + + return -ETIME; +} + +static u8 tpm_tis_i2c_status(struct tpm_chip *chip) +{ + /* NOTE: since I2C read may fail, return 0 in this case --> time-out */ + u8 buf = 0xFF; + u8 i = 0; + + do { + if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0) + return 0; + + i++; + /* if locallity is set STS should not be 0xFF */ + } while ((buf == 0xFF) && i < 10); + + return buf; +} + +static void tpm_tis_i2c_ready(struct tpm_chip *chip) +{ + /* this causes the current command to be aborted */ + u8 buf = TPM_STS_COMMAND_READY; + iic_tpm_write_long(TPM_STS(chip->vendor.locality), &buf, 1); +} + +static ssize_t get_burstcount(struct tpm_chip *chip) +{ + unsigned long stop; + ssize_t burstcnt; + u8 buf[3]; + + /* wait for burstcount */ + /* which timeout value, spec has 2 answers (c & d) */ + stop = jiffies + chip->vendor.timeout_d; + do { + /* Note: STS is little endian */ + if (iic_tpm_read(TPM_STS(chip->vendor.locality)+1, buf, 3) < 0) + burstcnt = 0; + else + burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0]; + + if (burstcnt) + return burstcnt; + + usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI); + } while (time_before(jiffies, stop)); + return -EBUSY; +} + +static int wait_for_stat(struct tpm_chip *chip, u8 
mask, unsigned long timeout, + int *status) +{ + unsigned long stop; + + /* check current status */ + *status = tpm_tis_i2c_status(chip); + if ((*status != 0xFF) && (*status & mask) == mask) + return 0; + + stop = jiffies + timeout; + do { + /* since we just checked the status, give the TPM some time */ + usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI); + *status = tpm_tis_i2c_status(chip); + if ((*status & mask) == mask) + return 0; + + } while (time_before(jiffies, stop)); + + return -ETIME; +} + +static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) +{ + size_t size = 0; + ssize_t burstcnt; + u8 retries = 0; + int rc; + + while (size < count) { + burstcnt = get_burstcount(chip); + + /* burstcnt < 0 = TPM is busy */ + if (burstcnt < 0) + return burstcnt; + + /* limit received data to max. left */ + if (burstcnt > (count - size)) + burstcnt = count - size; + + rc = iic_tpm_read(TPM_DATA_FIFO(chip->vendor.locality), + &(buf[size]), burstcnt); + if (rc == 0) + size += burstcnt; + else if (rc < 0) + retries++; + + /* avoid endless loop in case of broken HW */ + if (retries > MAX_COUNT_LONG) + return -EIO; + } + return size; +} + +static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int size = 0; + int expected, status; + + if (count < TPM_HEADER_SIZE) { + size = -EIO; + goto out; + } + + /* read first 10 bytes, including tag, paramsize, and result */ + size = recv_data(chip, buf, TPM_HEADER_SIZE); + if (size < TPM_HEADER_SIZE) { + dev_err(chip->pdev, "Unable to read header\n"); + goto out; + } + + expected = be32_to_cpu(*(__be32 *)(buf + 2)); + if ((size_t) expected > count) { + size = -EIO; + goto out; + } + + size += recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE); + if (size < expected) { + dev_err(chip->pdev, "Unable to read remainder of result\n"); + size = -ETIME; + goto out; + } + + wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); + if (status & TPM_STS_DATA_AVAIL) { /* retry? 
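+		 * (no retry is attempted: dataAvail still being set after
+		 * the full response was read means the TPM holds more data
+		 * than its header announced, so it is treated as an I/O
+		 * error instead)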
*/ + dev_err(chip->pdev, "Error left over data\n"); + size = -EIO; + goto out; + } + +out: + tpm_tis_i2c_ready(chip); + /* The TPM needs some time to clean up here, + * so we sleep rather than keeping the bus busy + */ + usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); + release_locality(chip, chip->vendor.locality, 0); + return size; +} + +static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc, status; + ssize_t burstcnt; + size_t count = 0; + u8 retries = 0; + u8 sts = TPM_STS_GO; + + if (len > TPM_BUFSIZE) + return -E2BIG; /* command is too long for our tpm, sorry */ + + if (request_locality(chip, 0) < 0) + return -EBUSY; + + status = tpm_tis_i2c_status(chip); + if ((status & TPM_STS_COMMAND_READY) == 0) { + tpm_tis_i2c_ready(chip); + if (wait_for_stat + (chip, TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, &status) < 0) { + rc = -ETIME; + goto out_err; + } + } + + while (count < len - 1) { + burstcnt = get_burstcount(chip); + + /* burstcnt < 0 = TPM is busy */ + if (burstcnt < 0) + return burstcnt; + + if (burstcnt > (len - 1 - count)) + burstcnt = len - 1 - count; + + rc = iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), + &(buf[count]), burstcnt); + if (rc == 0) + count += burstcnt; + else if (rc < 0) + retries++; + + /* avoid endless loop in case of broken HW */ + if (retries > MAX_COUNT_LONG) { + rc = -EIO; + goto out_err; + } + + wait_for_stat(chip, TPM_STS_VALID, + chip->vendor.timeout_c, &status); + + if ((status & TPM_STS_DATA_EXPECT) == 0) { + rc = -EIO; + goto out_err; + } + } + + /* write last byte */ + iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), &(buf[count]), 1); + wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); + if ((status & TPM_STS_DATA_EXPECT) != 0) { + rc = -EIO; + goto out_err; + } + + /* go and do it */ + iic_tpm_write(TPM_STS(chip->vendor.locality), &sts, 1); + + return len; +out_err: + tpm_tis_i2c_ready(chip); + /* The TPM needs some time to clean up here, + * so we sleep rather than keeping the bus busy + */ + usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); + release_locality(chip, chip->vendor.locality, 0); + return rc; +} + +static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == TPM_STS_COMMAND_READY); +} + +static const struct tpm_class_ops tpm_tis_i2c = { + .status = tpm_tis_i2c_status, + .recv = tpm_tis_i2c_recv, + .send = tpm_tis_i2c_send, + .cancel = tpm_tis_i2c_ready, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = tpm_tis_i2c_req_canceled, +}; + +static int tpm_tis_i2c_init(struct device *dev) +{ + u32 vendor; + int rc = 0; + struct tpm_chip *chip; + + chip = tpmm_chip_alloc(dev, &tpm_tis_i2c); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + /* Disable interrupts */ + chip->vendor.irq = 0; + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + + if (request_locality(chip, 0) != 0) { + dev_err(dev, "could not request locality\n"); + rc = -ENODEV; + goto out_err; + } + + /* read four bytes from DID_VID register */ + if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) { + dev_err(dev, "could not read vendor id\n"); + rc = -EIO; + goto out_release; + } + + if (vendor == TPM_TIS_I2C_DID_VID_9645) { + 
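+		/* the SLB9645 supports combined (repeated-start) reads,
+		 * see iic_tpm_read() above */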
tpm_dev.chip_type = SLB9645; + } else if (vendor == TPM_TIS_I2C_DID_VID_9635) { + tpm_dev.chip_type = SLB9635; + } else { + dev_err(dev, "vendor id did not match! ID was %08x\n", vendor); + rc = -ENODEV; + goto out_release; + } + + dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16); + + INIT_LIST_HEAD(&chip->vendor.list); + tpm_dev.chip = chip; + + tpm_get_timeouts(chip); + tpm_do_selftest(chip); + + return tpm_chip_register(chip); +out_release: + release_locality(chip, chip->vendor.locality, 1); + tpm_dev.client = NULL; +out_err: + return rc; +} + +static const struct i2c_device_id tpm_tis_i2c_table[] = { + {"tpm_i2c_infineon", 0}, + {"slb9635tt", 0}, + {"slb9645tt", 1}, + {}, +}; + +MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table); + +#ifdef CONFIG_OF +static const struct of_device_id tpm_tis_i2c_of_match[] = { + { + .name = "tpm_i2c_infineon", + .type = "tpm", + .compatible = "infineon,tpm_i2c_infineon", + .data = (void *)0 + }, + { + .name = "slb9635tt", + .type = "tpm", + .compatible = "infineon,slb9635tt", + .data = (void *)0 + }, + { + .name = "slb9645tt", + .type = "tpm", + .compatible = "infineon,slb9645tt", + .data = (void *)1 + }, + {}, +}; +MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume); + +static int tpm_tis_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct device *dev = &(client->dev); + + if (tpm_dev.client != NULL) { + dev_err(dev, "This driver only supports one client at a time\n"); + return -EBUSY; /* We only support one client */ + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "no algorithms associated to the i2c bus\n"); + return -ENODEV; + } + + tpm_dev.client = client; + rc = tpm_tis_i2c_init(&client->dev); + if (rc != 0) { + tpm_dev.client = NULL; + rc = -ENODEV; + } + return rc; +} + +static int tpm_tis_i2c_remove(struct i2c_client *client) +{ + struct tpm_chip *chip = tpm_dev.chip; + + tpm_chip_unregister(chip); + release_locality(chip, chip->vendor.locality, 1); + tpm_dev.client = NULL; + + return 0; +} + +static struct i2c_driver tpm_tis_i2c_driver = { + .id_table = tpm_tis_i2c_table, + .probe = tpm_tis_i2c_probe, + .remove = tpm_tis_i2c_remove, + .driver = { + .name = "tpm_i2c_infineon", + .owner = THIS_MODULE, + .pm = &tpm_tis_i2c_ops, + .of_match_table = of_match_ptr(tpm_tis_i2c_of_match), + }, +}; + +module_i2c_driver(tpm_tis_i2c_driver); +MODULE_AUTHOR("Peter Huewe "); +MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver"); +MODULE_VERSION("2.2.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_i2c_nuvoton.c b/kernel/drivers/char/tpm/tpm_i2c_nuvoton.c new file mode 100644 index 000000000..9d42b7d78 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -0,0 +1,654 @@ +/****************************************************************************** + * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501, + * based on the TCG TPM Interface Spec version 1.2. + * Specifications at www.trustedcomputinggroup.org + * + * Copyright (C) 2011, Nuvoton Technology Corporation. + * Dan Morav + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + * + * Nuvoton contact information: APC.Support@nuvoton.com + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +/* I2C interface offsets */ +#define TPM_STS 0x00 +#define TPM_BURST_COUNT 0x01 +#define TPM_DATA_FIFO_W 0x20 +#define TPM_DATA_FIFO_R 0x40 +#define TPM_VID_DID_RID 0x60 +/* TPM command header size */ +#define TPM_HEADER_SIZE 10 +#define TPM_RETRY 5 +/* + * I2C bus device maximum buffer size w/o counting I2C address or command + * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data + */ +#define TPM_I2C_MAX_BUF_SIZE 32 +#define TPM_I2C_RETRY_COUNT 32 +#define TPM_I2C_BUS_DELAY 1 /* msec */ +#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ +#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ + +#define I2C_DRIVER_NAME "tpm_i2c_nuvoton" + +struct priv_data { + unsigned int intrs; +}; + +static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_read_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_write_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +#define TPM_STS_VALID 0x80 +#define TPM_STS_COMMAND_READY 0x40 +#define TPM_STS_GO 0x20 +#define TPM_STS_DATA_AVAIL 0x10 +#define TPM_STS_EXPECT 0x08 +#define TPM_STS_RESPONSE_RETRY 0x02 +#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */ + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +/* read TPM_STS register */ +static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->pdev); + s32 status; + u8 data; + + status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); + if (status <= 0) { + dev_err(chip->pdev, "%s() error return %d\n", __func__, + status); + data = TPM_STS_ERR_VAL; + } + + return data; +} + +/* write byte to TPM_STS register */ +static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) +{ + s32 status; + int i; + + /* this causes the current command to be aborted */ + for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) { + status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data); + msleep(TPM_I2C_BUS_DELAY); + } + return status; +} + +/* write commandReady to TPM_STS register */ +static void i2c_nuvoton_ready(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->pdev); + s32 status; + + /* this causes the current command to be aborted */ + status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); + if (status < 0) + dev_err(chip->pdev, + "%s() fail to write TPM_STS.commandReady\n", __func__); +} + +/* read burstCount field from TPM_STS register + 
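+ * (the field is polled until it reads non-zero or timeout_d expires,
+ * and the result is clamped to TPM_I2C_MAX_BUF_SIZE, the largest
+ * SMBus block transfer)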
* return -1 on fail to read */ +static int i2c_nuvoton_get_burstcount(struct i2c_client *client, + struct tpm_chip *chip) +{ + unsigned long stop = jiffies + chip->vendor.timeout_d; + s32 status; + int burst_count = -1; + u8 data; + + /* wait for burstcount to be non-zero */ + do { + /* in I2C burstCount is 1 byte */ + status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1, + &data); + if (status > 0 && data > 0) { + burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data); + break; + } + msleep(TPM_I2C_BUS_DELAY); + } while (time_before(jiffies, stop)); + + return burst_count; +} + +/* + * WPCT301/NPCT501 SINT# supports only dataAvail + * any call to this function which is not waiting for dataAvail will + * set queue to NULL to avoid waiting for interrupt + */ +static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) +{ + u8 status = i2c_nuvoton_read_status(chip); + return (status != TPM_STS_ERR_VAL) && ((status & mask) == value); +} + +static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, + u32 timeout, wait_queue_head_t *queue) +{ + if (chip->vendor.irq && queue) { + s32 rc; + struct priv_data *priv = chip->vendor.priv; + unsigned int cur_intrs = priv->intrs; + + enable_irq(chip->vendor.irq); + rc = wait_event_interruptible_timeout(*queue, + cur_intrs != priv->intrs, + timeout); + if (rc > 0) + return 0; + /* At this point we know that the SINT pin is asserted, so we + * do not need to do i2c_nuvoton_check_status */ + } else { + unsigned long ten_msec, stop; + bool status_valid; + + /* check current status */ + status_valid = i2c_nuvoton_check_status(chip, mask, value); + if (status_valid) + return 0; + + /* use polling to wait for the event */ + ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); + stop = jiffies + timeout; + do { + if (time_before(jiffies, ten_msec)) + msleep(TPM_I2C_RETRY_DELAY_SHORT); + else + msleep(TPM_I2C_RETRY_DELAY_LONG); + status_valid = i2c_nuvoton_check_status(chip, mask, + value); + if (status_valid) + return 0; + } while (time_before(jiffies, stop)); + } + dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + value); + return -ETIMEDOUT; +} + +/* wait for dataAvail field to be set in the TPM_STS register */ +static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, + wait_queue_head_t *queue) +{ + return i2c_nuvoton_wait_for_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + timeout, queue); +} + +/* Read @count bytes into @buf from TPM_RD_FIFO register */ +static int i2c_nuvoton_recv_data(struct i2c_client *client, + struct tpm_chip *chip, u8 *buf, size_t count) +{ + s32 rc; + int burst_count, bytes2read, size = 0; + + while (size < count && + i2c_nuvoton_wait_for_data_avail(chip, + chip->vendor.timeout_c, + &chip->vendor.read_queue) == 0) { + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(chip->pdev, + "%s() fail to read burstCount=%d\n", __func__, + burst_count); + return -EIO; + } + bytes2read = min_t(size_t, burst_count, count - size); + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, + bytes2read, &buf[size]); + if (rc < 0) { + dev_err(chip->pdev, + "%s() fail on i2c_nuvoton_read_buf()=%d\n", + __func__, rc); + return -EIO; + } + dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read); + size += bytes2read; + } + + return size; +} + +/* Read TPM command results */ +static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct device *dev = chip->pdev; + 
struct i2c_client *client = to_i2c_client(dev); + s32 rc; + int expected, status, burst_count, retries, size = 0; + + if (count < TPM_HEADER_SIZE) { + i2c_nuvoton_ready(chip); /* return to idle */ + dev_err(dev, "%s() count < header size\n", __func__); + return -EIO; + } + for (retries = 0; retries < TPM_RETRY; retries++) { + if (retries > 0) { + /* if this is not the first trial, set responseRetry */ + i2c_nuvoton_write_status(client, + TPM_STS_RESPONSE_RETRY); + } + /* + * read first available (> 10 bytes), including: + * tag, paramsize, and result + */ + status = i2c_nuvoton_wait_for_data_avail( + chip, chip->vendor.timeout_c, &chip->vendor.read_queue); + if (status != 0) { + dev_err(dev, "%s() timeout on dataAvail\n", __func__); + size = -ETIMEDOUT; + continue; + } + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail to get burstCount\n", __func__); + size = -EIO; + continue; + } + size = i2c_nuvoton_recv_data(client, chip, buf, + burst_count); + if (size < TPM_HEADER_SIZE) { + dev_err(dev, "%s() fail to read header\n", __func__); + size = -EIO; + continue; + } + /* + * convert number of expected bytes field from big endian 32 bit + * to machine native + */ + expected = be32_to_cpu(*(__be32 *) (buf + 2)); + if (expected > count) { + dev_err(dev, "%s() expected > count\n", __func__); + size = -EIO; + continue; + } + rc = i2c_nuvoton_recv_data(client, chip, &buf[size], + expected - size); + size += rc; + if (rc < 0 || size < expected) { + dev_err(dev, "%s() fail to read remainder of result\n", + __func__); + size = -EIO; + continue; + } + if (i2c_nuvoton_wait_for_stat( + chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, + TPM_STS_VALID, chip->vendor.timeout_c, + NULL)) { + dev_err(dev, "%s() error left over data\n", __func__); + size = -ETIMEDOUT; + continue; + } + break; + } + i2c_nuvoton_ready(chip); + dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size); + return size; +} + +/* + * Send TPM command. 
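+ *
+ * As a sketch, the write half below follows the TIS-style sequence:
+ *
+ *   assert commandReady and wait for it to read back;
+ *   while more than one byte remains:
+ *       n = burstCount; write n bytes to TPM_DATA_FIFO_W;
+ *       wait for stsValid with Expect still set;
+ *   write the final byte; wait for stsValid with Expect clear;
+ *   write TPM_STS.go and wait for dataAvail.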
+ * + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct device *dev = chip->pdev; + struct i2c_client *client = to_i2c_client(dev); + u32 ordinal; + size_t count = 0; + int burst_count, bytes2write, retries, rc = -EIO; + + for (retries = 0; retries < TPM_RETRY; retries++) { + i2c_nuvoton_ready(chip); + if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, NULL)) { + dev_err(dev, "%s() timeout on commandReady\n", + __func__); + rc = -EIO; + continue; + } + rc = 0; + while (count < len - 1) { + burst_count = i2c_nuvoton_get_burstcount(client, + chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail get burstCount\n", + __func__); + rc = -EIO; + break; + } + bytes2write = min_t(size_t, burst_count, + len - 1 - count); + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, + bytes2write, &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail i2cWriteBuf\n", + __func__); + break; + } + dev_dbg(dev, "%s(%d):", __func__, bytes2write); + count += bytes2write; + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | + TPM_STS_EXPECT, + TPM_STS_VALID | + TPM_STS_EXPECT, + chip->vendor.timeout_c, + NULL); + if (rc < 0) { + dev_err(dev, "%s() timeout on Expect\n", + __func__); + rc = -ETIMEDOUT; + break; + } + } + if (rc < 0) + continue; + + /* write last byte */ + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1, + &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail to write last byte\n", + __func__); + rc = -EIO; + continue; + } + dev_dbg(dev, "%s(last): %02x", __func__, buf[count]); + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | TPM_STS_EXPECT, + TPM_STS_VALID, + chip->vendor.timeout_c, NULL); + if (rc) { + dev_err(dev, "%s() timeout on Expect to clear\n", + __func__); + rc = -ETIMEDOUT; + continue; + } + break; + } + if (rc < 0) { + /* retries == TPM_RETRY */ + i2c_nuvoton_ready(chip); + return rc; + } + /* execute the TPM command */ + rc = i2c_nuvoton_write_status(client, TPM_STS_GO); + if (rc < 0) { + dev_err(dev, "%s() fail to write Go\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + rc = i2c_nuvoton_wait_for_data_avail(chip, + tpm_calc_ordinal_duration(chip, + ordinal), + &chip->vendor.read_queue); + if (rc) { + dev_err(dev, "%s() timeout command duration\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + + dev_dbg(dev, "%s() -> %zd\n", __func__, len); + return len; +} + +static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == TPM_STS_COMMAND_READY); +} + +static const struct tpm_class_ops tpm_i2c = { + .status = i2c_nuvoton_read_status, + .recv = i2c_nuvoton_recv, + .send = i2c_nuvoton_send, + .cancel = i2c_nuvoton_ready, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = i2c_nuvoton_req_canceled, +}; + +/* The only purpose for the handler is to signal to any waiting threads that + * the interrupt is currently being asserted. The driver does not do any + * processing triggered by interrupts, and the chip provides no way to mask at + * the source (plus that would be slow over I2C). Run the IRQ as a one-shot, + * this means it cannot be shared. 
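+ * (the pairing: the handler below calls disable_irq_nosync() after waking
+ * the queue, and i2c_nuvoton_wait_for_stat() calls enable_irq() just
+ * before sleeping, so the level-triggered line is unmasked only while a
+ * waiter can consume the event) +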
*/ +static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + struct priv_data *priv = chip->vendor.priv; + + priv->intrs++; + wake_up(&chip->vendor.read_queue); + disable_irq_nosync(chip->vendor.irq); + return IRQ_HANDLED; +} + +static int get_vid(struct i2c_client *client, u32 *res) +{ + static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe }; + u32 temp; + s32 rc; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) { + /* + * f/w rev 2.81 has an issue where the VID_DID_RID is not + * reporting the right value. so give it another chance at + * offset 0x20 (FIFO_W). + */ + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4, + (u8 *) (&temp)); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, + sizeof(vid_did_rid_value))) + return -ENODEV; + } + + *res = temp; + return 0; +} + +static int i2c_nuvoton_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + u32 vid = 0; + + rc = get_vid(client, &vid); + if (rc) + return rc; + + dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid, + (u8) (vid >> 16), (u8) (vid >> 24)); + + chip = tpmm_chip_alloc(dev, &tpm_i2c); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + if (!chip->vendor.priv) + return -ENOMEM; + + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + + /* + * I2C intfcaps (interrupt capabilitieis) in the chip are hard coded to: + * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT + * The IRQ should be set in the i2c_board_info (which is done + * automatically in of_i2c_register_devices, for device tree users */ + chip->vendor.irq = client->irq; + + if (chip->vendor.irq) { + dev_dbg(dev, "%s() chip-vendor.irq\n", __func__); + rc = devm_request_irq(dev, chip->vendor.irq, + i2c_nuvoton_int_handler, + IRQF_TRIGGER_LOW, + chip->devname, + chip); + if (rc) { + dev_err(dev, "%s() Unable to request irq: %d for use\n", + __func__, chip->vendor.irq); + chip->vendor.irq = 0; + } else { + /* Clear any pending interrupt */ + i2c_nuvoton_ready(chip); + /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, + NULL); + if (rc == 0) { + /* + * TIS is in ready state + * write dummy byte to enter reception state + * TPM_DATA_FIFO_W <- rc (0) + */ + rc = i2c_nuvoton_write_buf(client, + TPM_DATA_FIFO_W, + 1, (u8 *) (&rc)); + if (rc < 0) + return rc; + /* TPM_STS <- 0x40 (commandReady) */ + i2c_nuvoton_ready(chip); + } else { + /* + * timeout_b reached - command was + * aborted. 
TIS should now be in idle state - + * only TPM_STS_VALID should be set + */ + if (i2c_nuvoton_read_status(chip) != + TPM_STS_VALID) + return -EIO; + } + } + } + + if (tpm_get_timeouts(chip)) + return -ENODEV; + + if (tpm_do_selftest(chip)) + return -ENODEV; + + return tpm_chip_register(chip); +} + +static int i2c_nuvoton_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + tpm_chip_unregister(chip); + return 0; +} + +static const struct i2c_device_id i2c_nuvoton_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_nuvoton_of_match[] = { + {.compatible = "nuvoton,npct501"}, + {.compatible = "winbond,wpct301"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_nuvoton_driver = { + .id_table = i2c_nuvoton_id, + .probe = i2c_nuvoton_probe, + .remove = i2c_nuvoton_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_nuvoton_pm_ops, + .of_match_table = of_match_ptr(i2c_nuvoton_of_match), + }, +}; + +module_i2c_driver(i2c_nuvoton_driver); + +MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)"); +MODULE_DESCRIPTION("Nuvoton TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_ibmvtpm.c b/kernel/drivers/char/tpm/tpm_ibmvtpm.c new file mode 100644 index 000000000..42ffa5e7a --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_ibmvtpm.c @@ -0,0 +1,699 @@ +/* + * Copyright (C) 2012 IBM Corporation + * + * Author: Ashley Lai + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <asm/vio.h>
+#include <asm/irq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <asm/prom.h>
+#include <linux/module.h>
+
+#include "tpm.h"
+#include "tpm_ibmvtpm.h"
+
+static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
+
+static struct vio_device_id tpm_ibmvtpm_device_table[] = {
+    { "IBM,vtpm", "IBM,vtpm"},
+    { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
+
+/**
+ * ibmvtpm_send_crq - Send a CRQ request
+ * @vdev: vio device struct
+ * @w1: first word
+ * @w2: second word
+ *
+ * Return value:
+ * 0 - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
+{
+    return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
+}
+
+/**
+ * ibmvtpm_get_data - Retrieve ibm vtpm data
+ * @dev: device struct
+ *
+ * Return value:
+ * vtpm device struct
+ */
+static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
+{
+    struct tpm_chip *chip = dev_get_drvdata(dev);
+    if (chip)
+        return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+    return NULL;
+}
+
+/**
+ * tpm_ibmvtpm_recv - Receive data after send
+ * @chip: tpm chip struct
+ * @buf: buffer to read
+ * @count: size of buffer
+ *
+ * Return value:
+ * Number of bytes read
+ */
+static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+    struct ibmvtpm_dev *ibmvtpm;
+    u16 len;
+    int sig;
+
+    ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+
+    if (!ibmvtpm->rtce_buf) {
+        dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+        return 0;
+    }
+
+    sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+    if (sig)
+        return -EINTR;
+
+    len = ibmvtpm->res_len;
+
+    if (count < len) {
+        dev_err(ibmvtpm->dev,
+            "Invalid size in recv: count=%zd, crq_size=%d\n",
+            count, len);
+        return -EIO;
+    }
+
+    spin_lock(&ibmvtpm->rtce_lock);
+    memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+    memset(ibmvtpm->rtce_buf, 0, len);
+    ibmvtpm->res_len = 0;
+    spin_unlock(&ibmvtpm->rtce_lock);
+    return len;
+}
+
+/**
+ * tpm_ibmvtpm_send - Send tpm request
+ * @chip: tpm chip struct
+ * @buf: buffer contains data to send
+ * @count: size of buffer
+ *
+ * Return value:
+ * Number of bytes sent
+ */
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+    struct ibmvtpm_dev *ibmvtpm;
+    struct ibmvtpm_crq crq;
+    __be64 *word = (__be64 *)&crq;
+    int rc;
+
+    ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+
+    if (!ibmvtpm->rtce_buf) {
+        dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+        return 0;
+    }
+
+    if (count > ibmvtpm->rtce_size) {
+        dev_err(ibmvtpm->dev,
+            "Invalid size in send: count=%zd, rtce_size=%d\n",
+            count, ibmvtpm->rtce_size);
+        return -EIO;
+    }
+
+    spin_lock(&ibmvtpm->rtce_lock);
+    memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+    crq.valid = (u8)IBMVTPM_VALID_CMD;
+    crq.msg = (u8)VTPM_TPM_COMMAND;
+    crq.len = cpu_to_be16(count);
+    crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
+
+    rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+                  be64_to_cpu(word[1]));
+    if (rc != H_SUCCESS) {
+        dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+        rc = 0;
+    } else
+        rc = count;
+
+    spin_unlock(&ibmvtpm->rtce_lock);
+    return rc;
+}
+
+static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
+{
+    return;
+}
+
+static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
+{
+    return 0;
+}
+
+/**
+ * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return value:
+ * 0 - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_crq_get_rtce_size(struct
ibmvtpm_dev *ibmvtpm) +{ + struct ibmvtpm_crq crq; + u64 *buf = (u64 *) &crq; + int rc; + + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; + + rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), + cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); + + return rc; +} + +/** + * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version + * - Note that this is vtpm version and not tpm version + * @ibmvtpm: vtpm device struct + * + * Return value: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) +{ + struct ibmvtpm_crq crq; + u64 *buf = (u64 *) &crq; + int rc; + + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_GET_VERSION; + + rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), + cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_get_version failed rc=%d\n", rc); + + return rc; +} + +/** + * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message + * @ibmvtpm: vtpm device struct + * + * Return value: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) +{ + int rc; + + rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); + + return rc; +} + +/** + * ibmvtpm_crq_send_init - Send a CRQ initialize message + * @ibmvtpm: vtpm device struct + * + * Return value: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) +{ + int rc; + + rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_send_init failed rc=%d\n", rc); + + return rc; +} + +/** + * tpm_ibmvtpm_remove - ibm vtpm remove entry point + * @vdev: vio device struct + * + * Return value: + * 0 + */ +static int tpm_ibmvtpm_remove(struct vio_dev *vdev) +{ + struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); + struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev); + int rc = 0; + + tpm_chip_unregister(chip); + + free_irq(vdev->irq, ibmvtpm); + + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle, + CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)ibmvtpm->crq_queue.crq_addr); + + if (ibmvtpm->rtce_buf) { + dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle, + ibmvtpm->rtce_size, DMA_BIDIRECTIONAL); + kfree(ibmvtpm->rtce_buf); + } + + kfree(ibmvtpm); + + return 0; +} + +/** + * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver + * @vdev: vio device struct + * + * Return value: + * Number of bytes the driver needs to DMA map + */ +static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) +{ + struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); + + /* ibmvtpm initializes at probe time, so the data we are + * asking for may not be set yet. Estimate that 4K required + * for TCE-mapped buffer in addition to CRQ. 
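+ * (CRQ_RES_BUF_SIZE is defined as PAGE_SIZE, so this fallback estimate
+ * amounts to two pages, i.e. 8K on a 4K-page system)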
+ */ + if (!ibmvtpm) + return CRQ_RES_BUF_SIZE + PAGE_SIZE; + + return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; +} + +/** + * tpm_ibmvtpm_suspend - Suspend + * @dev: device struct + * + * Return value: + * 0 + */ +static int tpm_ibmvtpm_suspend(struct device *dev) +{ + struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev); + struct ibmvtpm_crq crq; + u64 *buf = (u64 *) &crq; + int rc = 0; + + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; + + rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), + cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "tpm_ibmvtpm_suspend failed rc=%d\n", rc); + + return rc; +} + +/** + * ibmvtpm_reset_crq - Reset CRQ + * @ibmvtpm: ibm vtpm struct + * + * Return value: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) +{ + int rc = 0; + + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, + ibmvtpm->vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE); + ibmvtpm->crq_queue.index = 0; + + return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address, + ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); +} + +/** + * tpm_ibmvtpm_resume - Resume from suspend + * @dev: device struct + * + * Return value: + * 0 + */ +static int tpm_ibmvtpm_resume(struct device *dev) +{ + struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev); + int rc = 0; + + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_ENABLE_CRQ, + ibmvtpm->vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) { + dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); + return rc; + } + + rc = vio_enable_interrupts(ibmvtpm->vdev); + if (rc) { + dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); + return rc; + } + + rc = ibmvtpm_crq_send_init(ibmvtpm); + if (rc) + dev_err(dev, "Error send_init rc=%d\n", rc); + + return rc; +} + +static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == 0); +} + +static const struct tpm_class_ops tpm_ibmvtpm = { + .recv = tpm_ibmvtpm_recv, + .send = tpm_ibmvtpm_send, + .cancel = tpm_ibmvtpm_cancel, + .status = tpm_ibmvtpm_status, + .req_complete_mask = 0, + .req_complete_val = 0, + .req_canceled = tpm_ibmvtpm_req_canceled, +}; + +static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = { + .suspend = tpm_ibmvtpm_suspend, + .resume = tpm_ibmvtpm_resume, +}; + +/** + * ibmvtpm_crq_get_next - Get next responded crq + * @ibmvtpm vtpm device struct + * + * Return value: + * vtpm crq pointer + */ +static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm) +{ + struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue; + struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index]; + + if (crq->valid & VTPM_MSG_RES) { + if (++crq_q->index == crq_q->num_entry) + crq_q->index = 0; + smp_rmb(); + } else + crq = NULL; + return crq; +} + +/** + * ibmvtpm_crq_process - Process responded crq + * @crq crq to be processed + * @ibmvtpm vtpm device struct + * + * Return value: + * Nothing + */ +static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, + struct ibmvtpm_dev *ibmvtpm) +{ + int rc = 0; + + switch (crq->valid) { + case VALID_INIT_CRQ: + switch (crq->msg) { + case INIT_CRQ_RES: + dev_info(ibmvtpm->dev, "CRQ initialized\n"); + rc = ibmvtpm_crq_send_init_complete(ibmvtpm); + if (rc) + dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc); + return; + case INIT_CRQ_COMP_RES: + 
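/* second half of the CRQ handshake: firmware confirms the queue is live */ +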
dev_info(ibmvtpm->dev, + "CRQ initialization completed\n"); + return; + default: + dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg); + return; + } + case IBMVTPM_VALID_CMD: + switch (crq->msg) { + case VTPM_GET_RTCE_BUFFER_SIZE_RES: + if (be16_to_cpu(crq->len) <= 0) { + dev_err(ibmvtpm->dev, "Invalid rtce size\n"); + return; + } + ibmvtpm->rtce_size = be16_to_cpu(crq->len); + ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size, + GFP_KERNEL); + if (!ibmvtpm->rtce_buf) { + dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n"); + return; + } + + ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev, + ibmvtpm->rtce_buf, ibmvtpm->rtce_size, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(ibmvtpm->dev, + ibmvtpm->rtce_dma_handle)) { + kfree(ibmvtpm->rtce_buf); + ibmvtpm->rtce_buf = NULL; + dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n"); + } + + return; + case VTPM_GET_VERSION_RES: + ibmvtpm->vtpm_version = be32_to_cpu(crq->data); + return; + case VTPM_TPM_COMMAND_RES: + /* len of the data in rtce buffer */ + ibmvtpm->res_len = be16_to_cpu(crq->len); + wake_up_interruptible(&ibmvtpm->wq); + return; + default: + return; + } + } + return; +} + +/** + * ibmvtpm_interrupt - Interrupt handler + * @irq: irq number to handle + * @vtpm_instance: vtpm that received interrupt + * + * Returns: + * IRQ_HANDLED + **/ +static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) +{ + struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance; + struct ibmvtpm_crq *crq; + + /* while loop is needed for initial setup (get version and + * get rtce_size). There should be only one tpm request at any + * given time. + */ + while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { + ibmvtpm_crq_process(crq, ibmvtpm); + crq->valid = 0; + smp_wmb(); + } + + return IRQ_HANDLED; +} + +/** + * tpm_ibmvtpm_probe - ibm vtpm initialize entry point + * @vio_dev: vio device struct + * @id: vio device id struct + * + * Return value: + * 0 - Success + * Non-zero - Failure + */ +static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, + const struct vio_device_id *id) +{ + struct ibmvtpm_dev *ibmvtpm; + struct device *dev = &vio_dev->dev; + struct ibmvtpm_crq_queue *crq_q; + struct tpm_chip *chip; + int rc = -ENOMEM, rc1; + + chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL); + if (!ibmvtpm) { + dev_err(dev, "kzalloc for ibmvtpm failed\n"); + goto cleanup; + } + + crq_q = &ibmvtpm->crq_queue; + crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL); + if (!crq_q->crq_addr) { + dev_err(dev, "Unable to allocate memory for crq_addr\n"); + goto cleanup; + } + + crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); + ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, + CRQ_RES_BUF_SIZE, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) { + dev_err(dev, "dma mapping failed\n"); + goto cleanup; + } + + rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address, + ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); + if (rc == H_RESOURCE) + rc = ibmvtpm_reset_crq(ibmvtpm); + + if (rc) { + dev_err(dev, "Unable to register CRQ rc=%d\n", rc); + goto reg_crq_cleanup; + } + + rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0, + tpm_ibmvtpm_driver_name, ibmvtpm); + if (rc) { + dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq); + goto init_irq_cleanup; + } + + rc = vio_enable_interrupts(vio_dev); + if (rc) { + dev_err(dev, "Error %d 
enabling interrupts\n", rc); + goto init_irq_cleanup; + } + + init_waitqueue_head(&ibmvtpm->wq); + + crq_q->index = 0; + + ibmvtpm->dev = dev; + ibmvtpm->vdev = vio_dev; + TPM_VPRIV(chip) = (void *)ibmvtpm; + + spin_lock_init(&ibmvtpm->rtce_lock); + + rc = ibmvtpm_crq_send_init(ibmvtpm); + if (rc) + goto init_irq_cleanup; + + rc = ibmvtpm_crq_get_version(ibmvtpm); + if (rc) + goto init_irq_cleanup; + + rc = ibmvtpm_crq_get_rtce_size(ibmvtpm); + if (rc) + goto init_irq_cleanup; + + return tpm_chip_register(chip); +init_irq_cleanup: + do { + rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address); + } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1)); +reg_crq_cleanup: + dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE, + DMA_BIDIRECTIONAL); +cleanup: + if (ibmvtpm) { + if (crq_q->crq_addr) + free_page((unsigned long)crq_q->crq_addr); + kfree(ibmvtpm); + } + + return rc; +} + +static struct vio_driver ibmvtpm_driver = { + .id_table = tpm_ibmvtpm_device_table, + .probe = tpm_ibmvtpm_probe, + .remove = tpm_ibmvtpm_remove, + .get_desired_dma = tpm_ibmvtpm_get_desired_dma, + .name = tpm_ibmvtpm_driver_name, + .pm = &tpm_ibmvtpm_pm_ops, +}; + +/** + * ibmvtpm_module_init - Initialize ibm vtpm module + * + * Return value: + * 0 -Success + * Non-zero - Failure + */ +static int __init ibmvtpm_module_init(void) +{ + return vio_register_driver(&ibmvtpm_driver); +} + +/** + * ibmvtpm_module_exit - Teardown ibm vtpm module + * + * Return value: + * Nothing + */ +static void __exit ibmvtpm_module_exit(void) +{ + vio_unregister_driver(&ibmvtpm_driver); +} + +module_init(ibmvtpm_module_init); +module_exit(ibmvtpm_module_exit); + +MODULE_AUTHOR("adlai@us.ibm.com"); +MODULE_DESCRIPTION("IBM vTPM Driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_ibmvtpm.h b/kernel/drivers/char/tpm/tpm_ibmvtpm.h new file mode 100644 index 000000000..6af928905 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_ibmvtpm.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2012 IBM Corporation + * + * Author: Ashley Lai + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ *
+ */
+
+#ifndef __TPM_IBMVTPM_H__
+#define __TPM_IBMVTPM_H__
+
+/* vTPM Message Format 1 */
+struct ibmvtpm_crq {
+    u8 valid;
+    u8 msg;
+    __be16 len;
+    __be32 data;
+    __be64 reserved;
+} __attribute__((packed, aligned(8)));
+
+struct ibmvtpm_crq_queue {
+    struct ibmvtpm_crq *crq_addr;
+    u32 index;
+    u32 num_entry;
+};
+
+struct ibmvtpm_dev {
+    struct device *dev;
+    struct vio_dev *vdev;
+    struct ibmvtpm_crq_queue crq_queue;
+    dma_addr_t crq_dma_handle;
+    u32 rtce_size;
+    void __iomem *rtce_buf;
+    dma_addr_t rtce_dma_handle;
+    spinlock_t rtce_lock;
+    wait_queue_head_t wq;
+    u16 res_len;
+    u32 vtpm_version;
+};
+
+#define CRQ_RES_BUF_SIZE PAGE_SIZE
+
+/* Initialize CRQ */
+#define INIT_CRQ_CMD 0xC001000000000000LL /* Init cmd */
+#define INIT_CRQ_COMP_CMD 0xC002000000000000LL /* Init complete cmd */
+#define INIT_CRQ_RES 0x01 /* Init respond */
+#define INIT_CRQ_COMP_RES 0x02 /* Init complete respond */
+#define VALID_INIT_CRQ 0xC0 /* Valid command for init crq */
+
+/* vTPM CRQ response is the message type | 0x80 */
+#define VTPM_MSG_RES 0x80
+#define IBMVTPM_VALID_CMD 0x80
+
+/* vTPM CRQ message types */
+#define VTPM_GET_VERSION 0x01
+#define VTPM_GET_VERSION_RES (0x01 | VTPM_MSG_RES)
+
+#define VTPM_TPM_COMMAND 0x02
+#define VTPM_TPM_COMMAND_RES (0x02 | VTPM_MSG_RES)
+
+#define VTPM_GET_RTCE_BUFFER_SIZE 0x03
+#define VTPM_GET_RTCE_BUFFER_SIZE_RES (0x03 | VTPM_MSG_RES)
+
+#define VTPM_PREPARE_TO_SUSPEND 0x04
+#define VTPM_PREPARE_TO_SUSPEND_RES (0x04 | VTPM_MSG_RES)
+
+#endif
diff --git a/kernel/drivers/char/tpm/tpm_infineon.c b/kernel/drivers/char/tpm/tpm_infineon.c
new file mode 100644
index 000000000..6c488e635
--- /dev/null
+++ b/kernel/drivers/char/tpm/tpm_infineon.c
@@ -0,0 +1,629 @@
+/*
+ * Description:
+ * Device Driver for the Infineon Technologies
+ * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2005, Marcel Selhorst
+ * Sirrix AG - security technologies and
+ * Applied Data Security Group, Ruhr-University Bochum, Germany
+ * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include "tpm.h"
+
+/* Infineon specific definitions */
+/* maximum number of WTX-packages */
+#define TPM_MAX_WTX_PACKAGES 50
+/* msleep-Time for WTX-packages */
+#define TPM_WTX_MSLEEP_TIME 20
+/* msleep-Time --> Interval to check status register */
+#define TPM_MSLEEP_TIME 3
+/* gives number of max.
msleep()-calls before throwing timeout */ +#define TPM_MAX_TRIES 5000 +#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1 + +#define TPM_INF_IO_PORT 0x0 +#define TPM_INF_IO_MEM 0x1 + +#define TPM_INF_ADDR 0x0 +#define TPM_INF_DATA 0x1 + +struct tpm_inf_dev { + int iotype; + + void __iomem *mem_base; /* MMIO ioremap'd addr */ + unsigned long map_base; /* phys MMIO base */ + unsigned long map_size; /* MMIO region size */ + unsigned int index_off; /* index register offset */ + + unsigned int data_regs; /* Data registers */ + unsigned int data_size; + + unsigned int config_port; /* IO Port config index reg */ + unsigned int config_size; +}; + +static struct tpm_inf_dev tpm_dev; + +static inline void tpm_data_out(unsigned char data, unsigned char offset) +{ + if (tpm_dev.iotype == TPM_INF_IO_PORT) + outb(data, tpm_dev.data_regs + offset); + else + writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset); +} + +static inline unsigned char tpm_data_in(unsigned char offset) +{ + if (tpm_dev.iotype == TPM_INF_IO_PORT) + return inb(tpm_dev.data_regs + offset); + else + return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); +} + +static inline void tpm_config_out(unsigned char data, unsigned char offset) +{ + if (tpm_dev.iotype == TPM_INF_IO_PORT) + outb(data, tpm_dev.config_port + offset); + else + writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset); +} + +static inline unsigned char tpm_config_in(unsigned char offset) +{ + if (tpm_dev.iotype == TPM_INF_IO_PORT) + return inb(tpm_dev.config_port + offset); + else + return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); +} + +/* TPM header definitions */ +enum infineon_tpm_header { + TPM_VL_VER = 0x01, + TPM_VL_CHANNEL_CONTROL = 0x07, + TPM_VL_CHANNEL_PERSONALISATION = 0x0A, + TPM_VL_CHANNEL_TPM = 0x0B, + TPM_VL_CONTROL = 0x00, + TPM_INF_NAK = 0x15, + TPM_CTRL_WTX = 0x10, + TPM_CTRL_WTX_ABORT = 0x18, + TPM_CTRL_WTX_ABORT_ACK = 0x18, + TPM_CTRL_ERROR = 0x20, + TPM_CTRL_CHAININGACK = 0x40, + TPM_CTRL_CHAINING = 0x80, + TPM_CTRL_DATA = 0x04, + TPM_CTRL_DATA_CHA = 0x84, + TPM_CTRL_DATA_CHA_ACK = 0xC4 +}; + +enum infineon_tpm_register { + WRFIFO = 0x00, + RDFIFO = 0x01, + STAT = 0x02, + CMD = 0x03 +}; + +enum infineon_tpm_command_bits { + CMD_DIS = 0x00, + CMD_LP = 0x01, + CMD_RES = 0x02, + CMD_IRQC = 0x06 +}; + +enum infineon_tpm_status_bits { + STAT_XFE = 0x00, + STAT_LPA = 0x01, + STAT_FOK = 0x02, + STAT_TOK = 0x03, + STAT_IRQA = 0x06, + STAT_RDA = 0x07 +}; + +/* some outgoing values */ +enum infineon_tpm_values { + CHIP_ID1 = 0x20, + CHIP_ID2 = 0x21, + TPM_DAR = 0x30, + RESET_LP_IRQC_DISABLE = 0x41, + ENABLE_REGISTER_PAIR = 0x55, + IOLIMH = 0x60, + IOLIML = 0x61, + DISABLE_REGISTER_PAIR = 0xAA, + IDVENL = 0xF1, + IDVENH = 0xF2, + IDPDL = 0xF3, + IDPDH = 0xF4 +}; + +static int number_of_wtx; + +static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo) +{ + int status; + int check = 0; + int i; + + if (clear_wrfifo) { + for (i = 0; i < 4096; i++) { + status = tpm_data_in(WRFIFO); + if (status == 0xff) { + if (check == 5) + break; + else + check++; + } + } + } + /* Note: The values which are currently in the FIFO of the TPM + are thrown away since there is no usage for them. Usually, + this has nothing to say, since the TPM will give its answer + immediately or will be aborted anyway, so the data here is + usually garbage and useless. + We have to clean this, because the next communication with + the TPM would be rubbish, if there is still some old data + in the Read FIFO. 
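+   (The drain loop below keeps reading RDFIFO until the RDA status bit
+   drops, and gives up after TPM_MAX_TRIES iterations rather than spin
+   forever on a wedged chip.)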
+ */ + i = 0; + do { + status = tpm_data_in(RDFIFO); + status = tpm_data_in(STAT); + i++; + if (i == TPM_MAX_TRIES) + return -EIO; + } while ((status & (1 << STAT_RDA)) != 0); + return 0; +} + +static int wait(struct tpm_chip *chip, int wait_for_bit) +{ + int status; + int i; + for (i = 0; i < TPM_MAX_TRIES; i++) { + status = tpm_data_in(STAT); + /* check the status-register if wait_for_bit is set */ + if (status & 1 << wait_for_bit) + break; + msleep(TPM_MSLEEP_TIME); + } + if (i == TPM_MAX_TRIES) { /* timeout occurs */ + if (wait_for_bit == STAT_XFE) + dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n"); + if (wait_for_bit == STAT_RDA) + dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n"); + return -EIO; + } + return 0; +}; + +static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) +{ + wait(chip, STAT_XFE); + tpm_data_out(sendbyte, WRFIFO); +} + + /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more + calculation time, it sends a WTX-package, which has to be acknowledged + or aborted. This usually occurs if you are hammering the TPM with key + creation. Set the maximum number of WTX-packages in the definitions + above, if the number is reached, the waiting-time will be denied + and the TPM command has to be resend. + */ + +static void tpm_wtx(struct tpm_chip *chip) +{ + number_of_wtx++; + dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n", + number_of_wtx, TPM_MAX_WTX_PACKAGES); + wait_and_send(chip, TPM_VL_VER); + wait_and_send(chip, TPM_CTRL_WTX); + wait_and_send(chip, 0x00); + wait_and_send(chip, 0x00); + msleep(TPM_WTX_MSLEEP_TIME); +} + +static void tpm_wtx_abort(struct tpm_chip *chip) +{ + dev_info(chip->pdev, "Aborting WTX\n"); + wait_and_send(chip, TPM_VL_VER); + wait_and_send(chip, TPM_CTRL_WTX_ABORT); + wait_and_send(chip, 0x00); + wait_and_send(chip, 0x00); + number_of_wtx = 0; + msleep(TPM_WTX_MSLEEP_TIME); +} + +static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) +{ + int i; + int ret; + u32 size = 0; + number_of_wtx = 0; + +recv_begin: + /* start receiving header */ + for (i = 0; i < 4; i++) { + ret = wait(chip, STAT_RDA); + if (ret) + return -EIO; + buf[i] = tpm_data_in(RDFIFO); + } + + if (buf[0] != TPM_VL_VER) { + dev_err(chip->pdev, + "Wrong transport protocol implementation!\n"); + return -EIO; + } + + if (buf[1] == TPM_CTRL_DATA) { + /* size of the data received */ + size = ((buf[2] << 8) | buf[3]); + + for (i = 0; i < size; i++) { + wait(chip, STAT_RDA); + buf[i] = tpm_data_in(RDFIFO); + } + + if ((size == 0x6D00) && (buf[1] == 0x80)) { + dev_err(chip->pdev, "Error handling on vendor layer!\n"); + return -EIO; + } + + for (i = 0; i < size; i++) + buf[i] = buf[i + 6]; + + size = size - 6; + return size; + } + + if (buf[1] == TPM_CTRL_WTX) { + dev_info(chip->pdev, "WTX-package received\n"); + if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { + tpm_wtx(chip); + goto recv_begin; + } else { + tpm_wtx_abort(chip); + goto recv_begin; + } + } + + if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { + dev_info(chip->pdev, "WTX-abort acknowledged\n"); + return size; + } + + if (buf[1] == TPM_CTRL_ERROR) { + dev_err(chip->pdev, "ERROR-package received:\n"); + if (buf[4] == TPM_INF_NAK) + dev_err(chip->pdev, + "-> Negative acknowledgement" + " - retransmit command!\n"); + return -EIO; + } + return -EIO; +} + +static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) +{ + int i; + int ret; + u8 count_high, count_low, count_4, count_3, count_2, count_1; + + /* Disabling Reset, LP and IRQC */ + tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); + + ret = 
empty_fifo(chip, 1); + if (ret) { + dev_err(chip->pdev, "Timeout while clearing FIFO\n"); + return -EIO; + } + + ret = wait(chip, STAT_XFE); + if (ret) + return -EIO; + + count_4 = (count & 0xff000000) >> 24; + count_3 = (count & 0x00ff0000) >> 16; + count_2 = (count & 0x0000ff00) >> 8; + count_1 = (count & 0x000000ff); + count_high = ((count + 6) & 0xffffff00) >> 8; + count_low = ((count + 6) & 0x000000ff); + + /* Sending Header */ + wait_and_send(chip, TPM_VL_VER); + wait_and_send(chip, TPM_CTRL_DATA); + wait_and_send(chip, count_high); + wait_and_send(chip, count_low); + + /* Sending Data Header */ + wait_and_send(chip, TPM_VL_VER); + wait_and_send(chip, TPM_VL_CHANNEL_TPM); + wait_and_send(chip, count_4); + wait_and_send(chip, count_3); + wait_and_send(chip, count_2); + wait_and_send(chip, count_1); + + /* Sending Data */ + for (i = 0; i < count; i++) { + wait_and_send(chip, buf[i]); + } + return count; +} + +static void tpm_inf_cancel(struct tpm_chip *chip) +{ + /* + Since we are using the legacy mode to communicate + with the TPM, we have no cancel functions, but have + a workaround for interrupting the TPM through WTX. + */ +} + +static u8 tpm_inf_status(struct tpm_chip *chip) +{ + return tpm_data_in(STAT); +} + +static const struct tpm_class_ops tpm_inf = { + .recv = tpm_inf_recv, + .send = tpm_inf_send, + .cancel = tpm_inf_cancel, + .status = tpm_inf_status, + .req_complete_mask = 0, + .req_complete_val = 0, +}; + +static const struct pnp_device_id tpm_inf_pnp_tbl[] = { + /* Infineon TPMs */ + {"IFX0101", 0}, + {"IFX0102", 0}, + {"", 0} +}; + +MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); + +static int tpm_inf_pnp_probe(struct pnp_dev *dev, + const struct pnp_device_id *dev_id) +{ + int rc = 0; + u8 iol, ioh; + int vendorid[2]; + int version[2]; + int productid[2]; + char chipname[20]; + struct tpm_chip *chip; + + /* read IO-ports through PnP */ + if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && + !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { + + tpm_dev.iotype = TPM_INF_IO_PORT; + + tpm_dev.config_port = pnp_port_start(dev, 0); + tpm_dev.config_size = pnp_port_len(dev, 0); + tpm_dev.data_regs = pnp_port_start(dev, 1); + tpm_dev.data_size = pnp_port_len(dev, 1); + if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) { + rc = -EINVAL; + goto err_last; + } + dev_info(&dev->dev, "Found %s with ID %s\n", + dev->name, dev_id->id); + if (!((tpm_dev.data_regs >> 8) & 0xff)) { + rc = -EINVAL; + goto err_last; + } + /* publish my base address and request region */ + if (request_region(tpm_dev.data_regs, tpm_dev.data_size, + "tpm_infineon0") == NULL) { + rc = -EINVAL; + goto err_last; + } + if (request_region(tpm_dev.config_port, tpm_dev.config_size, + "tpm_infineon0") == NULL) { + release_region(tpm_dev.data_regs, tpm_dev.data_size); + rc = -EINVAL; + goto err_last; + } + } else if (pnp_mem_valid(dev, 0) && + !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { + + tpm_dev.iotype = TPM_INF_IO_MEM; + + tpm_dev.map_base = pnp_mem_start(dev, 0); + tpm_dev.map_size = pnp_mem_len(dev, 0); + + dev_info(&dev->dev, "Found %s with ID %s\n", + dev->name, dev_id->id); + + /* publish my base address and request region */ + if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size, + "tpm_infineon0") == NULL) { + rc = -EINVAL; + goto err_last; + } + + tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size); + if (tpm_dev.mem_base == NULL) { + release_mem_region(tpm_dev.map_base, tpm_dev.map_size); + rc = -EINVAL; + goto err_last; + } + + /* + * The only known MMIO based Infineon TPM 
system provides + * a single large mem region with the device config + * registers at the default TPM_ADDR. The data registers + * seem like they could be placed anywhere within the MMIO + * region, but lets just put them at zero offset. + */ + tpm_dev.index_off = TPM_ADDR; + tpm_dev.data_regs = 0x0; + } else { + rc = -EINVAL; + goto err_last; + } + + /* query chip for its vendor, its version number a.s.o. */ + tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); + tpm_config_out(IDVENL, TPM_INF_ADDR); + vendorid[1] = tpm_config_in(TPM_INF_DATA); + tpm_config_out(IDVENH, TPM_INF_ADDR); + vendorid[0] = tpm_config_in(TPM_INF_DATA); + tpm_config_out(IDPDL, TPM_INF_ADDR); + productid[1] = tpm_config_in(TPM_INF_DATA); + tpm_config_out(IDPDH, TPM_INF_ADDR); + productid[0] = tpm_config_in(TPM_INF_DATA); + tpm_config_out(CHIP_ID1, TPM_INF_ADDR); + version[1] = tpm_config_in(TPM_INF_DATA); + tpm_config_out(CHIP_ID2, TPM_INF_ADDR); + version[0] = tpm_config_in(TPM_INF_DATA); + + switch ((productid[0] << 8) | productid[1]) { + case 6: + snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)"); + break; + case 11: + snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)"); + break; + default: + snprintf(chipname, sizeof(chipname), " (unknown chip)"); + break; + } + + if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) { + + /* configure TPM with IO-ports */ + tpm_config_out(IOLIMH, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); + tpm_config_out(IOLIML, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); + + /* control if IO-ports are set correctly */ + tpm_config_out(IOLIMH, TPM_INF_ADDR); + ioh = tpm_config_in(TPM_INF_DATA); + tpm_config_out(IOLIML, TPM_INF_ADDR); + iol = tpm_config_in(TPM_INF_DATA); + + if ((ioh << 8 | iol) != tpm_dev.data_regs) { + dev_err(&dev->dev, + "Could not set IO-data registers to 0x%x\n", + tpm_dev.data_regs); + rc = -EIO; + goto err_release_region; + } + + /* activate register */ + tpm_config_out(TPM_DAR, TPM_INF_ADDR); + tpm_config_out(0x01, TPM_INF_DATA); + tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); + + /* disable RESET, LP and IRQC */ + tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); + + /* Finally, we're done, print some infos */ + dev_info(&dev->dev, "TPM found: " + "config base 0x%lx, " + "data base 0x%lx, " + "chip version 0x%02x%02x, " + "vendor id 0x%x%x (Infineon), " + "product id 0x%02x%02x" + "%s\n", + tpm_dev.iotype == TPM_INF_IO_PORT ? + tpm_dev.config_port : + tpm_dev.map_base + tpm_dev.index_off, + tpm_dev.iotype == TPM_INF_IO_PORT ? 
+ tpm_dev.data_regs : + tpm_dev.map_base + tpm_dev.data_regs, + version[0], version[1], + vendorid[0], vendorid[1], + productid[0], productid[1], chipname); + + chip = tpmm_chip_alloc(&dev->dev, &tpm_inf); + if (IS_ERR(chip)) { + rc = PTR_ERR(chip); + goto err_release_region; + } + + rc = tpm_chip_register(chip); + if (rc) + goto err_release_region; + + return 0; + } else { + rc = -ENODEV; + goto err_release_region; + } + +err_release_region: + if (tpm_dev.iotype == TPM_INF_IO_PORT) { + release_region(tpm_dev.data_regs, tpm_dev.data_size); + release_region(tpm_dev.config_port, tpm_dev.config_size); + } else { + iounmap(tpm_dev.mem_base); + release_mem_region(tpm_dev.map_base, tpm_dev.map_size); + } + +err_last: + return rc; +} + +static void tpm_inf_pnp_remove(struct pnp_dev *dev) +{ + struct tpm_chip *chip = pnp_get_drvdata(dev); + + tpm_chip_unregister(chip); + + if (tpm_dev.iotype == TPM_INF_IO_PORT) { + release_region(tpm_dev.data_regs, tpm_dev.data_size); + release_region(tpm_dev.config_port, + tpm_dev.config_size); + } else { + iounmap(tpm_dev.mem_base); + release_mem_region(tpm_dev.map_base, tpm_dev.map_size); + } +} + +#ifdef CONFIG_PM_SLEEP +static int tpm_inf_resume(struct device *dev) +{ + /* Re-configure TPM after suspending */ + tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); + tpm_config_out(IOLIMH, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); + tpm_config_out(IOLIML, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); + /* activate register */ + tpm_config_out(TPM_DAR, TPM_INF_ADDR); + tpm_config_out(0x01, TPM_INF_DATA); + tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); + /* disable RESET, LP and IRQC */ + tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); + return tpm_pm_resume(dev); +} +#endif +static SIMPLE_DEV_PM_OPS(tpm_inf_pm, tpm_pm_suspend, tpm_inf_resume); + +static struct pnp_driver tpm_inf_pnp_driver = { + .name = "tpm_inf_pnp", + .id_table = tpm_inf_pnp_tbl, + .probe = tpm_inf_pnp_probe, + .remove = tpm_inf_pnp_remove, + .driver = { + .pm = &tpm_inf_pm, + } +}; + +module_pnp_driver(tpm_inf_pnp_driver); + +MODULE_AUTHOR("Marcel Selhorst "); +MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); +MODULE_VERSION("1.9.2"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_nsc.c b/kernel/drivers/char/tpm/tpm_nsc.c new file mode 100644 index 000000000..289389ece --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_nsc.c @@ -0,0 +1,383 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include "tpm.h"
+
+/* National definitions */
+enum tpm_nsc_addr {
+    TPM_NSC_IRQ = 0x07,
+    TPM_NSC_BASE0_HI = 0x60,
+    TPM_NSC_BASE0_LO = 0x61,
+    TPM_NSC_BASE1_HI = 0x62,
+    TPM_NSC_BASE1_LO = 0x63
+};
+
+enum tpm_nsc_index {
+    NSC_LDN_INDEX = 0x07,
+    NSC_SID_INDEX = 0x20,
+    NSC_LDC_INDEX = 0x30,
+    NSC_DIO_INDEX = 0x60,
+    NSC_CIO_INDEX = 0x62,
+    NSC_IRQ_INDEX = 0x70,
+    NSC_ITS_INDEX = 0x71
+};
+
+enum tpm_nsc_status_loc {
+    NSC_STATUS = 0x01,
+    NSC_COMMAND = 0x01,
+    NSC_DATA = 0x00
+};
+
+/* status bits */
+enum tpm_nsc_status {
+    NSC_STATUS_OBF = 0x01, /* output buffer full */
+    NSC_STATUS_IBF = 0x02, /* input buffer full */
+    NSC_STATUS_F0 = 0x04, /* F0 */
+    NSC_STATUS_A2 = 0x08, /* A2 */
+    NSC_STATUS_RDY = 0x10, /* ready to receive command */
+    NSC_STATUS_IBR = 0x20 /* ready to receive data */
+};
+
+/* command bits */
+enum tpm_nsc_cmd_mode {
+    NSC_COMMAND_NORMAL = 0x01, /* normal mode */
+    NSC_COMMAND_EOC = 0x03,
+    NSC_COMMAND_CANCEL = 0x22
+};
+
+/*
+ * Wait for a certain status to appear
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 *data)
+{
+    unsigned long stop;
+
+    /* status immediately available check */
+    *data = inb(chip->vendor.base + NSC_STATUS);
+    if ((*data & mask) == val)
+        return 0;
+
+    /* wait for status */
+    stop = jiffies + 10 * HZ;
+    do {
+        msleep(TPM_TIMEOUT);
+        *data = inb(chip->vendor.base + 1);
+        if ((*data & mask) == val)
+            return 0;
+    } while (time_before(jiffies, stop));
+
+    return -EBUSY;
+}
+
+static int nsc_wait_for_ready(struct tpm_chip *chip)
+{
+    int status;
+    unsigned long stop;
+
+    /* status immediately available check */
+    status = inb(chip->vendor.base + NSC_STATUS);
+    if (status & NSC_STATUS_OBF)
+        status = inb(chip->vendor.base + NSC_DATA);
+    if (status & NSC_STATUS_RDY)
+        return 0;
+
+    /* wait for status */
+    stop = jiffies + 100;
+    do {
+        msleep(TPM_TIMEOUT);
+        status = inb(chip->vendor.base + NSC_STATUS);
+        if (status & NSC_STATUS_OBF)
+            status = inb(chip->vendor.base + NSC_DATA);
+        if (status & NSC_STATUS_RDY)
+            return 0;
+    } while (time_before(jiffies, stop));
+
+    dev_info(chip->pdev, "wait for ready failed\n");
+    return -EBUSY;
+}
+
+static int tpm_nsc_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+    u8 *buffer = buf;
+    u8 data, *p;
+    u32 size;
+    __be32 *native_size;
+
+    if (count < 6)
+        return -EIO;
+
+    if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
+        dev_err(chip->pdev, "F0 timeout\n");
+        return -EIO;
+    }
+    if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
+        dev_err(chip->pdev, "not in normal mode (0x%x)\n",
+            data);
+        return -EIO;
+    }
+
+    /* read the whole packet */
+    for (p = buffer; p < &buffer[count]; p++) {
+        if (wait_for_stat(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
+            dev_err(chip->pdev,
+                "OBF timeout (while reading data)\n");
+            return -EIO;
+        }
+        if (data & NSC_STATUS_F0)
+            break;
+        *p = inb(chip->vendor.base + NSC_DATA);
+    }
+
+    if ((data & NSC_STATUS_F0) == 0 &&
+        (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
+        dev_err(chip->pdev, "F0 not set\n");
+        return -EIO;
+    }
+    if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
+        dev_err(chip->pdev,
+            "expected end of command (0x%x)\n", data);
+        return -EIO;
+    }
+
+    native_size = (__force __be32 *) (buf + 2);
+    size = be32_to_cpu(*native_size);
+
+    if (count < size)
+        return -EIO;
+
+    return size;
+}
+
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+    u8 data;
+    int i;
+
+    /*
+     * If we hit the chip
with back to back commands it locks up + * and never set IBF. Hitting it with this "hammer" seems to + * fix it. Not sure why this is needed, we followed the flow + * chart in the manual to the letter. + */ + outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); + + if (nsc_wait_for_ready(chip) != 0) + return -EIO; + + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { + dev_err(chip->pdev, "IBF timeout\n"); + return -EIO; + } + + outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); + if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { + dev_err(chip->pdev, "IBR timeout\n"); + return -EIO; + } + + for (i = 0; i < count; i++) { + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { + dev_err(chip->pdev, + "IBF timeout (while writing data)\n"); + return -EIO; + } + outb(buf[i], chip->vendor.base + NSC_DATA); + } + + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { + dev_err(chip->pdev, "IBF timeout\n"); + return -EIO; + } + outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); + + return count; +} + +static void tpm_nsc_cancel(struct tpm_chip *chip) +{ + outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); +} + +static u8 tpm_nsc_status(struct tpm_chip *chip) +{ + return inb(chip->vendor.base + NSC_STATUS); +} + +static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == NSC_STATUS_RDY); +} + +static const struct tpm_class_ops tpm_nsc = { + .recv = tpm_nsc_recv, + .send = tpm_nsc_send, + .cancel = tpm_nsc_cancel, + .status = tpm_nsc_status, + .req_complete_mask = NSC_STATUS_OBF, + .req_complete_val = NSC_STATUS_OBF, + .req_canceled = tpm_nsc_req_canceled, +}; + +static struct platform_device *pdev = NULL; + +static void tpm_nsc_remove(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + release_region(chip->vendor.base, 2); +} + +static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); + +static struct platform_driver nsc_drv = { + .driver = { + .name = "tpm_nsc", + .pm = &tpm_nsc_pm, + }, +}; + +static int __init init_nsc(void) +{ + int rc = 0; + int lo, hi, err; + int nscAddrBase = TPM_ADDR; + struct tpm_chip *chip; + unsigned long base; + + /* verify that it is a National part (SID) */ + if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { + nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)| + (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE); + if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6) + return -ENODEV; + } + + err = platform_driver_register(&nsc_drv); + if (err) + return err; + + hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI); + lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO); + base = (hi<<8) | lo; + + /* enable the DPM module */ + tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); + + pdev = platform_device_alloc("tpm_nscl0", -1); + if (!pdev) { + rc = -ENOMEM; + goto err_unreg_drv; + } + + pdev->num_resources = 0; + pdev->dev.driver = &nsc_drv.driver; + pdev->dev.release = tpm_nsc_remove; + + if ((rc = platform_device_add(pdev)) < 0) + goto err_put_dev; + + if (request_region(base, 2, "tpm_nsc0") == NULL ) { + rc = -EBUSY; + goto err_del_dev; + } + + chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc); + if (IS_ERR(chip)) { + rc = -ENODEV; + goto err_rel_reg; + } + + rc = tpm_chip_register(chip); + if (rc) + goto err_rel_reg; + + dev_dbg(&pdev->dev, "NSC TPM detected\n"); + dev_dbg(&pdev->dev, + "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n", + tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20), + 
tpm_read_index(nscAddrBase,0x27)); + dev_dbg(&pdev->dev, + "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n", + tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25), + tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28)); + dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n", + (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61)); + dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n", + (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63)); + dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n", + tpm_read_index(nscAddrBase,0x70)); + dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n", + tpm_read_index(nscAddrBase,0x71)); + dev_dbg(&pdev->dev, + "NSC DMA channel select0 0x%x, select1 0x%x\n", + tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75)); + dev_dbg(&pdev->dev, + "NSC Config " + "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1), + tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3), + tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5), + tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7), + tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9)); + + dev_info(&pdev->dev, + "NSC TPM revision %d\n", + tpm_read_index(nscAddrBase, 0x27) & 0x1F); + + chip->vendor.base = base; + + return 0; + +err_rel_reg: + release_region(base, 2); +err_del_dev: + platform_device_del(pdev); +err_put_dev: + platform_device_put(pdev); +err_unreg_drv: + platform_driver_unregister(&nsc_drv); + return rc; +} + +static void __exit cleanup_nsc(void) +{ + if (pdev) { + tpm_nsc_remove(&pdev->dev); + platform_device_unregister(pdev); + } + + platform_driver_unregister(&nsc_drv); +} + +module_init(init_nsc); +module_exit(cleanup_nsc); + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/tpm_of.c b/kernel/drivers/char/tpm/tpm_of.c new file mode 100644 index 000000000..c002d1bd9 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_of.c @@ -0,0 +1,73 @@ +/* + * Copyright 2012 IBM Corporation + * + * Author: Ashley Lai + * + * Maintained by: + * + * Read the event log created by the firmware on PPC64 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include "tpm.h"
+#include "tpm_eventlog.h"
+
+int read_log(struct tpm_bios_log *log)
+{
+    struct device_node *np;
+    const u32 *sizep;
+    const __be64 *basep;
+
+    if (log->bios_event_log != NULL) {
+        pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
+        return -EFAULT;
+    }
+
+    np = of_find_node_by_name(NULL, "ibm,vtpm");
+    if (!np) {
+        pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+        return -ENODEV;
+    }
+
+    sizep = of_get_property(np, "linux,sml-size", NULL);
+    if (sizep == NULL) {
+        pr_err("%s: ERROR - SML size not found\n", __func__);
+        goto cleanup_eio;
+    }
+    if (*sizep == 0) {
+        pr_err("%s: ERROR - event log area empty\n", __func__);
+        goto cleanup_eio;
+    }
+
+    basep = of_get_property(np, "linux,sml-base", NULL);
+    if (basep == NULL) {
+        pr_err("%s: ERROR - SML not found\n", __func__);
+        goto cleanup_eio;
+    }
+
+    of_node_put(np);
+    log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
+    if (!log->bios_event_log) {
+        pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
+               __func__);
+        return -ENOMEM;
+    }
+
+    log->bios_event_log_end = log->bios_event_log + *sizep;
+
+    memcpy(log->bios_event_log, __va(be64_to_cpup(basep)), *sizep);
+
+    return 0;
+
+cleanup_eio:
+    of_node_put(np);
+    return -EIO;
+}
diff --git a/kernel/drivers/char/tpm/tpm_ppi.c b/kernel/drivers/char/tpm/tpm_ppi.c
new file mode 100644
index 000000000..6ca9b5d78
--- /dev/null
+++ b/kernel/drivers/char/tpm/tpm_ppi.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ *
+ * Authors:
+ * Xiaoyan Zhang
+ * Jiang Liu
+ * Jarkko Sakkinen
+ *
+ * Maintained by:
+ *
+ * This file contains implementation of the sysfs interface for PPI.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+#include <linux/acpi.h>
+#include "tpm.h"
+
+#define TPM_PPI_REVISION_ID 1
+#define TPM_PPI_FN_VERSION 1
+#define TPM_PPI_FN_SUBREQ 2
+#define TPM_PPI_FN_GETREQ 3
+#define TPM_PPI_FN_GETACT 4
+#define TPM_PPI_FN_GETRSP 5
+#define TPM_PPI_FN_SUBREQ2 7
+#define TPM_PPI_FN_GETOPR 8
+#define PPI_TPM_REQ_MAX 22
+#define PPI_VS_REQ_START 128
+#define PPI_VS_REQ_END 255
+
+static const u8 tpm_ppi_uuid[] = {
+    0xA6, 0xFA, 0xDD, 0x3D,
+    0x1B, 0x36,
+    0xB4, 0x4E,
+    0xA4, 0x24,
+    0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
+};
+
+static inline union acpi_object *
+tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
+         union acpi_object *argv4)
+{
+    BUG_ON(!ppi_handle);
+    return acpi_evaluate_dsm_typed(ppi_handle, tpm_ppi_uuid,
+                       TPM_PPI_REVISION_ID,
+                       func, argv4, type);
+}
+
+static ssize_t tpm_show_ppi_version(struct device *dev,
+                    struct device_attribute *attr, char *buf)
+{
+    struct tpm_chip *chip = dev_get_drvdata(dev);
+
+    return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+}
+
+static ssize_t tpm_show_ppi_request(struct device *dev,
+                    struct device_attribute *attr, char *buf)
+{
+    ssize_t size = -EINVAL;
+    union acpi_object *obj;
+    struct tpm_chip *chip = dev_get_drvdata(dev);
+
+    obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
+               ACPI_TYPE_PACKAGE, NULL);
+    if (!obj)
+        return -ENXIO;
+
+    /*
+     * output.pointer should be of package type, including two integers.
+     * The first is function return code, 0 means success and 1 means
+     * error. The second is pending TPM operation requested by the OS, 0
+     * means none and >0 means operation value.
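+     * (the operation values themselves are assigned by the PPI
+     * specification; value 5, for example, is the request to clear
+     * the TPM)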
+ */ + if (obj->package.count == 2 && + obj->package.elements[0].type == ACPI_TYPE_INTEGER && + obj->package.elements[1].type == ACPI_TYPE_INTEGER) { + if (obj->package.elements[0].integer.value) + size = -EFAULT; + else + size = scnprintf(buf, PAGE_SIZE, "%llu\n", + obj->package.elements[1].integer.value); + } + + ACPI_FREE(obj); + + return size; +} + +static ssize_t tpm_store_ppi_request(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 req; + u64 ret; + int func = TPM_PPI_FN_SUBREQ; + union acpi_object *obj, tmp; + union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp); + struct tpm_chip *chip = dev_get_drvdata(dev); + + /* + * the function to submit TPM operation request to pre-os environment + * is updated with function index from SUBREQ to SUBREQ2 since PPI + * version 1.1 + */ + if (acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid, + TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2)) + func = TPM_PPI_FN_SUBREQ2; + + /* + * PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. Some BIOS + * accept buffer/string/integer type, but some BIOS accept buffer/ + * string/package type. For PPI version 1.0 and 1.1, use buffer type + * for compatibility, and use package type since 1.2 according to spec. + */ + if (strcmp(chip->ppi_version, "1.2") < 0) { + if (sscanf(buf, "%d", &req) != 1) + return -EINVAL; + argv4.type = ACPI_TYPE_BUFFER; + argv4.buffer.length = sizeof(req); + argv4.buffer.pointer = (u8 *)&req; + } else { + tmp.type = ACPI_TYPE_INTEGER; + if (sscanf(buf, "%llu", &tmp.integer.value) != 1) + return -EINVAL; + } + + obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER, + &argv4); + if (!obj) { + return -ENXIO; + } else { + ret = obj->integer.value; + ACPI_FREE(obj); + } + + if (ret == 0) + return (acpi_status)count; + + return (ret == 1) ? -EPERM : -EFAULT; +} + +static ssize_t tpm_show_ppi_transition_action(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u32 ret; + acpi_status status; + union acpi_object *obj = NULL; + union acpi_object tmp = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = 0, + .buffer.pointer = NULL + }; + struct tpm_chip *chip = dev_get_drvdata(dev); + + static char *info[] = { + "None", + "Shutdown", + "Reboot", + "OS Vendor-specific", + "Error", + }; + + /* + * PPI spec defines params[3].type as empty package, but some platforms + * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for + * compatibility, define params[3].type as buffer, if PPI version < 1.2 + */ + if (strcmp(chip->ppi_version, "1.2") < 0) + obj = &tmp; + obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT, + ACPI_TYPE_INTEGER, obj); + if (!obj) { + return -ENXIO; + } else { + ret = obj->integer.value; + ACPI_FREE(obj); + } + + if (ret < ARRAY_SIZE(info) - 1) + status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]); + else + status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, + info[ARRAY_SIZE(info)-1]); + return status; +} + +static ssize_t tpm_show_ppi_response(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + acpi_status status = -EINVAL; + union acpi_object *obj, *ret_obj; + u64 req, res; + struct tpm_chip *chip = dev_get_drvdata(dev); + + obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP, + ACPI_TYPE_PACKAGE, NULL); + if (!obj) + return -ENXIO; + + /* + * parameter output.pointer should be of package type, including + * 3 integers. 
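tpm_store_ppi_request() above follows the sysfs store contract: parse the user's string, act on it, and return the number of bytes consumed (the (acpi_status)count cast in the original is a cosmetic oddity; the method's return type is ssize_t). A minimal sketch of that contract, with a hypothetical submit_request() standing in for the _DSM evaluation:

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        u32 req;
        int rc;

        rc = kstrtou32(buf, 10, &req);  /* stricter than sscanf("%d") */
        if (rc)
                return rc;

        rc = submit_request(dev, req);  /* hypothetical back end */
        if (rc)
                return rc;

        return count;                   /* report the whole write consumed */
}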
The first means function return code, the second means + * most recent TPM operation request, and the last means response to + * the most recent TPM operation request. Only if the first is 0, and + * the second integer is not 0, the response makes sense. + */ + ret_obj = obj->package.elements; + if (obj->package.count < 3 || + ret_obj[0].type != ACPI_TYPE_INTEGER || + ret_obj[1].type != ACPI_TYPE_INTEGER || + ret_obj[2].type != ACPI_TYPE_INTEGER) + goto cleanup; + + if (ret_obj[0].integer.value) { + status = -EFAULT; + goto cleanup; + } + + req = ret_obj[1].integer.value; + res = ret_obj[2].integer.value; + if (req) { + if (res == 0) + status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, + "0: Success"); + else if (res == 0xFFFFFFF0) + status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, + "0xFFFFFFF0: User Abort"); + else if (res == 0xFFFFFFF1) + status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, + "0xFFFFFFF1: BIOS Failure"); + else if (res >= 1 && res <= 0x00000FFF) + status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", + req, res, "Corresponding TPM error"); + else + status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", + req, res, "Error"); + } else { + status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n", + req, "No Recent Request"); + } + +cleanup: + ACPI_FREE(obj); + return status; +} + +static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, + u32 end) +{ + int i; + u32 ret; + char *str = buf; + union acpi_object *obj, tmp; + union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp); + + static char *info[] = { + "Not implemented", + "BIOS only", + "Blocked for OS by BIOS", + "User required", + "User not required", + }; + + if (!acpi_check_dsm(dev_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID, + 1 << TPM_PPI_FN_GETOPR)) + return -EPERM; + + tmp.integer.type = ACPI_TYPE_INTEGER; + for (i = start; i <= end; i++) { + tmp.integer.value = i; + obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR, + ACPI_TYPE_INTEGER, &argv); + if (!obj) { + return -ENOMEM; + } else { + ret = obj->integer.value; + ACPI_FREE(obj); + } + + if (ret > 0 && ret < ARRAY_SIZE(info)) + str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n", + i, ret, info[ret]); + } + + return str - buf; +} + +static ssize_t tpm_show_ppi_tcg_operations(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + return show_ppi_operations(chip->acpi_dev_handle, buf, 0, + PPI_TPM_REQ_MAX); +} + +static ssize_t tpm_show_ppi_vs_operations(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START, + PPI_VS_REQ_END); +} + +static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL); +static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP, + tpm_show_ppi_request, tpm_store_ppi_request); +static DEVICE_ATTR(transition_action, S_IRUGO, + tpm_show_ppi_transition_action, NULL); +static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL); +static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL); +static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL); + +static struct attribute *ppi_attrs[] = { + &dev_attr_version.attr, + &dev_attr_request.attr, + &dev_attr_transition_action.attr, + &dev_attr_response.attr, + &dev_attr_tcg_operations.attr, + &dev_attr_vs_operations.attr, NULL, +}; +static struct attribute_group ppi_attr_grp = { + .name = "ppi", + .attrs = ppi_attrs +}; + +int 
tpm_add_ppi(struct tpm_chip *chip) +{ + union acpi_object *obj; + int rc; + + if (!chip->acpi_dev_handle) + return 0; + + if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid, + TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION)) + return 0; + + /* Cache PPI version string. */ + obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid, + TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION, + NULL, ACPI_TYPE_STRING); + if (obj) { + strlcpy(chip->ppi_version, obj->string.pointer, + sizeof(chip->ppi_version)); + ACPI_FREE(obj); + } + + rc = sysfs_create_group(&chip->pdev->kobj, &ppi_attr_grp); + + if (!rc) + chip->flags |= TPM_CHIP_FLAG_PPI; + + return rc; +} + +void tpm_remove_ppi(struct tpm_chip *chip) +{ + if (chip->flags & TPM_CHIP_FLAG_PPI) + sysfs_remove_group(&chip->pdev->kobj, &ppi_attr_grp); +} diff --git a/kernel/drivers/char/tpm/tpm_tis.c b/kernel/drivers/char/tpm/tpm_tis.c new file mode 100644 index 000000000..f2dffa770 --- /dev/null +++ b/kernel/drivers/char/tpm/tpm_tis.c @@ -0,0 +1,1013 @@ +/* + * Copyright (C) 2005, 2006 IBM Corporation + * Copyright (C) 2014 Intel Corporation + * + * Authors: + * Leendert van Doorn + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This device driver implements the TPM interface as defined in + * the TCG TPM Interface Spec version 1.2, revision 1.0. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +enum tis_access { + TPM_ACCESS_VALID = 0x80, + TPM_ACCESS_ACTIVE_LOCALITY = 0x20, + TPM_ACCESS_REQUEST_PENDING = 0x04, + TPM_ACCESS_REQUEST_USE = 0x02, +}; + +enum tis_status { + TPM_STS_VALID = 0x80, + TPM_STS_COMMAND_READY = 0x40, + TPM_STS_GO = 0x20, + TPM_STS_DATA_AVAIL = 0x10, + TPM_STS_DATA_EXPECT = 0x08, +}; + +enum tis_int_flags { + TPM_GLOBAL_INT_ENABLE = 0x80000000, + TPM_INTF_BURST_COUNT_STATIC = 0x100, + TPM_INTF_CMD_READY_INT = 0x080, + TPM_INTF_INT_EDGE_FALLING = 0x040, + TPM_INTF_INT_EDGE_RISING = 0x020, + TPM_INTF_INT_LEVEL_LOW = 0x010, + TPM_INTF_INT_LEVEL_HIGH = 0x008, + TPM_INTF_LOCALITY_CHANGE_INT = 0x004, + TPM_INTF_STS_VALID_INT = 0x002, + TPM_INTF_DATA_AVAIL_INT = 0x001, +}; + +enum tis_defaults { + TIS_MEM_BASE = 0xFED40000, + TIS_MEM_LEN = 0x5000, + TIS_SHORT_TIMEOUT = 750, /* ms */ + TIS_LONG_TIMEOUT = 2000, /* 2 sec */ +}; + + +/* Some timeout values are needed before it is known whether the chip is + * TPM 1.0 or TPM 2.0. 
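show_ppi_operations() a little earlier advances str after every scnprintf() to build a multi-line sysfs buffer. The safe form of that accumulation shrinks the space argument as the buffer fills; the loop above passes PAGE_SIZE each time and so relies on the output staying well under a page. A bounded sketch:

static ssize_t dump_lines(char *buf, const char * const names[], int n)
{
        ssize_t len = 0;
        int i;

        for (i = 0; i < n; i++)
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "%d: %s\n", i, names[i]);
        return len;     /* scnprintf() never writes past the space given */
}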
+ */ +#define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A) +#define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B) +#define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C) +#define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D) + +#define TPM_ACCESS(l) (0x0000 | ((l) << 12)) +#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) +#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) +#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) +#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) +#define TPM_STS(l) (0x0018 | ((l) << 12)) +#define TPM_STS3(l) (0x001b | ((l) << 12)) +#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) + +#define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) +#define TPM_RID(l) (0x0F04 | ((l) << 12)) + +struct priv_data { + bool irq_tested; +}; + +#if defined(CONFIG_PNP) && defined(CONFIG_ACPI) +static int is_itpm(struct pnp_dev *dev) +{ + struct acpi_device *acpi = pnp_acpi_device(dev); + struct acpi_hardware_id *id; + + if (!acpi) + return 0; + + list_for_each_entry(id, &acpi->pnp.ids, list) { + if (!strcmp("INTC0102", id->id)) + return 1; + } + + return 0; +} +#else +static inline int is_itpm(struct pnp_dev *dev) +{ + return 0; +} +#endif + +/* Before we attempt to access the TPM we must see that the valid bit is set. + * The specification says that this bit is 0 at reset and remains 0 until the + * 'TPM has gone through its self test and initialization and has established + * correct values in the other bits.' */ +static int wait_startup(struct tpm_chip *chip, int l) +{ + unsigned long stop = jiffies + chip->vendor.timeout_a; + do { + if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & + TPM_ACCESS_VALID) + return 0; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + return -1; +} + +static int check_locality(struct tpm_chip *chip, int l) +{ + if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & + (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == + (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) + return chip->vendor.locality = l; + + return -1; +} + +static void release_locality(struct tpm_chip *chip, int l, int force) +{ + if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & + (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == + (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) + iowrite8(TPM_ACCESS_ACTIVE_LOCALITY, + chip->vendor.iobase + TPM_ACCESS(l)); +} + +static int request_locality(struct tpm_chip *chip, int l) +{ + unsigned long stop, timeout; + long rc; + + if (check_locality(chip, l) >= 0) + return l; + + iowrite8(TPM_ACCESS_REQUEST_USE, + chip->vendor.iobase + TPM_ACCESS(l)); + + stop = jiffies + chip->vendor.timeout_a; + + if (chip->vendor.irq) { +again: + timeout = stop - jiffies; + if ((long)timeout <= 0) + return -1; + rc = wait_event_interruptible_timeout(chip->vendor.int_queue, + (check_locality + (chip, l) >= 0), + timeout); + if (rc > 0) + return l; + if (rc == -ERESTARTSYS && freezing(current)) { + clear_thread_flag(TIF_SIGPENDING); + goto again; + } + } else { + /* wait for burstcount */ + do { + if (check_locality(chip, l) >= 0) + return l; + msleep(TPM_TIMEOUT); + } + while (time_before(jiffies, stop)); + } + return -1; +} + +static u8 tpm_tis_status(struct tpm_chip *chip) +{ + return ioread8(chip->vendor.iobase + + TPM_STS(chip->vendor.locality)); +} + +static void tpm_tis_ready(struct tpm_chip *chip) +{ + /* this causes the current command to be aborted */ + iowrite8(TPM_STS_COMMAND_READY, + chip->vendor.iobase + TPM_STS(chip->vendor.locality)); +} + +static int get_burstcount(struct tpm_chip *chip) +{ + 
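wait_startup(), request_locality() and get_burstcount() all share one polling shape: compute a jiffies deadline once, then re-read the register with a short sleep until the bit is set or time_before() fails; computing the deadline first keeps the test correct across jiffies wraparound. A generic sketch (TPM_TIMEOUT is the driver's 5 ms poll interval from tpm.h):

static int poll_bit(void __iomem *reg, u8 mask, unsigned long timeout_jiffies)
{
        unsigned long stop = jiffies + timeout_jiffies; /* deadline first */

        do {
                if (ioread8(reg) & mask)
                        return 0;
                msleep(TPM_TIMEOUT);    /* back off instead of spinning */
        } while (time_before(jiffies, stop));

        return -ETIMEDOUT;
}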
unsigned long stop; + int burstcnt; + + /* wait for burstcount */ + /* which timeout value, spec has 2 answers (c & d) */ + stop = jiffies + chip->vendor.timeout_d; + do { + burstcnt = ioread8(chip->vendor.iobase + + TPM_STS(chip->vendor.locality) + 1); + burstcnt += ioread8(chip->vendor.iobase + + TPM_STS(chip->vendor.locality) + + 2) << 8; + if (burstcnt) + return burstcnt; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + return -EBUSY; +} + +static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int size = 0, burstcnt; + while (size < count && + wait_for_tpm_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + chip->vendor.timeout_c, + &chip->vendor.read_queue, true) + == 0) { + burstcnt = get_burstcount(chip); + for (; burstcnt > 0 && size < count; burstcnt--) + buf[size++] = ioread8(chip->vendor.iobase + + TPM_DATA_FIFO(chip->vendor. + locality)); + } + return size; +} + +static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int size = 0; + int expected, status; + + if (count < TPM_HEADER_SIZE) { + size = -EIO; + goto out; + } + + /* read first 10 bytes, including tag, paramsize, and result */ + if ((size = + recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { + dev_err(chip->pdev, "Unable to read header\n"); + goto out; + } + + expected = be32_to_cpu(*(__be32 *) (buf + 2)); + if (expected > count) { + size = -EIO; + goto out; + } + + if ((size += + recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE)) < expected) { + dev_err(chip->pdev, "Unable to read remainder of result\n"); + size = -ETIME; + goto out; + } + + wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, + &chip->vendor.int_queue, false); + status = tpm_tis_status(chip); + if (status & TPM_STS_DATA_AVAIL) { /* retry? 
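tpm_tis_recv() above reads TPM_HEADER_SIZE (10) bytes first and then takes the big-endian total length from offset 2 to size the second read. The wire layout that makes buf + 2 the right offset, as an illustrative struct:

struct tpm_resp_header {                /* 10 bytes on the wire */
        __be16 tag;                     /* offset 0 */
        __be32 length;                  /* offset 2: total response size */
        __be32 return_code;             /* offset 6 */
} __packed;

/* equivalent to: expected = be32_to_cpu(*(__be32 *)(buf + 2)); */
static u32 resp_len(const u8 *buf)
{
        return be32_to_cpu(((const struct tpm_resp_header *)buf)->length);
}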
*/ + dev_err(chip->pdev, "Error left over data\n"); + size = -EIO; + goto out; + } + +out: + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + return size; +} + +static bool itpm; +module_param(itpm, bool, 0444); +MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); + +/* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc, status, burstcnt; + size_t count = 0; + + if (request_locality(chip, 0) < 0) + return -EBUSY; + + status = tpm_tis_status(chip); + if ((status & TPM_STS_COMMAND_READY) == 0) { + tpm_tis_ready(chip); + if (wait_for_tpm_stat + (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, + &chip->vendor.int_queue, false) < 0) { + rc = -ETIME; + goto out_err; + } + } + + while (count < len - 1) { + burstcnt = get_burstcount(chip); + for (; burstcnt > 0 && count < len - 1; burstcnt--) { + iowrite8(buf[count], chip->vendor.iobase + + TPM_DATA_FIFO(chip->vendor.locality)); + count++; + } + + wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, + &chip->vendor.int_queue, false); + status = tpm_tis_status(chip); + if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { + rc = -EIO; + goto out_err; + } + } + + /* write last byte */ + iowrite8(buf[count], + chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); + wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, + &chip->vendor.int_queue, false); + status = tpm_tis_status(chip); + if ((status & TPM_STS_DATA_EXPECT) != 0) { + rc = -EIO; + goto out_err; + } + + return 0; + +out_err: + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + return rc; +} + +static void disable_interrupts(struct tpm_chip *chip) +{ + u32 intmask; + + intmask = + ioread32(chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + intmask &= ~TPM_GLOBAL_INT_ENABLE; + iowrite32(intmask, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + free_irq(chip->vendor.irq, chip); + chip->vendor.irq = 0; +} + +/* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc; + u32 ordinal; + unsigned long dur; + + rc = tpm_tis_send_data(chip, buf, len); + if (rc < 0) + return rc; + + /* go and do it */ + iowrite8(TPM_STS_GO, + chip->vendor.iobase + TPM_STS(chip->vendor.locality)); + + if (chip->vendor.irq) { + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) + dur = tpm2_calc_ordinal_duration(chip, ordinal); + else + dur = tpm_calc_ordinal_duration(chip, ordinal); + + if (wait_for_tpm_stat + (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, + &chip->vendor.read_queue, false) < 0) { + rc = -ETIME; + goto out_err; + } + } + return len; +out_err: + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + return rc; +} + +static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc, irq; + struct priv_data *priv = chip->vendor.priv; + + if (!chip->vendor.irq || priv->irq_tested) + return tpm_tis_send_main(chip, buf, len); + + /* Verify receipt of the expected IRQ */ + irq = chip->vendor.irq; + chip->vendor.irq = 0; + rc = tpm_tis_send_main(chip, buf, len); + chip->vendor.irq = 
irq; + if (!priv->irq_tested) + msleep(1); + if (!priv->irq_tested) { + disable_interrupts(chip); + dev_err(chip->pdev, + FW_BUG "TPM interrupt not working, polling instead\n"); + } + priv->irq_tested = true; + return rc; +} + +struct tis_vendor_timeout_override { + u32 did_vid; + unsigned long timeout_us[4]; +}; + +static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { + /* Atmel 3204 */ + { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), + (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, +}; + +static bool tpm_tis_update_timeouts(struct tpm_chip *chip, + unsigned long *timeout_cap) +{ + int i; + u32 did_vid; + + did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); + + for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { + if (vendor_timeout_overrides[i].did_vid != did_vid) + continue; + memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, + sizeof(vendor_timeout_overrides[i].timeout_us)); + return true; + } + + return false; +} + +/* + * Early probing for iTPM with STS_DATA_EXPECT flaw. + * Try sending command without itpm flag set and if that + * fails, repeat with itpm flag set. + */ +static int probe_itpm(struct tpm_chip *chip) +{ + int rc = 0; + u8 cmd_getticks[] = { + 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0xf1 + }; + size_t len = sizeof(cmd_getticks); + bool rem_itpm = itpm; + u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0)); + + /* probe only iTPMS */ + if (vendor != TPM_VID_INTEL) + return 0; + + itpm = false; + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) + goto out; + + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + + itpm = true; + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) { + dev_info(chip->pdev, "Detected an iTPM.\n"); + rc = 1; + } else + rc = -EFAULT; + +out: + itpm = rem_itpm; + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + + return rc; +} + +static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) +{ + switch (chip->vendor.manufacturer_id) { + case TPM_VID_WINBOND: + return ((status == TPM_STS_VALID) || + (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); + case TPM_VID_STM: + return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); + default: + return (status == TPM_STS_COMMAND_READY); + } +} + +static const struct tpm_class_ops tpm_tis = { + .status = tpm_tis_status, + .recv = tpm_tis_recv, + .send = tpm_tis_send, + .cancel = tpm_tis_ready, + .update_timeouts = tpm_tis_update_timeouts, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = tpm_tis_req_canceled, +}; + +static irqreturn_t tis_int_probe(int irq, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + u32 interrupt; + + interrupt = ioread32(chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + + if (interrupt == 0) + return IRQ_NONE; + + chip->vendor.probed_irq = irq; + + /* Clear interrupts handled with TPM_EOI */ + iowrite32(interrupt, + chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + return IRQ_HANDLED; +} + +static irqreturn_t tis_int_handler(int dummy, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + u32 interrupt; + int i; + + interrupt = ioread32(chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + + if (interrupt == 0) + return IRQ_NONE; + + ((struct priv_data *)chip->vendor.priv)->irq_tested = true; + if (interrupt & TPM_INTF_DATA_AVAIL_INT) + 
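vendor_timeout_overrides[] above keys per-vendor fixups off the 32-bit DID/VID register value, which keeps quirks in one table instead of scattered if-chains. The lookup reduces to a linear scan:

struct timeout_quirk {
        u32 did_vid;                    /* value read at TPM_DID_VID(0) */
        unsigned long timeout_us[4];
};

static const struct timeout_quirk quirks[] = {
        { 0x32041114,                   /* Atmel 3204 */
          { 750 * 1000, 2000 * 1000, 750 * 1000, 750 * 1000 } },
};

static const struct timeout_quirk *find_quirk(u32 did_vid)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(quirks); i++)
                if (quirks[i].did_vid == did_vid)
                        return &quirks[i];
        return NULL;                    /* no override: keep defaults */
}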
wake_up_interruptible(&chip->vendor.read_queue); + if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) + for (i = 0; i < 5; i++) + if (check_locality(chip, i) >= 0) + break; + if (interrupt & + (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | + TPM_INTF_CMD_READY_INT)) + wake_up_interruptible(&chip->vendor.int_queue); + + /* Clear interrupts handled with TPM_EOI */ + iowrite32(interrupt, + chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); + return IRQ_HANDLED; +} + +static bool interrupts = true; +module_param(interrupts, bool, 0444); +MODULE_PARM_DESC(interrupts, "Enable interrupts"); + +static void tpm_tis_remove(struct tpm_chip *chip) +{ + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_shutdown(chip, TPM2_SU_CLEAR); + + iowrite32(~TPM_GLOBAL_INT_ENABLE & + ioread32(chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor. + locality)), + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + release_locality(chip, chip->vendor.locality, 1); +} + +static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle, + resource_size_t start, resource_size_t len, + unsigned int irq) +{ + u32 vendor, intfcaps, intmask; + int rc, i, irq_s, irq_e, probe; + struct tpm_chip *chip; + struct priv_data *priv; + + priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + chip = tpmm_chip_alloc(dev, &tpm_tis); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + chip->vendor.priv = priv; +#ifdef CONFIG_ACPI + chip->acpi_dev_handle = acpi_dev_handle; +#endif + + chip->vendor.iobase = devm_ioremap(dev, start, len); + if (!chip->vendor.iobase) + return -EIO; + + /* Maximum timeouts */ + chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX; + chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX; + chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX; + chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX; + + if (wait_startup(chip, 0) != 0) { + rc = -ENODEV; + goto out_err; + } + + if (request_locality(chip, 0) != 0) { + rc = -ENODEV; + goto out_err; + } + + rc = tpm2_probe(chip); + if (rc) + goto out_err; + + vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); + chip->vendor.manufacturer_id = vendor; + + dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", + (chip->flags & TPM_CHIP_FLAG_TPM2) ? 
"2.0" : "1.2", + vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); + + if (!itpm) { + probe = probe_itpm(chip); + if (probe < 0) { + rc = -ENODEV; + goto out_err; + } + itpm = !!probe; + } + + if (itpm) + dev_info(dev, "Intel iTPM workaround enabled\n"); + + + /* Figure out the capabilities */ + intfcaps = + ioread32(chip->vendor.iobase + + TPM_INTF_CAPS(chip->vendor.locality)); + dev_dbg(dev, "TPM interface capabilities (0x%x):\n", + intfcaps); + if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) + dev_dbg(dev, "\tBurst Count Static\n"); + if (intfcaps & TPM_INTF_CMD_READY_INT) + dev_dbg(dev, "\tCommand Ready Int Support\n"); + if (intfcaps & TPM_INTF_INT_EDGE_FALLING) + dev_dbg(dev, "\tInterrupt Edge Falling\n"); + if (intfcaps & TPM_INTF_INT_EDGE_RISING) + dev_dbg(dev, "\tInterrupt Edge Rising\n"); + if (intfcaps & TPM_INTF_INT_LEVEL_LOW) + dev_dbg(dev, "\tInterrupt Level Low\n"); + if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) + dev_dbg(dev, "\tInterrupt Level High\n"); + if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) + dev_dbg(dev, "\tLocality Change Int Support\n"); + if (intfcaps & TPM_INTF_STS_VALID_INT) + dev_dbg(dev, "\tSts Valid Int Support\n"); + if (intfcaps & TPM_INTF_DATA_AVAIL_INT) + dev_dbg(dev, "\tData Avail Int Support\n"); + + /* INTERRUPT Setup */ + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); + + intmask = + ioread32(chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + + intmask |= TPM_INTF_CMD_READY_INT + | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT + | TPM_INTF_STS_VALID_INT; + + iowrite32(intmask, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + if (interrupts) + chip->vendor.irq = irq; + if (interrupts && !chip->vendor.irq) { + irq_s = + ioread8(chip->vendor.iobase + + TPM_INT_VECTOR(chip->vendor.locality)); + if (irq_s) { + irq_e = irq_s; + } else { + irq_s = 3; + irq_e = 15; + } + + for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) { + iowrite8(i, chip->vendor.iobase + + TPM_INT_VECTOR(chip->vendor.locality)); + if (devm_request_irq + (dev, i, tis_int_probe, IRQF_SHARED, + chip->devname, chip) != 0) { + dev_info(chip->pdev, + "Unable to request irq: %d for probe\n", + i); + continue; + } + + /* Clear all existing */ + iowrite32(ioread32 + (chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)), + chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + + /* Turn on */ + iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + + chip->vendor.probed_irq = 0; + + /* Generate Interrupts */ + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_gen_interrupt(chip); + else + tpm_gen_interrupt(chip); + + chip->vendor.irq = chip->vendor.probed_irq; + + /* free_irq will call into tis_int_probe; + clear all irqs we haven't seen while doing + tpm_gen_interrupt */ + iowrite32(ioread32 + (chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)), + chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + + /* Turn off */ + iowrite32(intmask, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + } + } + if (chip->vendor.irq) { + iowrite8(chip->vendor.irq, + chip->vendor.iobase + + TPM_INT_VECTOR(chip->vendor.locality)); + if (devm_request_irq + (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED, + chip->devname, chip) != 0) { + dev_info(chip->pdev, + "Unable to request irq: %d for use\n", + chip->vendor.irq); + chip->vendor.irq = 0; + } else { + /* Clear all existing */ + iowrite32(ioread32 
+ (chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)), + chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + + /* Turn on */ + iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + } + } + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); + chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); + chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); + chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); + chip->vendor.duration[TPM_SHORT] = + msecs_to_jiffies(TPM2_DURATION_SHORT); + chip->vendor.duration[TPM_MEDIUM] = + msecs_to_jiffies(TPM2_DURATION_MEDIUM); + chip->vendor.duration[TPM_LONG] = + msecs_to_jiffies(TPM2_DURATION_LONG); + + rc = tpm2_do_selftest(chip); + if (rc == TPM2_RC_INITIALIZE) { + dev_warn(dev, "Firmware has not started TPM\n"); + rc = tpm2_startup(chip, TPM2_SU_CLEAR); + if (!rc) + rc = tpm2_do_selftest(chip); + } + + if (rc) { + dev_err(dev, "TPM self test failed\n"); + if (rc > 0) + rc = -ENODEV; + goto out_err; + } + } else { + if (tpm_get_timeouts(chip)) { + dev_err(dev, "Could not get TPM timeouts and durations\n"); + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + dev_err(dev, "TPM self test failed\n"); + rc = -ENODEV; + goto out_err; + } + } + + return tpm_chip_register(chip); +out_err: + tpm_tis_remove(chip); + return rc; +} + +#ifdef CONFIG_PM_SLEEP +static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) +{ + u32 intmask; + + /* reenable interrupts that device may have lost or + BIOS/firmware may have disabled */ + iowrite8(chip->vendor.irq, chip->vendor.iobase + + TPM_INT_VECTOR(chip->vendor.locality)); + + intmask = + ioread32(chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + + intmask |= TPM_INTF_CMD_READY_INT + | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT + | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; + + iowrite32(intmask, + chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); +} + +static int tpm_tis_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + int ret; + + if (chip->vendor.irq) + tpm_tis_reenable_interrupts(chip); + + ret = tpm_pm_resume(dev); + if (ret) + return ret; + + /* TPM 1.2 requires self-test on resume. This function actually returns + * an error code but for unknown reason it isn't handled. 
+ */ + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) + tpm_do_selftest(chip); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); + +#ifdef CONFIG_PNP +static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, + const struct pnp_device_id *pnp_id) +{ + resource_size_t start, len; + unsigned int irq = 0; + acpi_handle acpi_dev_handle = NULL; + + start = pnp_mem_start(pnp_dev, 0); + len = pnp_mem_len(pnp_dev, 0); + + if (pnp_irq_valid(pnp_dev, 0)) + irq = pnp_irq(pnp_dev, 0); + else + interrupts = false; + + if (is_itpm(pnp_dev)) + itpm = true; + +#ifdef CONFIG_ACPI + if (pnp_acpi_device(pnp_dev)) + acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle; +#endif + + return tpm_tis_init(&pnp_dev->dev, acpi_dev_handle, start, len, irq); +} + +static struct pnp_device_id tpm_pnp_tbl[] = { + {"PNP0C31", 0}, /* TPM */ + {"ATM1200", 0}, /* Atmel */ + {"IFX0102", 0}, /* Infineon */ + {"BCM0101", 0}, /* Broadcom */ + {"BCM0102", 0}, /* Broadcom */ + {"NSC1200", 0}, /* National */ + {"ICO0102", 0}, /* Intel */ + /* Add new here */ + {"", 0}, /* User Specified */ + {"", 0} /* Terminator */ +}; +MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); + +static void tpm_tis_pnp_remove(struct pnp_dev *dev) +{ + struct tpm_chip *chip = pnp_get_drvdata(dev); + tpm_chip_unregister(chip); + tpm_tis_remove(chip); +} + +static struct pnp_driver tis_pnp_driver = { + .name = "tpm_tis", + .id_table = tpm_pnp_tbl, + .probe = tpm_tis_pnp_init, + .remove = tpm_tis_pnp_remove, + .driver = { + .pm = &tpm_tis_pm, + }, +}; + +#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2 +module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, + sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); +MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); +#endif + +static struct platform_driver tis_drv = { + .driver = { + .name = "tpm_tis", + .pm = &tpm_tis_pm, + }, +}; + +static struct platform_device *pdev; + +static bool force; +module_param(force, bool, 0444); +MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); +static int __init init_tis(void) +{ + int rc; +#ifdef CONFIG_PNP + if (!force) + return pnp_register_driver(&tis_pnp_driver); +#endif + + rc = platform_driver_register(&tis_drv); + if (rc < 0) + return rc; + pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0); + if (IS_ERR(pdev)) { + rc = PTR_ERR(pdev); + goto err_dev; + } + rc = tpm_tis_init(&pdev->dev, NULL, TIS_MEM_BASE, TIS_MEM_LEN, 0); + if (rc) + goto err_init; + return 0; +err_init: + platform_device_unregister(pdev); +err_dev: + platform_driver_unregister(&tis_drv); + return rc; +} + +static void __exit cleanup_tis(void) +{ + struct tpm_chip *chip; +#ifdef CONFIG_PNP + if (!force) { + pnp_unregister_driver(&tis_pnp_driver); + return; + } +#endif + chip = dev_get_drvdata(&pdev->dev); + tpm_chip_unregister(chip); + tpm_tis_remove(chip); + platform_device_unregister(pdev); + platform_driver_unregister(&tis_drv); +} + +module_init(init_tis); +module_exit(cleanup_tis); +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/tpm/xen-tpmfront.c b/kernel/drivers/char/tpm/xen-tpmfront.c new file mode 100644 index 000000000..3111f2778 --- /dev/null +++ b/kernel/drivers/char/tpm/xen-tpmfront.c @@ -0,0 +1,401 @@ +/* + * Implementation of the Xen vTPM device frontend + * + * Author: Daniel De Graaf + * + * This program is free software; you can redistribute it 
and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" +#include + +struct tpm_private { + struct tpm_chip *chip; + struct xenbus_device *dev; + + struct vtpm_shared_page *shr; + + unsigned int evtchn; + int ring_ref; + domid_t backend_id; +}; + +enum status_bits { + VTPM_STATUS_RUNNING = 0x1, + VTPM_STATUS_IDLE = 0x2, + VTPM_STATUS_RESULT = 0x4, + VTPM_STATUS_CANCELED = 0x8, +}; + +static u8 vtpm_status(struct tpm_chip *chip) +{ + struct tpm_private *priv = TPM_VPRIV(chip); + switch (priv->shr->state) { + case VTPM_STATE_IDLE: + return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED; + case VTPM_STATE_FINISH: + return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT; + case VTPM_STATE_SUBMIT: + case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */ + return VTPM_STATUS_RUNNING; + default: + return 0; + } +} + +static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status) +{ + return status & VTPM_STATUS_CANCELED; +} + +static void vtpm_cancel(struct tpm_chip *chip) +{ + struct tpm_private *priv = TPM_VPRIV(chip); + priv->shr->state = VTPM_STATE_CANCEL; + wmb(); + notify_remote_via_evtchn(priv->evtchn); +} + +static unsigned int shr_data_offset(struct vtpm_shared_page *shr) +{ + return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages; +} + +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct tpm_private *priv = TPM_VPRIV(chip); + struct vtpm_shared_page *shr = priv->shr; + unsigned int offset = shr_data_offset(shr); + + u32 ordinal; + unsigned long duration; + + if (offset > PAGE_SIZE) + return -EINVAL; + + if (offset + count > PAGE_SIZE) + return -EINVAL; + + /* Wait for completion of any existing command or cancellation */ + if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c, + &chip->vendor.read_queue, true) < 0) { + vtpm_cancel(chip); + return -ETIME; + } + + memcpy(offset + (u8 *)shr, buf, count); + shr->length = count; + barrier(); + shr->state = VTPM_STATE_SUBMIT; + wmb(); + notify_remote_via_evtchn(priv->evtchn); + + ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal); + duration = tpm_calc_ordinal_duration(chip, ordinal); + + if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, + &chip->vendor.read_queue, true) < 0) { + /* got a signal or timeout, try to cancel */ + vtpm_cancel(chip); + return -ETIME; + } + + return count; +} + +static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct tpm_private *priv = TPM_VPRIV(chip); + struct vtpm_shared_page *shr = priv->shr; + unsigned int offset = shr_data_offset(shr); + size_t length = shr->length; + + if (shr->state == VTPM_STATE_IDLE) + return -ECANCELED; + + /* In theory the wait at the end of _send makes this one unnecessary */ + if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c, + &chip->vendor.read_queue, true) < 0) { + vtpm_cancel(chip); + return -ETIME; + } + + if (offset > PAGE_SIZE) + return -EIO; + + if (offset + length > PAGE_SIZE) + length = PAGE_SIZE - offset; + + if (length > count) + length = count; + + memcpy(buf, offset + (u8 *)shr, length); + + return length; +} + +static const struct tpm_class_ops tpm_vtpm = { + .status = vtpm_status, + .recv = vtpm_recv, + .send = vtpm_send, + .cancel = vtpm_cancel, + .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT, + .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT, + .req_canceled = 
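vtpm_send() above is deliberate about publication order: the payload and length are written before the state flips to VTPM_STATE_SUBMIT, and a write barrier orders the state change before the event-channel kick, so the backend can never observe a submitted request with stale contents. The sequence, annotated:

memcpy(dst, buf, count);                /* 1. payload into the shared page */
shr->length = count;                    /* 2. length the backend will read */
barrier();                              /* compiler: 1-2 precede 3 */
shr->state = VTPM_STATE_SUBMIT;         /* 3. the state flip is the "go" flag */
wmb();                                  /* CPU: 3 is visible before the kick */
notify_remote_via_evtchn(evtchn);       /* 4. wake the backend */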
vtpm_req_canceled, +}; + +static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) +{ + struct tpm_private *priv = dev_id; + + switch (priv->shr->state) { + case VTPM_STATE_IDLE: + case VTPM_STATE_FINISH: + wake_up_interruptible(&priv->chip->vendor.read_queue); + break; + case VTPM_STATE_SUBMIT: + case VTPM_STATE_CANCEL: + default: + break; + } + return IRQ_HANDLED; +} + +static int setup_chip(struct device *dev, struct tpm_private *priv) +{ + struct tpm_chip *chip; + + chip = tpmm_chip_alloc(dev, &tpm_vtpm); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + init_waitqueue_head(&chip->vendor.read_queue); + + priv->chip = chip; + TPM_VPRIV(chip) = priv; + + return 0; +} + +/* caller must clean up in case of errors */ +static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) +{ + struct xenbus_transaction xbt; + const char *message = NULL; + int rv; + grant_ref_t gref; + + priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); + if (!priv->shr) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); + return -ENOMEM; + } + + rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref); + if (rv < 0) + return rv; + + priv->ring_ref = gref; + + rv = xenbus_alloc_evtchn(dev, &priv->evtchn); + if (rv) + return rv; + + rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0, + "tpmif", priv); + if (rv <= 0) { + xenbus_dev_fatal(dev, rv, "allocating TPM irq"); + return rv; + } + priv->chip->vendor.irq = rv; + + again: + rv = xenbus_transaction_start(&xbt); + if (rv) { + xenbus_dev_fatal(dev, rv, "starting transaction"); + return rv; + } + + rv = xenbus_printf(xbt, dev->nodename, + "ring-ref", "%u", priv->ring_ref); + if (rv) { + message = "writing ring-ref"; + goto abort_transaction; + } + + rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", + priv->evtchn); + if (rv) { + message = "writing event-channel"; + goto abort_transaction; + } + + rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1"); + if (rv) { + message = "writing feature-protocol-v2"; + goto abort_transaction; + } + + rv = xenbus_transaction_end(xbt, 0); + if (rv == -EAGAIN) + goto again; + if (rv) { + xenbus_dev_fatal(dev, rv, "completing transaction"); + return rv; + } + + xenbus_switch_state(dev, XenbusStateInitialised); + + return 0; + + abort_transaction: + xenbus_transaction_end(xbt, 1); + if (message) + xenbus_dev_error(dev, rv, "%s", message); + + return rv; +} + +static void ring_free(struct tpm_private *priv) +{ + if (!priv) + return; + + if (priv->ring_ref) + gnttab_end_foreign_access(priv->ring_ref, 0, + (unsigned long)priv->shr); + else + free_page((unsigned long)priv->shr); + + if (priv->chip && priv->chip->vendor.irq) + unbind_from_irqhandler(priv->chip->vendor.irq, priv); + + kfree(priv); +} + +static int tpmfront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + struct tpm_private *priv; + struct tpm_chip *chip; + int rv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure"); + return -ENOMEM; + } + + rv = setup_chip(&dev->dev, priv); + if (rv) { + kfree(priv); + return rv; + } + + rv = setup_ring(dev, priv); + if (rv) { + chip = dev_get_drvdata(&dev->dev); + tpm_chip_unregister(chip); + ring_free(priv); + return rv; + } + + tpm_get_timeouts(priv->chip); + + return tpm_chip_register(priv->chip); +} + +static int tpmfront_remove(struct xenbus_device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(&dev->dev); + struct tpm_private *priv = TPM_VPRIV(chip); + 
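setup_ring() publishes ring-ref and event-channel inside a xenbus transaction, and the -EAGAIN handling is the important part: if another transaction committed first, every write has to be replayed from the top. The retry skeleton extracted from the function above:

again:
        rv = xenbus_transaction_start(&xbt);
        if (rv)
                return rv;

        rv = xenbus_printf(xbt, nodename, "ring-ref", "%u", ring_ref);
        if (rv)
                goto abort;

        rv = xenbus_transaction_end(xbt, 0);    /* 0 = try to commit */
        if (rv == -EAGAIN)
                goto again;                     /* lost the race: replay */
        return rv;

abort:
        xenbus_transaction_end(xbt, 1);         /* 1 = abort */
        return rv;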
tpm_chip_unregister(chip); + ring_free(priv); + TPM_VPRIV(chip) = NULL; + return 0; +} + +static int tpmfront_resume(struct xenbus_device *dev) +{ + /* A suspend/resume/migrate will interrupt a vTPM anyway */ + tpmfront_remove(dev); + return tpmfront_probe(dev, NULL); +} + +static void backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) +{ + int val; + + switch (backend_state) { + case XenbusStateInitialised: + case XenbusStateConnected: + if (dev->state == XenbusStateConnected) + break; + + if (xenbus_scanf(XBT_NIL, dev->otherend, + "feature-protocol-v2", "%d", &val) < 0) + val = 0; + if (!val) { + xenbus_dev_fatal(dev, -EINVAL, + "vTPM protocol 2 required"); + return; + } + xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateClosing: + case XenbusStateClosed: + device_unregister(&dev->dev); + xenbus_frontend_closed(dev); + break; + default: + break; + } +} + +static const struct xenbus_device_id tpmfront_ids[] = { + { "vtpm" }, + { "" } +}; +MODULE_ALIAS("xen:vtpm"); + +static struct xenbus_driver tpmfront_driver = { + .ids = tpmfront_ids, + .probe = tpmfront_probe, + .remove = tpmfront_remove, + .resume = tpmfront_resume, + .otherend_changed = backend_changed, +}; + +static int __init xen_tpmfront_init(void) +{ + if (!xen_domain()) + return -ENODEV; + + if (!xen_has_pv_devices()) + return -ENODEV; + + return xenbus_register_frontend(&tpmfront_driver); +} +module_init(xen_tpmfront_init); + +static void __exit xen_tpmfront_exit(void) +{ + xenbus_unregister_driver(&tpmfront_driver); +} +module_exit(xen_tpmfront_exit); + +MODULE_AUTHOR("Daniel De Graaf "); +MODULE_DESCRIPTION("Xen vTPM Driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/ttyprintk.c b/kernel/drivers/char/ttyprintk.c new file mode 100644 index 000000000..a15ce4ef3 --- /dev/null +++ b/kernel/drivers/char/ttyprintk.c @@ -0,0 +1,228 @@ +/* + * linux/drivers/char/ttyprintk.c + * + * Copyright (C) 2010 Samo Pogacnik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +/* + * This pseudo device allows userspace to emit printk messages. It is possible + * to store "console" messages inline with kernel messages for better analysis + * of the boot process, for example. + */ + +#include <linux/device.h> +#include <linux/serial.h> +#include <linux/tty.h> +#include <linux/module.h> + +struct ttyprintk_port { + struct tty_port port; + struct mutex port_write_mutex; +}; + +static struct ttyprintk_port tpk_port; + +/* + * Our simple preformatting supports transparent output of (time-stamped) + * printk messages (also suitable for logging service): + * - any cr is replaced by nl + * - adds a ttyprintk source tag in front of each line + * - an overlong message is fragmented, with '\'nl between fragments + * - TPK_STR_SIZE isn't really the write_room limiting factor, because + * it is emptied on the fly during preformatting.
+ */ +#define TPK_STR_SIZE 508 /* should be bigger than the max expected line length */ +#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */ +static const char *tpk_tag = "[U] "; /* U for User */ +static int tpk_curr; + +static int tpk_printk(const unsigned char *buf, int count) +{ + static char tmp[TPK_STR_SIZE + 4]; + int i = tpk_curr; + + if (buf == NULL) { + /* flush tmp[] */ + if (tpk_curr > 0) { + /* non nl or cr terminated message - add nl */ + tmp[tpk_curr + 0] = '\n'; + tmp[tpk_curr + 1] = '\0'; + printk(KERN_INFO "%s%s", tpk_tag, tmp); + tpk_curr = 0; + } + return i; + } + + for (i = 0; i < count; i++) { + tmp[tpk_curr] = buf[i]; + if (tpk_curr < TPK_STR_SIZE) { + switch (buf[i]) { + case '\r': + /* replace cr with nl */ + tmp[tpk_curr + 0] = '\n'; + tmp[tpk_curr + 1] = '\0'; + printk(KERN_INFO "%s%s", tpk_tag, tmp); + tpk_curr = 0; + if ((i + 1) < count && buf[i + 1] == '\n') + i++; + break; + case '\n': + tmp[tpk_curr + 1] = '\0'; + printk(KERN_INFO "%s%s", tpk_tag, tmp); + tpk_curr = 0; + break; + default: + tpk_curr++; + } + } else { + /* end of tmp buffer reached: cut the message in two */ + tmp[tpk_curr + 1] = '\\'; + tmp[tpk_curr + 2] = '\n'; + tmp[tpk_curr + 3] = '\0'; + printk(KERN_INFO "%s%s", tpk_tag, tmp); + tpk_curr = 0; + } + } + + return count; +} + +/* + * TTY operations open function. + */ +static int tpk_open(struct tty_struct *tty, struct file *filp) +{ + tty->driver_data = &tpk_port; + + return tty_port_open(&tpk_port.port, tty, filp); +} + +/* + * TTY operations close function. + */ +static void tpk_close(struct tty_struct *tty, struct file *filp) +{ + struct ttyprintk_port *tpkp = tty->driver_data; + + mutex_lock(&tpkp->port_write_mutex); + /* flush tpk_printk buffer */ + tpk_printk(NULL, 0); + mutex_unlock(&tpkp->port_write_mutex); + + tty_port_close(&tpkp->port, tty, filp); +} + +/* + * TTY operations write function. + */ +static int tpk_write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + struct ttyprintk_port *tpkp = tty->driver_data; + int ret; + + + /* exclusive use of tpk_printk within this tty */ + mutex_lock(&tpkp->port_write_mutex); + ret = tpk_printk(buf, count); + mutex_unlock(&tpkp->port_write_mutex); + + return ret; +} + +/* + * TTY operations write_room function. + */ +static int tpk_write_room(struct tty_struct *tty) +{ + return TPK_MAX_ROOM; +} + +/* + * TTY operations ioctl function.
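Everything written to the node this driver provides re-emerges through printk() with the "[U] " tag, so user-space boot steps can be interleaved with the kernel log. A user-space sketch, assuming the conventional /dev/ttyprintk node:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/ttyprintk", O_WRONLY);

        if (fd < 0)
                return 1;
        /* appears in dmesg as: [U] mounting data partition */
        write(fd, "mounting data partition\n", 24);
        return close(fd);
}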
+ */ +static int tpk_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct ttyprintk_port *tpkp = tty->driver_data; + + if (!tpkp) + return -EINVAL; + + switch (cmd) { + /* Stop TIOCCONS */ + case TIOCCONS: + return -EOPNOTSUPP; + default: + return -ENOIOCTLCMD; + } + return 0; +} + +static const struct tty_operations ttyprintk_ops = { + .open = tpk_open, + .close = tpk_close, + .write = tpk_write, + .write_room = tpk_write_room, + .ioctl = tpk_ioctl, +}; + +static struct tty_port_operations null_ops = { }; + +static struct tty_driver *ttyprintk_driver; + +static int __init ttyprintk_init(void) +{ + int ret = -ENOMEM; + + mutex_init(&tpk_port.port_write_mutex); + + ttyprintk_driver = tty_alloc_driver(1, + TTY_DRIVER_RESET_TERMIOS | + TTY_DRIVER_REAL_RAW | + TTY_DRIVER_UNNUMBERED_NODE); + if (IS_ERR(ttyprintk_driver)) + return PTR_ERR(ttyprintk_driver); + + tty_port_init(&tpk_port.port); + tpk_port.port.ops = &null_ops; + + ttyprintk_driver->driver_name = "ttyprintk"; + ttyprintk_driver->name = "ttyprintk"; + ttyprintk_driver->major = TTYAUX_MAJOR; + ttyprintk_driver->minor_start = 3; + ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE; + ttyprintk_driver->init_termios = tty_std_termios; + ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET; + tty_set_operations(ttyprintk_driver, &ttyprintk_ops); + tty_port_link_device(&tpk_port.port, ttyprintk_driver, 0); + + ret = tty_register_driver(ttyprintk_driver); + if (ret < 0) { + printk(KERN_ERR "Couldn't register ttyprintk driver\n"); + goto error; + } + + return 0; + +error: + put_tty_driver(ttyprintk_driver); + tty_port_destroy(&tpk_port.port); + return ret; +} + +static void __exit ttyprintk_exit(void) +{ + tty_unregister_driver(ttyprintk_driver); + put_tty_driver(ttyprintk_driver); + tty_port_destroy(&tpk_port.port); +} + +device_initcall(ttyprintk_init); +module_exit(ttyprintk_exit); + +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/uv_mmtimer.c b/kernel/drivers/char/uv_mmtimer.c new file mode 100644 index 000000000..956ebe208 --- /dev/null +++ b/kernel/drivers/char/uv_mmtimer.c @@ -0,0 +1,220 @@ +/* + * Timer device implementation for SGI UV platform. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +MODULE_AUTHOR("Dimitri Sivanich "); +MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer"); +MODULE_LICENSE("GPL"); + +/* name of the device, usually in /dev */ +#define UV_MMTIMER_NAME "mmtimer" +#define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer" +#define UV_MMTIMER_VERSION "1.0" + +static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma); + +/* + * Period in femtoseconds (10^-15 s) + */ +static unsigned long uv_mmtimer_femtoperiod; + +static const struct file_operations uv_mmtimer_fops = { + .owner = THIS_MODULE, + .mmap = uv_mmtimer_mmap, + .unlocked_ioctl = uv_mmtimer_ioctl, + .llseek = noop_llseek, +}; + +/** + * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer + * @file: file structure for the device + * @cmd: command to execute + * @arg: optional argument to command + * + * Executes the command specified by @cmd. Returns 0 for success, < 0 for + * failure. + * + * Valid commands: + * + * %MMTIMER_GETOFFSET - Should return the offset (relative to the start + * of the page where the registers are mapped) for the counter in question. + * + * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) + * seconds + * + * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address + * specified by @arg + * + * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter + * + * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace + * + * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it + * in the address specified by @arg. + */ +static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + + switch (cmd) { + case MMTIMER_GETOFFSET: /* offset of the counter */ + /* + * Starting with HUB rev 2.0, the UV RTC register is + * replicated across all cachelines of it's own page. + * This allows faster simultaneous reads from a given socket. + * + * The offset returned is in 64 bit units. + */ + if (uv_get_min_hub_revision_id() == 1) + ret = 0; + else + ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) % + PAGE_SIZE) / 8; + break; + + case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ + if (copy_to_user((unsigned long __user *)arg, + &uv_mmtimer_femtoperiod, sizeof(unsigned long))) + ret = -EFAULT; + break; + + case MMTIMER_GETFREQ: /* frequency in Hz */ + if (copy_to_user((unsigned long __user *)arg, + &sn_rtc_cycles_per_second, + sizeof(unsigned long))) + ret = -EFAULT; + break; + + case MMTIMER_GETBITS: /* number of bits in the clock */ + ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); + break; + + case MMTIMER_MMAPAVAIL: + ret = 1; + break; + + case MMTIMER_GETCOUNTER: + if (copy_to_user((unsigned long __user *)arg, + (unsigned long *)uv_local_mmr_address(UVH_RTC), + sizeof(unsigned long))) + ret = -EFAULT; + break; + default: + ret = -ENOTTY; + break; + } + return ret; +} + +/** + * uv_mmtimer_mmap - maps the clock's registers into userspace + * @file: file structure for the device + * @vma: VMA to map the registers into + * + * Calls remap_pfn_range() to map the clock's registers into + * the calling process' address space. 
+ */ +static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long uv_mmtimer_addr; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + if (PAGE_SIZE > (1 << 16)) + return -ENOSYS; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC; + uv_mmtimer_addr &= ~(PAGE_SIZE - 1); + uv_mmtimer_addr &= 0xfffffffffffffffUL; + + if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot)) { + printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n"); + return -EAGAIN; + } + + return 0; +} + +static struct miscdevice uv_mmtimer_miscdev = { + MISC_DYNAMIC_MINOR, + UV_MMTIMER_NAME, + &uv_mmtimer_fops +}; + + +/** + * uv_mmtimer_init - device initialization routine + * + * Does initial setup for the uv_mmtimer device. + */ +static int __init uv_mmtimer_init(void) +{ + if (!is_uv_system()) { + printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME); + return -1; + } + + /* + * Sanity check the cycles/sec variable + */ + if (sn_rtc_cycles_per_second < 100000) { + printk(KERN_ERR "%s: unable to determine clock frequency\n", + UV_MMTIMER_NAME); + return -1; + } + + uv_mmtimer_femtoperiod = ((unsigned long)1E15 + + sn_rtc_cycles_per_second / 2) / + sn_rtc_cycles_per_second; + + if (misc_register(&uv_mmtimer_miscdev)) { + printk(KERN_ERR "%s: failed to register device\n", + UV_MMTIMER_NAME); + return -1; + } + + printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC, + UV_MMTIMER_VERSION, + sn_rtc_cycles_per_second/(unsigned long)1E6); + + return 0; +} + +module_init(uv_mmtimer_init); diff --git a/kernel/drivers/char/virtio_console.c b/kernel/drivers/char/virtio_console.c new file mode 100644 index 000000000..50754d203 --- /dev/null +++ b/kernel/drivers/char/virtio_console.c @@ -0,0 +1,2320 @@ +/* + * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation + * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. + * Copyright (C) 2009, 2010, 2011 Amit Shah + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../tty/hvc/hvc_console.h" + +#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) + +/* + * This is a global struct for storing common data for all the devices + * this driver handles. + * + * Mainly, it has a linked list for all the consoles in one place so + * that callbacks from hvc for get_chars(), put_chars() work properly + * across multiple devices and multiple ports per device. 
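Taken together, the two uv_mmtimer entry points give user space lock-free access to the RTC: ask MMTIMER_GETOFFSET where the counter sits inside the page, map the page read-only, then read it with plain loads. A sketch with error handling elided (the /dev/mmtimer name follows UV_MMTIMER_NAME above):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/mmtimer.h>

static unsigned long read_uv_rtc(void)
{
        int fd = open("/dev/mmtimer", O_RDONLY);
        int off = ioctl(fd, MMTIMER_GETOFFSET, 0);      /* in 64-bit words */
        volatile unsigned long *p = mmap(NULL, sysconf(_SC_PAGESIZE),
                                         PROT_READ, MAP_SHARED, fd, 0);

        return p[off];  /* a single load reads the mapped counter */
}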
+ */ +struct ports_driver_data { + /* Used for registering chardevs */ + struct class *class; + + /* Used for exporting per-port information to debugfs */ + struct dentry *debugfs_dir; + + /* List of all the devices we're handling */ + struct list_head portdevs; + + /* + * This is used to keep track of the number of hvc consoles + * spawned by this driver. This number is given as the first + * argument to hvc_alloc(). To correctly map an initial + * console spawned via hvc_instantiate to the console being + * hooked up via hvc_alloc, we need to pass the same vtermno. + * + * We also just assume the first console being initialised was + * the first one that got used as the initial console. + */ + unsigned int next_vtermno; + + /* All the console devices handled by this driver */ + struct list_head consoles; +}; +static struct ports_driver_data pdrvdata; + +static DEFINE_SPINLOCK(pdrvdata_lock); +static DECLARE_COMPLETION(early_console_added); + +/* This struct holds information that's relevant only for console ports */ +struct console { + /* We'll place all consoles in a list in the pdrvdata struct */ + struct list_head list; + + /* The hvc device associated with this console port */ + struct hvc_struct *hvc; + + /* The size of the console */ + struct winsize ws; + + /* + * This number identifies the number that we used to register + * with hvc in hvc_instantiate() and hvc_alloc(); this is the + * number passed on by the hvc callbacks to us to + * differentiate between the other console ports handled by + * this driver + */ + u32 vtermno; +}; + +struct port_buffer { + char *buf; + + /* size of the buffer in *buf above */ + size_t size; + + /* used length of the buffer */ + size_t len; + /* offset in the buf from which to consume data */ + size_t offset; + + /* DMA address of buffer */ + dma_addr_t dma; + + /* Device we got DMA memory from */ + struct device *dev; + + /* List of pending dma buffers to free */ + struct list_head list; + + /* If sgpages == 0 then buf is used */ + unsigned int sgpages; + + /* sg is used if sgpages > 0. sg must be the last field in this struct */ + struct scatterlist sg[0]; +}; + +/* + * This is a per-device struct that stores data common to all the + * ports for that device (vdev->priv). + */ +struct ports_device { + /* Next portdev in the list, head is in the pdrvdata struct */ + struct list_head list; + + /* + * Workqueue handlers where we process deferred work after + * notification + */ + struct work_struct control_work; + struct work_struct config_work; + + struct list_head ports; + + /* To protect the list of ports */ + spinlock_t ports_lock; + + /* To protect the vq operations for the control channel */ + spinlock_t c_ivq_lock; + spinlock_t c_ovq_lock; + + /* The current config space is stored here */ + struct virtio_console_config config; + + /* The virtio device we're associated with */ + struct virtio_device *vdev; + + /* + * A couple of virtqueues for the control channel: one for + * guest->host transfers, one for host->guest transfers + */ + struct virtqueue *c_ivq, *c_ovq; + + /* Array of per-port IO virtqueues */ + struct virtqueue **in_vqs, **out_vqs; + + /* Major number for this device. Ports will be created as minors.
*/ + int chr_major; +}; + +struct port_stats { + unsigned long bytes_sent, bytes_received, bytes_discarded; +}; + +/* This struct holds the per-port data */ +struct port { + /* Next port in the list, head is in the ports_device */ + struct list_head list; + + /* Pointer to the parent virtio_console device */ + struct ports_device *portdev; + + /* The current buffer from which data has to be fed to readers */ + struct port_buffer *inbuf; + + /* + * To protect the operations on the in_vq associated with this + * port. Has to be a spinlock because it can be called from + * interrupt context (get_char()). + */ + spinlock_t inbuf_lock; + + /* Protect the operations on the out_vq. */ + spinlock_t outvq_lock; + + /* The IO vqs for this port */ + struct virtqueue *in_vq, *out_vq; + + /* File in the debugfs directory that exposes this port's information */ + struct dentry *debugfs_file; + + /* + * Keep count of the bytes sent, received and discarded for + * this port for accounting and debugging purposes. These + * counts are not reset across port open / close events. + */ + struct port_stats stats; + + /* + * The entries in this struct will be valid if this port is + * hooked up to an hvc console + */ + struct console cons; + + /* Each port associates with a separate char device */ + struct cdev *cdev; + struct device *dev; + + /* Reference-counting to handle port hot-unplugs and file operations */ + struct kref kref; + + /* A waitqueue for poll() or blocking read operations */ + wait_queue_head_t waitqueue; + + /* The 'name' of the port that we expose via sysfs properties */ + char *name; + + /* We can notify apps of host connect / disconnect events via SIGIO */ + struct fasync_struct *async_queue; + + /* The 'id' to identify the port with the Host */ + u32 id; + + bool outvq_full; + + /* Is the host device open */ + bool host_connected; + + /* We should allow only one process to open a port */ + bool guest_connected; +}; + +/* This is the very early arch-specified put chars function. 
*/ +static int (*early_put_chars)(u32, const char *, int); + +static struct port *find_port_by_vtermno(u32 vtermno) +{ + struct port *port; + struct console *cons; + unsigned long flags; + + spin_lock_irqsave(&pdrvdata_lock, flags); + list_for_each_entry(cons, &pdrvdata.consoles, list) { + if (cons->vtermno == vtermno) { + port = container_of(cons, struct port, cons); + goto out; + } + } + port = NULL; +out: + spin_unlock_irqrestore(&pdrvdata_lock, flags); + return port; +} + +static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, + dev_t dev) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) { + if (port->cdev->dev == dev) { + kref_get(&port->kref); + goto out; + } + } + port = NULL; +out: + spin_unlock_irqrestore(&portdev->ports_lock, flags); + + return port; +} + +static struct port *find_port_by_devt(dev_t dev) +{ + struct ports_device *portdev; + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&pdrvdata_lock, flags); + list_for_each_entry(portdev, &pdrvdata.portdevs, list) { + port = find_port_by_devt_in_portdev(portdev, dev); + if (port) + goto out; + } + port = NULL; +out: + spin_unlock_irqrestore(&pdrvdata_lock, flags); + return port; +} + +static struct port *find_port_by_id(struct ports_device *portdev, u32 id) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) + if (port->id == id) + goto out; + port = NULL; +out: + spin_unlock_irqrestore(&portdev->ports_lock, flags); + + return port; +} + +static struct port *find_port_by_vq(struct ports_device *portdev, + struct virtqueue *vq) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) + if (port->in_vq == vq || port->out_vq == vq) + goto out; + port = NULL; +out: + spin_unlock_irqrestore(&portdev->ports_lock, flags); + return port; +} + +static bool is_console_port(struct port *port) +{ + if (port->cons.hvc) + return true; + return false; +} + +static bool is_rproc_serial(const struct virtio_device *vdev) +{ + return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL; +} + +static inline bool use_multiport(struct ports_device *portdev) +{ + /* + * This condition can be true when put_chars is called from + * early_init + */ + if (!portdev->vdev) + return false; + return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT); +} + +static DEFINE_SPINLOCK(dma_bufs_lock); +static LIST_HEAD(pending_free_dma_bufs); + +static void free_buf(struct port_buffer *buf, bool can_sleep) +{ + unsigned int i; + + for (i = 0; i < buf->sgpages; i++) { + struct page *page = sg_page(&buf->sg[i]); + if (!page) + break; + put_page(page); + } + + if (!buf->dev) { + kfree(buf->buf); + } else if (is_rproc_enabled) { + unsigned long flags; + + /* dma_free_coherent requires interrupts to be enabled. 
*/ + if (!can_sleep) { + /* queue up dma-buffers to be freed later */ + spin_lock_irqsave(&dma_bufs_lock, flags); + list_add_tail(&buf->list, &pending_free_dma_bufs); + spin_unlock_irqrestore(&dma_bufs_lock, flags); + return; + } + dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma); + + /* Release device refcnt and allow it to be freed */ + put_device(buf->dev); + } + + kfree(buf); +} + +static void reclaim_dma_bufs(void) +{ + unsigned long flags; + struct port_buffer *buf, *tmp; + LIST_HEAD(tmp_list); + + if (list_empty(&pending_free_dma_bufs)) + return; + + /* Create a copy of the pending_free_dma_bufs while holding the lock */ + spin_lock_irqsave(&dma_bufs_lock, flags); + list_cut_position(&tmp_list, &pending_free_dma_bufs, + pending_free_dma_bufs.prev); + spin_unlock_irqrestore(&dma_bufs_lock, flags); + + /* Release the dma buffers, without irqs enabled */ + list_for_each_entry_safe(buf, tmp, &tmp_list, list) { + list_del(&buf->list); + free_buf(buf, true); + } +} + +static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, + int pages) +{ + struct port_buffer *buf; + + reclaim_dma_bufs(); + + /* + * Allocate buffer and the sg list. The sg list array is allocated + * directly after the port_buffer struct. + */ + buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages, + GFP_KERNEL); + if (!buf) + goto fail; + + buf->sgpages = pages; + if (pages > 0) { + buf->dev = NULL; + buf->buf = NULL; + return buf; + } + + if (is_rproc_serial(vq->vdev)) { + /* + * Allocate DMA memory from ancestor. When a virtio + * device is created by remoteproc, the DMA memory is + * associated with the grandparent device: + * vdev => rproc => platform-dev. + * The code here would have been less quirky if + * DMA_MEMORY_INCLUDES_CHILDREN had been supported + * in dma-coherent.c + */ + if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) + goto free_buf; + buf->dev = vq->vdev->dev.parent->parent; + + /* Increase device refcnt to avoid freeing it */ + get_device(buf->dev); + buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma, + GFP_KERNEL); + } else { + buf->dev = NULL; + buf->buf = kmalloc(buf_size, GFP_KERNEL); + } + + if (!buf->buf) + goto free_buf; + buf->len = 0; + buf->offset = 0; + buf->size = buf_size; + return buf; + +free_buf: + kfree(buf); +fail: + return NULL; +} + +/* Callers should take appropriate locks */ +static struct port_buffer *get_inbuf(struct port *port) +{ + struct port_buffer *buf; + unsigned int len; + + if (port->inbuf) + return port->inbuf; + + buf = virtqueue_get_buf(port->in_vq, &len); + if (buf) { + buf->len = len; + buf->offset = 0; + port->stats.bytes_received += len; + } + return buf; +} + +/* + * Create a scatter-gather list representing our input buffer and put + * it in the queue. + * + * Callers should take appropriate locks. + */ +static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) +{ + struct scatterlist sg[1]; + int ret; + + sg_init_one(sg, buf->buf, buf->size); + + ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC); + virtqueue_kick(vq); + if (!ret) + ret = vq->num_free; + return ret; +} + +/* Discard any unread data this port has. Callers lockers. */ +static void discard_port_data(struct port *port) +{ + struct port_buffer *buf; + unsigned int err; + + if (!port->portdev) { + /* Device has been unplugged. vqs are already gone. 
*/ + return; + } + buf = get_inbuf(port); + + err = 0; + while (buf) { + port->stats.bytes_discarded += buf->len - buf->offset; + if (add_inbuf(port->in_vq, buf) < 0) { + err++; + free_buf(buf, false); + } + port->inbuf = NULL; + buf = get_inbuf(port); + } + if (err) + dev_warn(port->dev, "Errors adding %d buffers back to vq\n", + err); +} + +static bool port_has_data(struct port *port) +{ + unsigned long flags; + bool ret; + + ret = false; + spin_lock_irqsave(&port->inbuf_lock, flags); + port->inbuf = get_inbuf(port); + if (port->inbuf) + ret = true; + + spin_unlock_irqrestore(&port->inbuf_lock, flags); + return ret; +} + +static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, + unsigned int event, unsigned int value) +{ + struct scatterlist sg[1]; + struct virtio_console_control cpkt; + struct virtqueue *vq; + unsigned int len; + + if (!use_multiport(portdev)) + return 0; + + cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); + cpkt.event = cpu_to_virtio16(portdev->vdev, event); + cpkt.value = cpu_to_virtio16(portdev->vdev, value); + + vq = portdev->c_ovq; + + sg_init_one(sg, &cpkt, sizeof(cpkt)); + + spin_lock(&portdev->c_ovq_lock); + if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) { + virtqueue_kick(vq); + while (!virtqueue_get_buf(vq, &len) + && !virtqueue_is_broken(vq)) + cpu_relax(); + } + spin_unlock(&portdev->c_ovq_lock); + return 0; +} + +static ssize_t send_control_msg(struct port *port, unsigned int event, + unsigned int value) +{ + /* Did the port get unplugged before userspace closed it? */ + if (port->portdev) + return __send_control_msg(port->portdev, port->id, event, value); + return 0; +} + + +/* Callers must take the port->outvq_lock */ +static void reclaim_consumed_buffers(struct port *port) +{ + struct port_buffer *buf; + unsigned int len; + + if (!port->portdev) { + /* Device has been unplugged. vqs are already gone. */ + return; + } + while ((buf = virtqueue_get_buf(port->out_vq, &len))) { + free_buf(buf, false); + port->outvq_full = false; + } +} + +static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, + int nents, size_t in_count, + void *data, bool nonblock) +{ + struct virtqueue *out_vq; + int err; + unsigned long flags; + unsigned int len; + + out_vq = port->out_vq; + + spin_lock_irqsave(&port->outvq_lock, flags); + + reclaim_consumed_buffers(port); + + err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC); + + /* Tell Host to go! */ + virtqueue_kick(out_vq); + + if (err) { + in_count = 0; + goto done; + } + + if (out_vq->num_free == 0) + port->outvq_full = true; + + if (nonblock) + goto done; + + /* + * Wait till the host acknowledges it pushed out the data we + * sent. This is done for data from the hvc_console; the tty + * operations are performed with spinlocks held so we can't + * sleep here. An alternative would be to copy the data to a + * buffer and relax the spinning requirement. The downside is + * we need to kmalloc a GFP_ATOMIC buffer each time the + * console driver writes something out. + */ + while (!virtqueue_get_buf(out_vq, &len) + && !virtqueue_is_broken(out_vq)) + cpu_relax(); +done: + spin_unlock_irqrestore(&port->outvq_lock, flags); + + port->stats.bytes_sent += in_count; + /* + * We're expected to return the amount of data we wrote -- all + * of it + */ + return in_count; +} + +/* + * Give out the data that's requested from the buffer that we have + * queued up. 
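Every control packet is just the three-field virtio_console_control header shown being filled in above. As a concrete illustration (the port id 2 is made up, and vdev stands for the port's virtio_device), announcing that a guest port was opened amounts to queuing this 8-byte buffer on c_ovq:

/* Illustrative only: the packet send_control_msg(port,
 * VIRTIO_CONSOLE_PORT_OPEN, 1) builds for a port with id 2. */
struct virtio_console_control cpkt = {
	.id    = cpu_to_virtio32(vdev, 2),
	.event = cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_OPEN),
	.value = cpu_to_virtio16(vdev, 1),
};
/* sg_init_one() + virtqueue_add_outbuf() then hand &cpkt to the host,
 * spinning until the host consumes it. */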
+ */ +static ssize_t fill_readbuf(struct port *port, char __user *out_buf, + size_t out_count, bool to_user) +{ + struct port_buffer *buf; + unsigned long flags; + + if (!out_count || !port_has_data(port)) + return 0; + + buf = port->inbuf; + out_count = min(out_count, buf->len - buf->offset); + + if (to_user) { + ssize_t ret; + + ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); + if (ret) + return -EFAULT; + } else { + memcpy((__force char *)out_buf, buf->buf + buf->offset, + out_count); + } + + buf->offset += out_count; + + if (buf->offset == buf->len) { + /* + * We're done using all the data in this buffer. + * Re-queue so that the Host can send us more data. + */ + spin_lock_irqsave(&port->inbuf_lock, flags); + port->inbuf = NULL; + + if (add_inbuf(port->in_vq, buf) < 0) + dev_warn(port->dev, "failed add_buf\n"); + + spin_unlock_irqrestore(&port->inbuf_lock, flags); + } + /* Return the number of bytes actually copied */ + return out_count; +} + +/* The condition that must be true for polling to end */ +static bool will_read_block(struct port *port) +{ + if (!port->guest_connected) { + /* Port got hot-unplugged. Let's exit. */ + return false; + } + return !port_has_data(port) && port->host_connected; +} + +static bool will_write_block(struct port *port) +{ + bool ret; + + if (!port->guest_connected) { + /* Port got hot-unplugged. Let's exit. */ + return false; + } + if (!port->host_connected) + return true; + + spin_lock_irq(&port->outvq_lock); + /* + * Check if the Host has consumed any buffers since we last + * sent data (this is only applicable for nonblocking ports). + */ + reclaim_consumed_buffers(port); + ret = port->outvq_full; + spin_unlock_irq(&port->outvq_lock); + + return ret; +} + +static ssize_t port_fops_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct port *port; + ssize_t ret; + + port = filp->private_data; + + /* Port is hot-unplugged. */ + if (!port->guest_connected) + return -ENODEV; + + if (!port_has_data(port)) { + /* + * If nothing's connected on the host just return 0 in + * case of list_empty; this tells the userspace app + * that there's no connection + */ + if (!port->host_connected) + return 0; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_freezable(port->waitqueue, + !will_read_block(port)); + if (ret < 0) + return ret; + } + /* Port got hot-unplugged while we were waiting above. */ + if (!port->guest_connected) + return -ENODEV; + /* + * We could've received a disconnection message while we were + * waiting for more data. + * + * This check is not clubbed in the if() statement above as we + * might receive some data as well as the host could get + * disconnected after we got woken up from our wait. So we + * really want to give off whatever data we have and only then + * check for host_connected. + */ + if (!port_has_data(port) && !port->host_connected) + return 0; + + return fill_readbuf(port, ubuf, count, true); +} + +static int wait_port_writable(struct port *port, bool nonblock) +{ + int ret; + + if (will_write_block(port)) { + if (nonblock) + return -EAGAIN; + + ret = wait_event_freezable(port->waitqueue, + !will_write_block(port)); + if (ret < 0) + return ret; + } + /* Port got hot-unplugged. 
*/ + if (!port->guest_connected) + return -ENODEV; + + return 0; +} + +static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *offp) +{ + struct port *port; + struct port_buffer *buf; + ssize_t ret; + bool nonblock; + struct scatterlist sg[1]; + + /* Userspace could be out to fool us */ + if (!count) + return 0; + + port = filp->private_data; + + nonblock = filp->f_flags & O_NONBLOCK; + + ret = wait_port_writable(port, nonblock); + if (ret < 0) + return ret; + + count = min((size_t)(32 * 1024), count); + + buf = alloc_buf(port->out_vq, count, 0); + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf->buf, ubuf, count); + if (ret) { + ret = -EFAULT; + goto free_buf; + } + + /* + * We now ask send_buf() to not spin for generic ports -- we + * can re-use the same code path that non-blocking file + * descriptors take for blocking file descriptors since the + * wait is already done and we're certain the write will go + * through to the host. + */ + nonblock = true; + sg_init_one(sg, buf->buf, count); + ret = __send_to_port(port, sg, 1, count, buf, nonblock); + + if (nonblock && ret > 0) + goto out; + +free_buf: + free_buf(buf, true); +out: + return ret; +} + +struct sg_list { + unsigned int n; + unsigned int size; + size_t len; + struct scatterlist *sg; +}; + +static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, + struct splice_desc *sd) +{ + struct sg_list *sgl = sd->u.data; + unsigned int offset, len; + + if (sgl->n == sgl->size) + return 0; + + /* Try lock this page */ + if (buf->ops->steal(pipe, buf) == 0) { + /* Get reference and unlock page for moving */ + get_page(buf->page); + unlock_page(buf->page); + + len = min(buf->len, sd->len); + sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); + } else { + /* Failback to copying a page */ + struct page *page = alloc_page(GFP_KERNEL); + char *src; + + if (!page) + return -ENOMEM; + + offset = sd->pos & ~PAGE_MASK; + + len = sd->len; + if (len + offset > PAGE_SIZE) + len = PAGE_SIZE - offset; + + src = kmap_atomic(buf->page); + memcpy(page_address(page) + offset, src + buf->offset, len); + kunmap_atomic(src); + + sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); + } + sgl->n++; + sgl->len += len; + + return len; +} + +/* Faster zero-copy write by splicing */ +static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, + struct file *filp, loff_t *ppos, + size_t len, unsigned int flags) +{ + struct port *port = filp->private_data; + struct sg_list sgl; + ssize_t ret; + struct port_buffer *buf; + struct splice_desc sd = { + .total_len = len, + .flags = flags, + .pos = *ppos, + .u.data = &sgl, + }; + + /* + * Rproc_serial does not yet support splice. To support splice + * pipe_to_sg() must allocate dma-buffers and copy content from + * regular pages to dma pages. And alloc_buf and free_buf must + * support allocating and freeing such a list of dma-buffers. + */ + if (is_rproc_serial(port->out_vq->vdev)) + return -EINVAL; + + /* + * pipe->nrbufs == 0 means there are no data to transfer, + * so this returns just 0 for no data. 
+ */ + pipe_lock(pipe); + if (!pipe->nrbufs) { + ret = 0; + goto error_out; + } + + ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); + if (ret < 0) + goto error_out; + + buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); + if (!buf) { + ret = -ENOMEM; + goto error_out; + } + + sgl.n = 0; + sgl.len = 0; + sgl.size = pipe->nrbufs; + sgl.sg = buf->sg; + sg_init_table(sgl.sg, sgl.size); + ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); + pipe_unlock(pipe); + if (likely(ret > 0)) + ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); + + if (unlikely(ret <= 0)) + free_buf(buf, true); + return ret; + +error_out: + pipe_unlock(pipe); + return ret; +} + +static unsigned int port_fops_poll(struct file *filp, poll_table *wait) +{ + struct port *port; + unsigned int ret; + + port = filp->private_data; + poll_wait(filp, &port->waitqueue, wait); + + if (!port->guest_connected) { + /* Port got unplugged */ + return POLLHUP; + } + ret = 0; + if (!will_read_block(port)) + ret |= POLLIN | POLLRDNORM; + if (!will_write_block(port)) + ret |= POLLOUT; + if (!port->host_connected) + ret |= POLLHUP; + + return ret; +} + +static void remove_port(struct kref *kref); + +static int port_fops_release(struct inode *inode, struct file *filp) +{ + struct port *port; + + port = filp->private_data; + + /* Notify host of port being closed */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); + + spin_lock_irq(&port->inbuf_lock); + port->guest_connected = false; + + discard_port_data(port); + + spin_unlock_irq(&port->inbuf_lock); + + spin_lock_irq(&port->outvq_lock); + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); + + reclaim_dma_bufs(); + /* + * Locks aren't necessary here as a port can't be opened after + * unplug, and if a port isn't unplugged, a kref would already + * exist for the port. Plus, taking ports_lock here would + * create a dependency on other locks taken by functions + * inside remove_port if we're the last holder of the port, + * creating many problems. + */ + kref_put(&port->kref, remove_port); + + return 0; +} + +static int port_fops_open(struct inode *inode, struct file *filp) +{ + struct cdev *cdev = inode->i_cdev; + struct port *port; + int ret; + + /* We get the port with a kref here */ + port = find_port_by_devt(cdev->dev); + if (!port) { + /* Port was unplugged before we could proceed */ + return -ENXIO; + } + filp->private_data = port; + + /* + * Don't allow opening of console port devices -- that's done + * via /dev/hvc + */ + if (is_console_port(port)) { + ret = -ENXIO; + goto out; + } + + /* Allow only one process to open a particular port at a time */ + spin_lock_irq(&port->inbuf_lock); + if (port->guest_connected) { + spin_unlock_irq(&port->inbuf_lock); + ret = -EBUSY; + goto out; + } + + port->guest_connected = true; + spin_unlock_irq(&port->inbuf_lock); + + spin_lock_irq(&port->outvq_lock); + /* + * There might be a chance that we missed reclaiming a few + * buffers in the window of the port getting previously closed + * and opening now. 
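From userspace this zero-copy path is reached with splice(2) out of a pipe. A rough sketch, assuming a generic port at the hypothetical node /dev/vport0p1 and eliding fd cleanup on the error paths:

#define _GNU_SOURCE		/* for splice(2) */
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_to_port(const char *msg, size_t len)
{
	int pfd[2];
	int port = open("/dev/vport0p1", O_WRONLY);

	if (port < 0 || pipe(pfd) < 0)
		return -1;
	if (write(pfd[1], msg, len) != (ssize_t)len)
		return -1;

	/* The pipe's pages reach pipe_to_sg() and land in the out_vq. */
	return splice(pfd[0], NULL, port, NULL, len, 0);
}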
+ */ + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); + + nonseekable_open(inode, filp); + + /* Notify host of port being opened */ + send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); + + return 0; +out: + kref_put(&port->kref, remove_port); + return ret; +} + +static int port_fops_fasync(int fd, struct file *filp, int mode) +{ + struct port *port; + + port = filp->private_data; + return fasync_helper(fd, filp, mode, &port->async_queue); +} + +/* + * The file operations that we support: programs in the guest can open + * a console device, read from it, write to it, poll for data and + * close it. The devices are at + * /dev/vport<device number>p<port number> + */ +static const struct file_operations port_fops = { + .owner = THIS_MODULE, + .open = port_fops_open, + .read = port_fops_read, + .write = port_fops_write, + .splice_write = port_fops_splice_write, + .poll = port_fops_poll, + .release = port_fops_release, + .fasync = port_fops_fasync, + .llseek = no_llseek, +}; + +/* + * The put_chars() callback is pretty straightforward. + * + * We turn the characters into a scatter-gather list, add it to the + * output queue and then kick the Host. Then we sit here waiting for + * it to finish: inefficient in theory, but in practice + * implementations will do it immediately (lguest's Launcher does). + */ +static int put_chars(u32 vtermno, const char *buf, int count) +{ + struct port *port; + struct scatterlist sg[1]; + + if (unlikely(early_put_chars)) + return early_put_chars(vtermno, buf, count); + + port = find_port_by_vtermno(vtermno); + if (!port) + return -EPIPE; + + sg_init_one(sg, buf, count); + return __send_to_port(port, sg, 1, count, (void *)buf, false); +} + +/* + * get_chars() is the callback from the hvc_console infrastructure + * when an interrupt is received. + * + * We call out to fill_readbuf that gets us the required data from the + * buffers that are queued up. + */ +static int get_chars(u32 vtermno, char *buf, int count) +{ + struct port *port; + + /* If we've not set up the port yet, we have no input to give. */ + if (unlikely(early_put_chars)) + return 0; + + port = find_port_by_vtermno(vtermno); + if (!port) + return -EPIPE; + + /* If we don't have an input queue yet, we can't get input. */ + BUG_ON(!port->in_vq); + + return fill_readbuf(port, (__force char __user *)buf, count, false); +} + +static void resize_console(struct port *port) +{ + struct virtio_device *vdev; + + /* The port could have been hot-unplugged */ + if (!port || !is_console_port(port)) + return; + + vdev = port->portdev->vdev; + + /* Don't test F_SIZE at all if we're rproc: not a valid feature! */ + if (!is_rproc_serial(vdev) && + virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) + hvc_resize(port->cons.hvc, port->cons.ws); +} + +/* We set the configuration at this point, since we now have a tty */ +static int notifier_add_vio(struct hvc_struct *hp, int data) +{ + struct port *port; + + port = find_port_by_vtermno(hp->vtermno); + if (!port) + return -EINVAL; + + hp->irq_requested = 1; + resize_console(port); + + return 0; +} + +static void notifier_del_vio(struct hvc_struct *hp, int data) +{ + hp->irq_requested = 0; +} + +/* The operations for console ports.
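The read/poll contract implemented above is easy to misread from the guest side: read() returning 0 means the host end is not connected, not end-of-file in the ordinary sense. A blocking-consumer sketch (the device node is again hypothetical):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void drain_port(int fd)	/* fd: an open /dev/vport<n>p<n> */
{
	char buf[256];
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		poll(&pfd, 1, -1);	/* wakes for data or POLLHUP */
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n > 0)
			fwrite(buf, 1, n, stdout);
		else if (n == 0)
			break;		/* host side disconnected */
		/* n < 0: transient error such as EAGAIN; retry */
	}
}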
*/ +static const struct hv_ops hv_ops = { + .get_chars = get_chars, + .put_chars = put_chars, + .notifier_add = notifier_add_vio, + .notifier_del = notifier_del_vio, + .notifier_hangup = notifier_del_vio, +}; + +/* + * Console drivers are initialized very early so boot messages can go + * out, so we do things slightly differently from the generic virtio + * initialization of the net and block drivers. + * + * At this stage, the console is output-only. It's too early to set + * up a virtqueue, so we let the drivers do some boutique early-output + * thing. + */ +int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) +{ + early_put_chars = put_chars; + return hvc_instantiate(0, 0, &hv_ops); +} + +static int init_port_console(struct port *port) +{ + int ret; + + /* + * The Host's telling us this port is a console port. Hook it + * up with an hvc console. + * + * To set up and manage our virtual console, we call + * hvc_alloc(). + * + * The first argument of hvc_alloc() is the virtual console + * number. The second argument is the parameter for the + * notification mechanism (like irq number). We currently + * leave this as zero, virtqueues have implicit notifications. + * + * The third argument is a "struct hv_ops" containing the + * put_chars() get_chars(), notifier_add() and notifier_del() + * pointers. The final argument is the output buffer size: we + * can do any size, so we put PAGE_SIZE here. + */ + port->cons.vtermno = pdrvdata.next_vtermno; + + port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); + if (IS_ERR(port->cons.hvc)) { + ret = PTR_ERR(port->cons.hvc); + dev_err(port->dev, + "error %d allocating hvc for port\n", ret); + port->cons.hvc = NULL; + return ret; + } + spin_lock_irq(&pdrvdata_lock); + pdrvdata.next_vtermno++; + list_add_tail(&port->cons.list, &pdrvdata.consoles); + spin_unlock_irq(&pdrvdata_lock); + port->guest_connected = true; + + /* + * Start using the new console output if this is the first + * console to come up. + */ + if (early_put_chars) + early_put_chars = NULL; + + /* Notify host of port being opened */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); + + return 0; +} + +static ssize_t show_port_name(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct port *port; + + port = dev_get_drvdata(dev); + + return sprintf(buffer, "%s\n", port->name); +} + +static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); + +static struct attribute *port_sysfs_entries[] = { + &dev_attr_name.attr, + NULL +}; + +static struct attribute_group port_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = port_sysfs_entries, +}; + +static ssize_t debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct port *port; + char *buf; + ssize_t ret, out_offset, out_count; + + out_count = 1024; + buf = kmalloc(out_count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + port = filp->private_data; + out_offset = 0; + out_offset += snprintf(buf + out_offset, out_count, + "name: %s\n", port->name ? 
port->name : ""); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "guest_connected: %d\n", port->guest_connected); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "host_connected: %d\n", port->host_connected); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "outvq_full: %d\n", port->outvq_full); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "bytes_sent: %lu\n", port->stats.bytes_sent); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "bytes_received: %lu\n", + port->stats.bytes_received); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "bytes_discarded: %lu\n", + port->stats.bytes_discarded); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "is_console: %s\n", + is_console_port(port) ? "yes" : "no"); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "console_vtermno: %u\n", port->cons.vtermno); + + ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); + kfree(buf); + return ret; +} + +static const struct file_operations port_debugfs_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_read, +}; + +static void set_console_size(struct port *port, u16 rows, u16 cols) +{ + if (!port || !is_console_port(port)) + return; + + port->cons.ws.ws_row = rows; + port->cons.ws.ws_col = cols; +} + +static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) +{ + struct port_buffer *buf; + unsigned int nr_added_bufs; + int ret; + + nr_added_bufs = 0; + do { + buf = alloc_buf(vq, PAGE_SIZE, 0); + if (!buf) + break; + + spin_lock_irq(lock); + ret = add_inbuf(vq, buf); + if (ret < 0) { + spin_unlock_irq(lock); + free_buf(buf, true); + break; + } + nr_added_bufs++; + spin_unlock_irq(lock); + } while (ret > 0); + + return nr_added_bufs; +} + +static void send_sigio_to_port(struct port *port) +{ + if (port->async_queue && port->guest_connected) + kill_fasync(&port->async_queue, SIGIO, POLL_OUT); +} + +static int add_port(struct ports_device *portdev, u32 id) +{ + char debugfs_name[16]; + struct port *port; + struct port_buffer *buf; + dev_t devt; + unsigned int nr_added_bufs; + int err; + + port = kmalloc(sizeof(*port), GFP_KERNEL); + if (!port) { + err = -ENOMEM; + goto fail; + } + kref_init(&port->kref); + + port->portdev = portdev; + port->id = id; + + port->name = NULL; + port->inbuf = NULL; + port->cons.hvc = NULL; + port->async_queue = NULL; + + port->cons.ws.ws_row = port->cons.ws.ws_col = 0; + + port->host_connected = port->guest_connected = false; + port->stats = (struct port_stats) { 0 }; + + port->outvq_full = false; + + port->in_vq = portdev->in_vqs[port->id]; + port->out_vq = portdev->out_vqs[port->id]; + + port->cdev = cdev_alloc(); + if (!port->cdev) { + dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); + err = -ENOMEM; + goto free_port; + } + port->cdev->ops = &port_fops; + + devt = MKDEV(portdev->chr_major, id); + err = cdev_add(port->cdev, devt, 1); + if (err < 0) { + dev_err(&port->portdev->vdev->dev, + "Error %d adding cdev for port %u\n", err, id); + goto free_cdev; + } + port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, + devt, port, "vport%up%u", + port->portdev->vdev->index, id); + if (IS_ERR(port->dev)) { + err = PTR_ERR(port->dev); + dev_err(&port->portdev->vdev->dev, + "Error %d creating device for port %u\n", + err, id); + goto free_cdev; + } + + spin_lock_init(&port->inbuf_lock); + spin_lock_init(&port->outvq_lock); + 
init_waitqueue_head(&port->waitqueue); + + /* Fill the in_vq with buffers so the host can send us data. */ + nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); + if (!nr_added_bufs) { + dev_err(port->dev, "Error allocating inbufs\n"); + err = -ENOMEM; + goto free_device; + } + + if (is_rproc_serial(port->portdev->vdev)) + /* + * For rproc_serial assume remote processor is connected. + * rproc_serial does not want the console port, only + * the generic port implementation. + */ + port->host_connected = true; + else if (!use_multiport(port->portdev)) { + /* + * If we're not using multiport support, + * this has to be a console port. + */ + err = init_port_console(port); + if (err) + goto free_inbufs; + } + + spin_lock_irq(&portdev->ports_lock); + list_add_tail(&port->list, &port->portdev->ports); + spin_unlock_irq(&portdev->ports_lock); + + /* + * Tell the Host we're set so that it can send us various + * configuration parameters for this port (eg, port name, + * caching, whether this is a console port, etc.) + */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); + + if (pdrvdata.debugfs_dir) { + /* + * Finally, create the debugfs file that we can use to + * inspect a port's state at any time + */ + sprintf(debugfs_name, "vport%up%u", + port->portdev->vdev->index, id); + port->debugfs_file = debugfs_create_file(debugfs_name, 0444, + pdrvdata.debugfs_dir, + port, + &port_debugfs_ops); + } + return 0; + +free_inbufs: + while ((buf = virtqueue_detach_unused_buf(port->in_vq))) + free_buf(buf, true); +free_device: + device_destroy(pdrvdata.class, port->dev->devt); +free_cdev: + cdev_del(port->cdev); +free_port: + kfree(port); +fail: + /* The host might want to notify management sw about port add failure */ + __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); + return err; +} + +/* No users remain, remove all port-specific data. */ +static void remove_port(struct kref *kref) +{ + struct port *port; + + port = container_of(kref, struct port, kref); + + kfree(port); +} + +static void remove_port_data(struct port *port) +{ + struct port_buffer *buf; + + spin_lock_irq(&port->inbuf_lock); + /* Remove unused data this port might have received. */ + discard_port_data(port); + + /* Remove buffers we queued up for the Host to send us data in. */ + while ((buf = virtqueue_detach_unused_buf(port->in_vq))) + free_buf(buf, true); + spin_unlock_irq(&port->inbuf_lock); + + spin_lock_irq(&port->outvq_lock); + reclaim_consumed_buffers(port); + + /* Free pending buffers from the out-queue. */ + while ((buf = virtqueue_detach_unused_buf(port->out_vq))) + free_buf(buf, true); + spin_unlock_irq(&port->outvq_lock); +} + +/* + * Port got unplugged. Remove port from portdev's list and drop the + * kref reference. If no userspace has this port opened, it will + * result in immediate removal the port. + */ +static void unplug_port(struct port *port) +{ + spin_lock_irq(&port->portdev->ports_lock); + list_del(&port->list); + spin_unlock_irq(&port->portdev->ports_lock); + + spin_lock_irq(&port->inbuf_lock); + if (port->guest_connected) { + /* Let the app know the port is going down. 
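The counters kept in struct port_stats surface through the debugfs file created just above. A small sketch that dumps them, assuming debugfs is mounted at /sys/kernel/debug and a hypothetical port named vport0p1:

#include <stdio.h>

static void dump_port_stats(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/debug/virtio-ports/vport0p1", "r");

	if (!f)
		return;
	/* Prints the name:, guest_connected:, bytes_sent: etc. lines
	 * emitted by debugfs_read(), one key per line. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}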
*/ + send_sigio_to_port(port); + + /* Do this after sigio is actually sent */ + port->guest_connected = false; + port->host_connected = false; + + wake_up_interruptible(&port->waitqueue); + } + spin_unlock_irq(&port->inbuf_lock); + + if (is_console_port(port)) { + spin_lock_irq(&pdrvdata_lock); + list_del(&port->cons.list); + spin_unlock_irq(&pdrvdata_lock); + hvc_remove(port->cons.hvc); + } + + remove_port_data(port); + + /* + * We should just assume the device itself has gone off -- + * else a close on an open port later will try to send out a + * control message. + */ + port->portdev = NULL; + + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); + device_destroy(pdrvdata.class, port->dev->devt); + cdev_del(port->cdev); + + debugfs_remove(port->debugfs_file); + kfree(port->name); + + /* + * Locks around here are not necessary - a port can't be + * opened after we removed the port struct from ports_list + * above. + */ + kref_put(&port->kref, remove_port); +} + +/* Any private messages that the Host and Guest want to share */ +static void handle_control_message(struct virtio_device *vdev, + struct ports_device *portdev, + struct port_buffer *buf) +{ + struct virtio_console_control *cpkt; + struct port *port; + size_t name_size; + int err; + + cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); + + port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id)); + if (!port && + cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) { + /* No valid header at start of buffer. Drop it. */ + dev_dbg(&portdev->vdev->dev, + "Invalid index %u in control packet\n", cpkt->id); + return; + } + + switch (virtio16_to_cpu(vdev, cpkt->event)) { + case VIRTIO_CONSOLE_PORT_ADD: + if (port) { + dev_dbg(&portdev->vdev->dev, + "Port %u already added\n", port->id); + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); + break; + } + if (virtio32_to_cpu(vdev, cpkt->id) >= + portdev->config.max_nr_ports) { + dev_warn(&portdev->vdev->dev, + "Request for adding port with " + "out-of-bound id %u, max. supported id: %u\n", + cpkt->id, portdev->config.max_nr_ports - 1); + break; + } + add_port(portdev, virtio32_to_cpu(vdev, cpkt->id)); + break; + case VIRTIO_CONSOLE_PORT_REMOVE: + unplug_port(port); + break; + case VIRTIO_CONSOLE_CONSOLE_PORT: + if (!cpkt->value) + break; + if (is_console_port(port)) + break; + + init_port_console(port); + complete(&early_console_added); + /* + * Could remove the port here in case init fails - but + * have to notify the host first. + */ + break; + case VIRTIO_CONSOLE_RESIZE: { + struct { + __u16 rows; + __u16 cols; + } size; + + if (!is_console_port(port)) + break; + + memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), + sizeof(size)); + set_console_size(port, size.rows, size.cols); + + port->cons.hvc->irq_requested = 1; + resize_console(port); + break; + } + case VIRTIO_CONSOLE_PORT_OPEN: + port->host_connected = virtio16_to_cpu(vdev, cpkt->value); + wake_up_interruptible(&port->waitqueue); + /* + * If the host port got closed and the host had any + * unconsumed buffers, we'll be able to reclaim them + * now. + */ + spin_lock_irq(&port->outvq_lock); + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); + + /* + * If the guest is connected, it'll be interested in + * knowing the host connection state changed. + */ + spin_lock_irq(&port->inbuf_lock); + send_sigio_to_port(port); + spin_unlock_irq(&port->inbuf_lock); + break; + case VIRTIO_CONSOLE_PORT_NAME: + /* + * If we woke up after hibernation, we can get this + * again. 
Skip it in that case. + */ + if (port->name) + break; + + /* + * Skip the size of the header and the cpkt to get the size + * of the name that was sent + */ + name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; + + port->name = kmalloc(name_size, GFP_KERNEL); + if (!port->name) { + dev_err(port->dev, + "Not enough space to store port name\n"); + break; + } + strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), + name_size - 1); + port->name[name_size - 1] = 0; + + /* + * Since we only have one sysfs attribute, 'name', + * create it only if we have a name for the port. + */ + err = sysfs_create_group(&port->dev->kobj, + &port_attribute_group); + if (err) { + dev_err(port->dev, + "Error %d creating sysfs device attributes\n", + err); + } else { + /* + * Generate a udev event so that appropriate + * symlinks can be created based on udev + * rules. + */ + kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); + } + break; + } +} + +static void control_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + struct virtqueue *vq; + struct port_buffer *buf; + unsigned int len; + + portdev = container_of(work, struct ports_device, control_work); + vq = portdev->c_ivq; + + spin_lock(&portdev->c_ivq_lock); + while ((buf = virtqueue_get_buf(vq, &len))) { + spin_unlock(&portdev->c_ivq_lock); + + buf->len = len; + buf->offset = 0; + + handle_control_message(vq->vdev, portdev, buf); + + spin_lock(&portdev->c_ivq_lock); + if (add_inbuf(portdev->c_ivq, buf) < 0) { + dev_warn(&portdev->vdev->dev, + "Error adding buffer to queue\n"); + free_buf(buf, false); + } + } + spin_unlock(&portdev->c_ivq_lock); +} + +static void out_intr(struct virtqueue *vq) +{ + struct port *port; + + port = find_port_by_vq(vq->vdev->priv, vq); + if (!port) + return; + + wake_up_interruptible(&port->waitqueue); +} + +static void in_intr(struct virtqueue *vq) +{ + struct port *port; + unsigned long flags; + + port = find_port_by_vq(vq->vdev->priv, vq); + if (!port) + return; + + spin_lock_irqsave(&port->inbuf_lock, flags); + port->inbuf = get_inbuf(port); + + /* + * Normally the port should not accept data when the port is + * closed. For generic serial ports, the host won't (shouldn't) + * send data till the guest is connected. But this condition + * can be reached when a console port is not yet connected (no + * tty is spawned) and the other side sends out data over the + * vring, or when a remote devices start sending data before + * the ports are opened. + * + * A generic serial port will discard data if not connected, + * while console ports and rproc-serial ports accepts data at + * any time. rproc-serial is initiated with guest_connected to + * false because port_fops_open expects this. Console ports are + * hooked up with an HVC console and is initialized with + * guest_connected to true. 
+ */ + + if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) + discard_port_data(port); + + /* Send a SIGIO indicating new data in case the process asked for it */ + send_sigio_to_port(port); + + spin_unlock_irqrestore(&port->inbuf_lock, flags); + + wake_up_interruptible(&port->waitqueue); + + if (is_console_port(port) && hvc_poll(port->cons.hvc)) + hvc_kick(); +} + +static void control_intr(struct virtqueue *vq) +{ + struct ports_device *portdev; + + portdev = vq->vdev->priv; + schedule_work(&portdev->control_work); +} + +static void config_intr(struct virtio_device *vdev) +{ + struct ports_device *portdev; + + portdev = vdev->priv; + + if (!use_multiport(portdev)) + schedule_work(&portdev->config_work); +} + +static void config_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + + portdev = container_of(work, struct ports_device, config_work); + if (!use_multiport(portdev)) { + struct virtio_device *vdev; + struct port *port; + u16 rows, cols; + + vdev = portdev->vdev; + virtio_cread(vdev, struct virtio_console_config, cols, &cols); + virtio_cread(vdev, struct virtio_console_config, rows, &rows); + + port = find_port_by_id(portdev, 0); + set_console_size(port, rows, cols); + + /* + * We'll use this way of resizing only for legacy + * support. For newer userspace + * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages + * to indicate console size changes so that it can be + * done per-port. + */ + resize_console(port); + } +} + +static int init_vqs(struct ports_device *portdev) +{ + vq_callback_t **io_callbacks; + char **io_names; + struct virtqueue **vqs; + u32 i, j, nr_ports, nr_queues; + int err; + + nr_ports = portdev->config.max_nr_ports; + nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; + + vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); + io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); + io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); + portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), + GFP_KERNEL); + portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), + GFP_KERNEL); + if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || + !portdev->out_vqs) { + err = -ENOMEM; + goto free; + } + + /* + * For backward compat (newer host but older guest), the host + * spawns a console port first and also inits the vqs for port + * 0 before others. + */ + j = 0; + io_callbacks[j] = in_intr; + io_callbacks[j + 1] = out_intr; + io_names[j] = "input"; + io_names[j + 1] = "output"; + j += 2; + + if (use_multiport(portdev)) { + io_callbacks[j] = control_intr; + io_callbacks[j + 1] = NULL; + io_names[j] = "control-i"; + io_names[j + 1] = "control-o"; + + for (i = 1; i < nr_ports; i++) { + j += 2; + io_callbacks[j] = in_intr; + io_callbacks[j + 1] = out_intr; + io_names[j] = "input"; + io_names[j + 1] = "output"; + } + } + /* Find the queues.
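To make the ordering concrete, here is a worked layout, assuming multiport was negotiated and max_nr_ports = 3, so nr_queues = (3 + 1) * 2 = 8:

/*
 * vqs[0]/vqs[1]  port 0   "input"/"output"        -> in_vqs[0]/out_vqs[0]
 * vqs[2]/vqs[3]  control  "control-i"/"control-o" -> c_ivq/c_ovq
 * vqs[4]/vqs[5]  port 1   "input"/"output"        -> in_vqs[1]/out_vqs[1]
 * vqs[6]/vqs[7]  port 2   "input"/"output"        -> in_vqs[2]/out_vqs[2]
 */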
*/ + err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, + io_callbacks, + (const char **)io_names); + if (err) + goto free; + + j = 0; + portdev->in_vqs[0] = vqs[0]; + portdev->out_vqs[0] = vqs[1]; + j += 2; + if (use_multiport(portdev)) { + portdev->c_ivq = vqs[j]; + portdev->c_ovq = vqs[j + 1]; + + for (i = 1; i < nr_ports; i++) { + j += 2; + portdev->in_vqs[i] = vqs[j]; + portdev->out_vqs[i] = vqs[j + 1]; + } + } + kfree(io_names); + kfree(io_callbacks); + kfree(vqs); + + return 0; + +free: + kfree(portdev->out_vqs); + kfree(portdev->in_vqs); + kfree(io_names); + kfree(io_callbacks); + kfree(vqs); + + return err; +} + +static const struct file_operations portdev_fops = { + .owner = THIS_MODULE, +}; + +static void remove_vqs(struct ports_device *portdev) +{ + portdev->vdev->config->del_vqs(portdev->vdev); + kfree(portdev->in_vqs); + kfree(portdev->out_vqs); +} + +static void remove_controlq_data(struct ports_device *portdev) +{ + struct port_buffer *buf; + unsigned int len; + + if (!use_multiport(portdev)) + return; + + while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) + free_buf(buf, true); + + while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) + free_buf(buf, true); +} + +/* + * Once we're further in boot, we get probed like any other virtio + * device. + * + * If the host also supports multiple console ports, we check the + * config space to see how many ports the host has spawned. We + * initialize each port found. + */ +static int virtcons_probe(struct virtio_device *vdev) +{ + struct ports_device *portdev; + int err; + bool multiport; + bool early = early_put_chars != NULL; + + /* We only need a config space if features are offered */ + if (!vdev->config->get && + (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE) + || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + /* Ensure to read early_put_chars now */ + barrier(); + + portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); + if (!portdev) { + err = -ENOMEM; + goto fail; + } + + /* Attach this portdev to this virtio_device, and vice-versa. */ + portdev->vdev = vdev; + vdev->priv = portdev; + + portdev->chr_major = register_chrdev(0, "virtio-portsdev", + &portdev_fops); + if (portdev->chr_major < 0) { + dev_err(&vdev->dev, + "Error %d registering chrdev for device %u\n", + portdev->chr_major, vdev->index); + err = portdev->chr_major; + goto free; + } + + multiport = false; + portdev->config.max_nr_ports = 1; + + /* Don't test MULTIPORT at all if we're rproc: not a valid feature! 
*/ + if (!is_rproc_serial(vdev) && + virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT, + struct virtio_console_config, max_nr_ports, + &portdev->config.max_nr_ports) == 0) { + multiport = true; + } + + err = init_vqs(portdev); + if (err < 0) { + dev_err(&vdev->dev, "Error %d initializing vqs\n", err); + goto free_chrdev; + } + + spin_lock_init(&portdev->ports_lock); + INIT_LIST_HEAD(&portdev->ports); + + virtio_device_ready(portdev->vdev); + + INIT_WORK(&portdev->config_work, &config_work_handler); + INIT_WORK(&portdev->control_work, &control_work_handler); + + if (multiport) { + unsigned int nr_added_bufs; + + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); + + nr_added_bufs = fill_queue(portdev->c_ivq, + &portdev->c_ivq_lock); + if (!nr_added_bufs) { + dev_err(&vdev->dev, + "Error allocating buffers for control queue\n"); + err = -ENOMEM; + goto free_vqs; + } + } else { + /* + * For backward compatibility: Create a console port + * if we're running on older host. + */ + add_port(portdev, 0); + } + + spin_lock_irq(&pdrvdata_lock); + list_add_tail(&portdev->list, &pdrvdata.portdevs); + spin_unlock_irq(&pdrvdata_lock); + + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 1); + + /* + * If there was an early virtio console, assume that there are no + * other consoles. We need to wait until the hvc_alloc matches the + * hvc_instantiate, otherwise tty_open will complain, resulting in + * a "Warning: unable to open an initial console" boot failure. + * Without multiport this is done in add_port above. With multiport + * this might take some host<->guest communication - thus we have to + * wait. + */ + if (multiport && early) + wait_for_completion(&early_console_added); + + return 0; + +free_vqs: + /* The host might want to notify mgmt sw about device add failure */ + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 0); + remove_vqs(portdev); +free_chrdev: + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); +free: + kfree(portdev); +fail: + return err; +} + +static void virtcons_remove(struct virtio_device *vdev) +{ + struct ports_device *portdev; + struct port *port, *port2; + + portdev = vdev->priv; + + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); + + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + + /* + * When yanking out a device, we immediately lose the + * (device-side) queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. 
+ */ + remove_controlq_data(portdev); + remove_vqs(portdev); + kfree(portdev); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_CONSOLE_F_SIZE, + VIRTIO_CONSOLE_F_MULTIPORT, +}; + +static struct virtio_device_id rproc_serial_id_table[] = { +#if IS_ENABLED(CONFIG_REMOTEPROC) + { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID }, +#endif + { 0 }, +}; + +static unsigned int rproc_serial_features[] = { +}; + +#ifdef CONFIG_PM_SLEEP +static int virtcons_freeze(struct virtio_device *vdev) +{ + struct ports_device *portdev; + struct port *port; + + portdev = vdev->priv; + + vdev->config->reset(vdev); + + virtqueue_disable_cb(portdev->c_ivq); + cancel_work_sync(&portdev->control_work); + cancel_work_sync(&portdev->config_work); + /* + * Once more: if control_work_handler() was running, it would + * enable the cb as the last step. + */ + virtqueue_disable_cb(portdev->c_ivq); + remove_controlq_data(portdev); + + list_for_each_entry(port, &portdev->ports, list) { + virtqueue_disable_cb(port->in_vq); + virtqueue_disable_cb(port->out_vq); + /* + * We'll ask the host later if the new invocation has + * the port opened or closed. + */ + port->host_connected = false; + remove_port_data(port); + } + remove_vqs(portdev); + + return 0; +} + +static int virtcons_restore(struct virtio_device *vdev) +{ + struct ports_device *portdev; + struct port *port; + int ret; + + portdev = vdev->priv; + + ret = init_vqs(portdev); + if (ret) + return ret; + + virtio_device_ready(portdev->vdev); + + if (use_multiport(portdev)) + fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + + list_for_each_entry(port, &portdev->ports, list) { + port->in_vq = portdev->in_vqs[port->id]; + port->out_vq = portdev->out_vqs[port->id]; + + fill_queue(port->in_vq, &port->inbuf_lock); + + /* Get port open/close status on the host */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); + + /* + * If a port was open at the time of suspending, we + * have to let the host know that it's still open. 
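The freeze/restore pair above is order-sensitive; summarising the sequence the two handlers implement:

/*
 * freeze:  reset the device -> disable vq callbacks -> cancel the
 *          control and config work -> drop queued control and port
 *          buffers -> delete the vqs
 * restore: recreate the vqs -> mark the device ready -> refill the
 *          control and per-port in-queues -> re-send PORT_READY (and
 *          PORT_OPEN for ports the guest still holds open) so the
 *          host can resynchronise its view of each port
 */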
+ */ + if (port->guest_connected) + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); + } + return 0; +} +#endif + +static struct virtio_driver virtio_console = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtcons_probe, + .remove = virtcons_remove, + .config_changed = config_intr, +#ifdef CONFIG_PM_SLEEP + .freeze = virtcons_freeze, + .restore = virtcons_restore, +#endif +}; + +static struct virtio_driver virtio_rproc_serial = { + .feature_table = rproc_serial_features, + .feature_table_size = ARRAY_SIZE(rproc_serial_features), + .driver.name = "virtio_rproc_serial", + .driver.owner = THIS_MODULE, + .id_table = rproc_serial_id_table, + .probe = virtcons_probe, + .remove = virtcons_remove, +}; + +static int __init init(void) +{ + int err; + + pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); + if (IS_ERR(pdrvdata.class)) { + err = PTR_ERR(pdrvdata.class); + pr_err("Error %d creating virtio-ports class\n", err); + return err; + } + + pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); + if (!pdrvdata.debugfs_dir) + pr_warning("Error creating debugfs dir for virtio-ports\n"); + INIT_LIST_HEAD(&pdrvdata.consoles); + INIT_LIST_HEAD(&pdrvdata.portdevs); + + err = register_virtio_driver(&virtio_console); + if (err < 0) { + pr_err("Error %d registering virtio driver\n", err); + goto free; + } + err = register_virtio_driver(&virtio_rproc_serial); + if (err < 0) { + pr_err("Error %d registering virtio rproc serial driver\n", + err); + goto unregister; + } + return 0; +unregister: + unregister_virtio_driver(&virtio_console); +free: + debugfs_remove_recursive(pdrvdata.debugfs_dir); + class_destroy(pdrvdata.class); + return err; +} + +static void __exit fini(void) +{ + reclaim_dma_bufs(); + + unregister_virtio_driver(&virtio_console); + unregister_virtio_driver(&virtio_rproc_serial); + + class_destroy(pdrvdata.class); + debugfs_remove_recursive(pdrvdata.debugfs_dir); +} +module_init(init); +module_exit(fini); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio console driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/xilinx_hwicap/Makefile b/kernel/drivers/char/xilinx_hwicap/Makefile new file mode 100644 index 000000000..5491cbc42 --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Xilinx OPB hwicap driver +# + +obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap_m.o + +xilinx_hwicap_m-y := xilinx_hwicap.o fifo_icap.o buffer_icap.o diff --git a/kernel/drivers/char/xilinx_hwicap/buffer_icap.c b/kernel/drivers/char/xilinx_hwicap/buffer_icap.c new file mode 100644 index 000000000..05d897764 --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/buffer_icap.c @@ -0,0 +1,365 @@ +/***************************************************************************** + * + * Author: Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" + * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND + * SOLUTIONS FOR XILINX DEVICES. 
BY PROVIDING THIS DESIGN, CODE, + * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, + * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION + * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, + * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE + * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY + * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE + * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR + * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF + * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * (c) Copyright 2003-2008 Xilinx Inc. + * All rights reserved. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +#include "buffer_icap.h" + +/* Indicates how many bytes will fit in a buffer. (1 BRAM) */ +#define XHI_MAX_BUFFER_BYTES 2048 +#define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2) + +/* File access and error constants */ +#define XHI_DEVICE_READ_ERROR -1 +#define XHI_DEVICE_WRITE_ERROR -2 +#define XHI_BUFFER_OVERFLOW_ERROR -3 + +#define XHI_DEVICE_READ 0x1 +#define XHI_DEVICE_WRITE 0x0 + +/* Constants for checking transfer status */ +#define XHI_CYCLE_DONE 0 +#define XHI_CYCLE_EXECUTING 1 + +/* buffer_icap register offsets */ + +/* Size of transfer, read & write */ +#define XHI_SIZE_REG_OFFSET 0x800L +/* offset into bram, read & write */ +#define XHI_BRAM_OFFSET_REG_OFFSET 0x804L +/* Read not Configure, direction of transfer. Write only */ +#define XHI_RNC_REG_OFFSET 0x808L +/* Indicates transfer complete. Read only */ +#define XHI_STATUS_REG_OFFSET 0x80CL + +/* Constants for setting the RNC register */ +#define XHI_CONFIGURE 0x0UL +#define XHI_READBACK 0x1UL + +/* Constants for the Done register */ +#define XHI_NOT_FINISHED 0x0UL +#define XHI_FINISHED 0x1UL + +#define XHI_BUFFER_START 0 + +/** + * buffer_icap_get_status - Get the contents of the status register. + * @drvdata: a pointer to the drvdata. + * + * The status register contains the ICAP status and the done bit. + * + * D8 - cfgerr + * D7 - dalign + * D6 - rip + * D5 - in_abort_l + * D4 - Always 1 + * D3 - Always 1 + * D2 - Always 1 + * D1 - Always 1 + * D0 - Done bit + **/ +u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata) +{ + return in_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET); +} + +/** + * buffer_icap_get_bram - Reads data from the storage buffer bram. + * @base_address: contains the base address of the component. + * @offset: The word offset from which the data should be read. + * + * A bram is used as a configuration memory cache. One frame of data can + * be stored in this "storage buffer". + **/ +static inline u32 buffer_icap_get_bram(void __iomem *base_address, + u32 offset) +{ + return in_be32(base_address + (offset << 2)); +} + +/** + * buffer_icap_busy - Return true if the icap device is busy + * @base_address: is the base address of the device + * + * The queries the low order bit of the status register, which + * indicates whether the current configuration or readback operation + * has completed. 
+ **/
+static inline bool buffer_icap_busy(void __iomem *base_address)
+{
+	u32 status = in_be32(base_address + XHI_STATUS_REG_OFFSET);
+	return (status & 1) == XHI_NOT_FINISHED;
+}
+
+/**
+ * buffer_icap_set_size - Set the size register.
+ * @base_address: is the base address of the device
+ * @data: The size in bytes.
+ *
+ * The size register holds the number of 8 bit bytes to transfer between
+ * bram and the icap (or icap to bram).
+ **/
+static inline void buffer_icap_set_size(void __iomem *base_address,
+		u32 data)
+{
+	out_be32(base_address + XHI_SIZE_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_offset - Set the bram offset register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The bram offset register holds the starting bram address to transfer
+ * data from during configuration or write data to during readback.
+ **/
+static inline void buffer_icap_set_offset(void __iomem *base_address,
+		u32 data)
+{
+	out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The RNC register determines the direction of the data transfer. It
+ * controls whether a configuration or readback takes place. Writing to
+ * this register initiates the transfer. A value of 1 initiates a
+ * readback while writing a value of 0 initiates a configuration.
+ **/
+static inline void buffer_icap_set_rnc(void __iomem *base_address,
+		u32 data)
+{
+	out_be32(base_address + XHI_RNC_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_bram - Write data to the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset at which the data should be written.
+ * @data: The value to be written to the bram offset.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline void buffer_icap_set_bram(void __iomem *base_address,
+		u32 offset, u32 data)
+{
+	out_be32(base_address + (offset << 2), data);
+}
+
+/**
+ * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
+ *		device (ICAP).
+ **/
+static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
+		u32 offset, u32 count)
+{
+
+	s32 retries = 0;
+	void __iomem *base_address = drvdata->base_address;
+
+	if (buffer_icap_busy(base_address))
+		return -EBUSY;
+
+	if ((offset + count) > XHI_MAX_BUFFER_INTS)
+		return -EINVAL;
+
+	/* setSize count*4 to get bytes. */
+	buffer_icap_set_size(base_address, (count << 2));
+	buffer_icap_set_offset(base_address, offset);
+	buffer_icap_set_rnc(base_address, XHI_READBACK);
+
+	while (buffer_icap_busy(base_address)) {
+		retries++;
+		if (retries > XHI_MAX_RETRIES)
+			return -EBUSY;
+	}
+	return 0;
+
+};
+
+/**
+ * buffer_icap_device_write - Transfer bytes from the storage buffer to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to write to the
+ *		device (ICAP).
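+ *
+ * The transfer itself is three register writes followed by the bounded
+ * busy-wait shown in buffer_icap_busy(); a sketch mirroring the body
+ * below:
+ *
+ *	buffer_icap_set_size(base_address, count << 2);
+ *	buffer_icap_set_offset(base_address, offset);
+ *	buffer_icap_set_rnc(base_address, XHI_CONFIGURE);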
+ **/ +static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, + u32 offset, u32 count) +{ + + s32 retries = 0; + void __iomem *base_address = drvdata->base_address; + + if (buffer_icap_busy(base_address)) + return -EBUSY; + + if ((offset + count) > XHI_MAX_BUFFER_INTS) + return -EINVAL; + + /* setSize count*4 to get bytes. */ + buffer_icap_set_size(base_address, count << 2); + buffer_icap_set_offset(base_address, offset); + buffer_icap_set_rnc(base_address, XHI_CONFIGURE); + + while (buffer_icap_busy(base_address)) { + retries++; + if (retries > XHI_MAX_RETRIES) + return -EBUSY; + } + return 0; + +}; + +/** + * buffer_icap_reset - Reset the logic of the icap device. + * @drvdata: a pointer to the drvdata. + * + * Writing to the status register resets the ICAP logic in an internal + * version of the core. For the version of the core published in EDK, + * this is a noop. + **/ +void buffer_icap_reset(struct hwicap_drvdata *drvdata) +{ + out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE); +} + +/** + * buffer_icap_set_configuration - Load a partial bitstream from system memory. + * @drvdata: a pointer to the drvdata. + * @data: Kernel address of the partial bitstream. + * @size: the size of the partial bitstream in 32 bit words. + **/ +int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, + u32 size) +{ + int status; + s32 buffer_count = 0; + s32 num_writes = 0; + bool dirty = 0; + u32 i; + void __iomem *base_address = drvdata->base_address; + + /* Loop through all the data */ + for (i = 0, buffer_count = 0; i < size; i++) { + + /* Copy data to bram */ + buffer_icap_set_bram(base_address, buffer_count, data[i]); + dirty = 1; + + if (buffer_count < XHI_MAX_BUFFER_INTS - 1) { + buffer_count++; + continue; + } + + /* Write data to ICAP */ + status = buffer_icap_device_write( + drvdata, + XHI_BUFFER_START, + XHI_MAX_BUFFER_INTS); + if (status != 0) { + /* abort. */ + buffer_icap_reset(drvdata); + return status; + } + + buffer_count = 0; + num_writes++; + dirty = 0; + } + + /* Write unwritten data to ICAP */ + if (dirty) { + /* Write data to ICAP */ + status = buffer_icap_device_write(drvdata, XHI_BUFFER_START, + buffer_count); + if (status != 0) { + /* abort. */ + buffer_icap_reset(drvdata); + } + return status; + } + + return 0; +}; + +/** + * buffer_icap_get_configuration - Read configuration data from the device. + * @drvdata: a pointer to the drvdata. + * @data: Address of the data representing the partial bitstream + * @size: the size of the partial bitstream in 32 bit words. + **/ +int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, + u32 size) +{ + int status; + s32 buffer_count = 0; + s32 read_count = 0; + u32 i; + void __iomem *base_address = drvdata->base_address; + + /* Loop through all the data */ + for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) { + if (buffer_count == XHI_MAX_BUFFER_INTS) { + u32 words_remaining = size - i; + u32 words_to_read = + words_remaining < + XHI_MAX_BUFFER_INTS ? words_remaining : + XHI_MAX_BUFFER_INTS; + + /* Read data from ICAP */ + status = buffer_icap_device_read( + drvdata, + XHI_BUFFER_START, + words_to_read); + if (status != 0) { + /* abort. 
*/ + buffer_icap_reset(drvdata); + return status; + } + + buffer_count = 0; + read_count++; + } + + /* Copy data from bram */ + data[i] = buffer_icap_get_bram(base_address, buffer_count); + buffer_count++; + } + + return 0; +}; diff --git a/kernel/drivers/char/xilinx_hwicap/buffer_icap.h b/kernel/drivers/char/xilinx_hwicap/buffer_icap.h new file mode 100644 index 000000000..d4f419ee8 --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/buffer_icap.h @@ -0,0 +1,54 @@ +/***************************************************************************** + * + * Author: Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" + * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND + * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, + * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, + * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION + * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, + * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE + * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY + * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE + * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR + * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF + * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * (c) Copyright 2003-2008 Xilinx Inc. + * All rights reserved. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +#ifndef XILINX_BUFFER_ICAP_H_ /* prevent circular inclusions */ +#define XILINX_BUFFER_ICAP_H_ /* by using protection macros */ + +#include +#include +#include + +#include +#include "xilinx_hwicap.h" + +/* Loads a partial bitstream from system memory. */ +int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, + u32 Size); + +/* Loads a partial bitstream from system memory. */ +int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, + u32 Size); + +u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata); +void buffer_icap_reset(struct hwicap_drvdata *drvdata); + +#endif diff --git a/kernel/drivers/char/xilinx_hwicap/fifo_icap.c b/kernel/drivers/char/xilinx_hwicap/fifo_icap.c new file mode 100644 index 000000000..02225eb19 --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/fifo_icap.c @@ -0,0 +1,393 @@ +/***************************************************************************** + * + * Author: Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" + * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND + * SOLUTIONS FOR XILINX DEVICES. 
BY PROVIDING THIS DESIGN, CODE, + * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, + * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION + * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, + * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE + * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY + * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE + * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR + * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF + * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * (c) Copyright 2007-2008 Xilinx Inc. + * All rights reserved. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +#include "fifo_icap.h" + +/* Register offsets for the XHwIcap device. */ +#define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */ +#define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */ +#define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */ +#define XHI_WF_OFFSET 0x100 /* Write FIFO */ +#define XHI_RF_OFFSET 0x104 /* Read FIFO */ +#define XHI_SZ_OFFSET 0x108 /* Size Register */ +#define XHI_CR_OFFSET 0x10C /* Control Register */ +#define XHI_SR_OFFSET 0x110 /* Status Register */ +#define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */ +#define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */ + +/* Device Global Interrupt Enable Register (GIER) bit definitions */ + +#define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */ + +/** + * HwIcap Device Interrupt Status/Enable Registers + * + * Interrupt Status Register (IPISR) : This register holds the + * interrupt status flags for the device. These bits are toggle on + * write. + * + * Interrupt Enable Register (IPIER) : This register is used to enable + * interrupt sources for the device. + * Writing a '1' to a bit enables the corresponding interrupt. + * Writing a '0' to a bit disables the corresponding interrupt. + * + * IPISR/IPIER registers have the same bit definitions and are only defined + * once. + */ +#define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */ +#define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */ +#define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */ +#define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */ +#define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */ + +/* Control Register (CR) */ +#define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */ +#define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */ +#define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */ +#define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */ + + +#define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */ +#define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */ +/* The maximum amount we can request from fifo_icap_get_configuration + at once, in bytes. */ +#define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF + + +/** + * fifo_icap_fifo_write - Write data to the write FIFO. + * @drvdata: a pointer to the drvdata. + * @data: the 32-bit value to be written to the FIFO. + * + * This function will silently fail if the fifo is full. 
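+ *
+ * Callers therefore bound each burst by the vacancy register first;
+ * fifo_icap_set_configuration() below follows this sketch:
+ *
+ *	while (write_fifo_vacancy != 0 && remaining_words > 0) {
+ *		fifo_icap_fifo_write(drvdata, *frame_buffer++);
+ *		write_fifo_vacancy--;
+ *		remaining_words--;
+ *	}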
+ **/
+static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
+		u32 data)
+{
+	dev_dbg(drvdata->dev, "fifo_write: %x\n", data);
+	out_be32(drvdata->base_address + XHI_WF_OFFSET, data);
+}
+
+/**
+ * fifo_icap_fifo_read - Read data from the Read FIFO.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function will silently fail if the fifo is empty.
+ **/
+static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
+{
+	u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET);
+	dev_dbg(drvdata->dev, "fifo_read: %x\n", data);
+	return data;
+}
+
+/**
+ * fifo_icap_set_read_size - Set the size register.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the size of the following read transaction, in words.
+ **/
+static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
+		u32 data)
+{
+	out_be32(drvdata->base_address + XHI_SZ_OFFSET, data);
+}
+
+/**
+ * fifo_icap_start_config - Initiate a configuration (write) to the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
+{
+	out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK);
+	dev_dbg(drvdata->dev, "configuration started\n");
+}
+
+/**
+ * fifo_icap_start_readback - Initiate a readback from the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
+{
+	out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK);
+	dev_dbg(drvdata->dev, "readback started\n");
+}
+
+/**
+ * fifo_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+	u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+	dev_dbg(drvdata->dev, "Getting status = %x\n", status);
+	return status;
+}
+
+/**
+ * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
+{
+	u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+	return (status & XHI_SR_DONE_MASK) ? 0 : 1;
+}
+
+/**
+ * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely pushed into the write fifo.
+ **/
+static inline u32 fifo_icap_write_fifo_vacancy(
+		struct hwicap_drvdata *drvdata)
+{
+	return in_be32(drvdata->base_address + XHI_WFV_OFFSET);
+}
+
+/**
+ * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely read from the read fifo.
+ **/
+static inline u32 fifo_icap_read_fifo_occupancy(
+		struct hwicap_drvdata *drvdata)
+{
+	return in_be32(drvdata->base_address + XHI_RFO_OFFSET);
+}
+
+/**
+ * fifo_icap_set_configuration - Send configuration data to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: a pointer to the data to be written to the
+ *		ICAP device.
+ * @num_words: the number of words (32 bit) to write to the ICAP
+ *		device.
+ *
+ * This function writes the given user data to the Write FIFO in
+ * polled mode and starts the transfer of the data to
+ * the ICAP device.
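+ *
+ * The character-device layer does not call this directly; it goes
+ * through the per-device ops table, e.g. (as hwicap_command_desync()
+ * does in xilinx_hwicap.c):
+ *
+ *	status = drvdata->config->set_configuration(drvdata,
+ *			&buffer[0], index);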
+ **/
+int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
+		u32 *frame_buffer, u32 num_words)
+{
+
+	u32 write_fifo_vacancy = 0;
+	u32 retries = 0;
+	u32 remaining_words;
+
+	dev_dbg(drvdata->dev, "fifo_set_configuration\n");
+
+	/*
+	 * Check if the ICAP device is Busy with the last Read/Write
+	 */
+	if (fifo_icap_busy(drvdata))
+		return -EBUSY;
+
+	/*
+	 * Set up the buffer pointer and the words to be transferred.
+	 */
+	remaining_words = num_words;
+
+	while (remaining_words > 0) {
+		/*
+		 * Wait until there is room in the write FIFO.
+		 */
+		while (write_fifo_vacancy == 0) {
+			write_fifo_vacancy =
+				fifo_icap_write_fifo_vacancy(drvdata);
+			retries++;
+			if (retries > XHI_MAX_RETRIES)
+				return -EIO;
+		}
+
+		/*
+		 * Write data into the Write FIFO.
+		 */
+		while ((write_fifo_vacancy != 0) &&
+				(remaining_words > 0)) {
+			fifo_icap_fifo_write(drvdata, *frame_buffer);
+
+			remaining_words--;
+			write_fifo_vacancy--;
+			frame_buffer++;
+		}
+		/* Start pushing whatever is in the FIFO into the ICAP. */
+		fifo_icap_start_config(drvdata);
+	}
+
+	/* Wait until the write has finished. */
+	while (fifo_icap_busy(drvdata)) {
+		retries++;
+		if (retries > XHI_MAX_RETRIES)
+			break;
+	}
+
+	dev_dbg(drvdata->dev, "done fifo_set_configuration\n");
+
+	/*
+	 * If not all of the requested words could be written to
+	 * the device, indicate failure.
+	 */
+	if (remaining_words != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * fifo_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: Address of the buffer that receives the partial bitstream
+ * @num_words: the size of the partial bitstream in 32 bit words.
+ *
+ * This function reads the specified number of words from the ICAP device in
+ * the polled mode.
+ */
+int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
+		u32 *frame_buffer, u32 num_words)
+{
+
+	u32 read_fifo_occupancy = 0;
+	u32 retries = 0;
+	u32 *data = frame_buffer;
+	u32 remaining_words;
+	u32 words_to_read;
+
+	dev_dbg(drvdata->dev, "fifo_get_configuration\n");
+
+	/*
+	 * Check if the ICAP device is Busy with the last Write/Read
+	 */
+	if (fifo_icap_busy(drvdata))
+		return -EBUSY;
+
+	remaining_words = num_words;
+
+	while (remaining_words > 0) {
+		words_to_read = remaining_words;
+		/* The hardware has a limit on the number of words
+		   that can be read at one time. */
+		if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS)
+			words_to_read = XHI_MAX_READ_TRANSACTION_WORDS;
+
+		remaining_words -= words_to_read;
+
+		fifo_icap_set_read_size(drvdata, words_to_read);
+		fifo_icap_start_readback(drvdata);
+
+		while (words_to_read > 0) {
+			/* Wait until we have some data in the fifo. */
+			while (read_fifo_occupancy == 0) {
+				read_fifo_occupancy =
+					fifo_icap_read_fifo_occupancy(drvdata);
+				retries++;
+				if (retries > XHI_MAX_RETRIES)
+					return -EIO;
+			}
+
+			if (read_fifo_occupancy > words_to_read)
+				read_fifo_occupancy = words_to_read;
+
+			words_to_read -= read_fifo_occupancy;
+
+			/* Read the data from the Read FIFO. */
+			while (read_fifo_occupancy != 0) {
+				*data++ = fifo_icap_fifo_read(drvdata);
+				read_fifo_occupancy--;
+			}
+		}
+	}
+
+	dev_dbg(drvdata->dev, "done fifo_get_configuration\n");
+
+	return 0;
+}
+
+/**
+ * fifo_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function forces the software reset of the complete HWICAP device.
+ * All the registers will return to the default value and the FIFO is also
+ * flushed as a part of this software reset.
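+ *
+ * A hypothetical recovery sequence after a failed transfer (both
+ * helpers are defined in this file; the pairing is illustrative):
+ *
+ *	fifo_icap_reset(drvdata);
+ *	fifo_icap_flush_fifo(drvdata);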
+ */ +void fifo_icap_reset(struct hwicap_drvdata *drvdata) +{ + u32 reg_data; + /* + * Reset the device by setting/clearing the RESET bit in the + * Control Register. + */ + reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET); + + out_be32(drvdata->base_address + XHI_CR_OFFSET, + reg_data | XHI_CR_SW_RESET_MASK); + + out_be32(drvdata->base_address + XHI_CR_OFFSET, + reg_data & (~XHI_CR_SW_RESET_MASK)); + +} + +/** + * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. + * @drvdata: a pointer to the drvdata. + */ +void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) +{ + u32 reg_data; + /* + * Flush the FIFO by setting/clearing the FIFO Clear bit in the + * Control Register. + */ + reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET); + + out_be32(drvdata->base_address + XHI_CR_OFFSET, + reg_data | XHI_CR_FIFO_CLR_MASK); + + out_be32(drvdata->base_address + XHI_CR_OFFSET, + reg_data & (~XHI_CR_FIFO_CLR_MASK)); +} + diff --git a/kernel/drivers/char/xilinx_hwicap/fifo_icap.h b/kernel/drivers/char/xilinx_hwicap/fifo_icap.h new file mode 100644 index 000000000..4c9dd9a3b --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/fifo_icap.h @@ -0,0 +1,59 @@ +/***************************************************************************** + * + * Author: Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" + * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND + * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, + * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, + * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION + * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, + * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE + * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY + * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE + * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR + * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF + * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * (c) Copyright 2007-2008 Xilinx Inc. + * All rights reserved. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +#ifndef XILINX_FIFO_ICAP_H_ /* prevent circular inclusions */ +#define XILINX_FIFO_ICAP_H_ /* by using protection macros */ + +#include +#include +#include + +#include +#include "xilinx_hwicap.h" + +/* Reads integers from the device into the storage buffer. */ +int fifo_icap_get_configuration( + struct hwicap_drvdata *drvdata, + u32 *FrameBuffer, + u32 NumWords); + +/* Writes integers to the device from the storage buffer. 
*/ +int fifo_icap_set_configuration( + struct hwicap_drvdata *drvdata, + u32 *FrameBuffer, + u32 NumWords); + +u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata); +void fifo_icap_reset(struct hwicap_drvdata *drvdata); +void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata); + +#endif diff --git a/kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.c new file mode 100644 index 000000000..c07dfe5c4 --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -0,0 +1,894 @@ +/***************************************************************************** + * + * Author: Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" + * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND + * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, + * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, + * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION + * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, + * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE + * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY + * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE + * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR + * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF + * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group + * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group + * (c) Copyright 2007-2008 Xilinx Inc. + * All rights reserved. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +/* + * This is the code behind /dev/icap* -- it allows a user-space + * application to use the Xilinx ICAP subsystem. + * + * The following operations are possible: + * + * open open the port and initialize for access. + * release release port + * write Write a bitstream to the configuration processor. + * read Read a data stream from the configuration processor. + * + * After being opened, the port is initialized and accessed to avoid a + * corrupted first read which may occur with some hardware. The port + * is left in a desynched state, requiring that a synch sequence be + * transmitted before any valid configuration data. A user will have + * exclusive access to the device while it remains open, and the state + * of the ICAP cannot be guaranteed after the device is closed. Note + * that a complete reset of the core and the state of the ICAP cannot + * be performed on many versions of the cores, hence users of this + * device should avoid making inconsistent accesses to the device. In + * particular, accessing the read interface, without first generating + * a write containing a readback packet can leave the ICAP in an + * inaccessible state. + * + * Note that in order to use the read interface, it is first necessary + * to write a request packet to the write interface. 
i.e., it is not + * possible to simply readback the bitstream (or any configuration + * bits) from a device without specifically requesting them first. + * The code to craft such packets is intended to be part of the + * user-space application code that uses this device. The simplest + * way to use this interface is simply: + * + * cp foo.bit /dev/icap0 + * + * Note that unless foo.bit is an appropriately constructed partial + * bitstream, this has a high likelihood of overwriting the design + * currently programmed in the FPGA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_OF +/* For open firmware. */ +#include +#include +#include +#endif + +#include "xilinx_hwicap.h" +#include "buffer_icap.h" +#include "fifo_icap.h" + +#define DRIVER_NAME "icap" + +#define HWICAP_REGS (0x10000) + +#define XHWICAP_MAJOR 259 +#define XHWICAP_MINOR 0 +#define HWICAP_DEVICES 1 + +/* An array, which is set to true when the device is registered. */ +static DEFINE_MUTEX(hwicap_mutex); +static bool probed_devices[HWICAP_DEVICES]; +static struct mutex icap_sem; + +static struct class *icap_class; + +#define UNIMPLEMENTED 0xFFFF + +static const struct config_registers v2_config_registers = { + .CRC = 0, + .FAR = 1, + .FDRI = 2, + .FDRO = 3, + .CMD = 4, + .CTL = 5, + .MASK = 6, + .STAT = 7, + .LOUT = 8, + .COR = 9, + .MFWR = 10, + .FLR = 11, + .KEY = 12, + .CBC = 13, + .IDCODE = 14, + .AXSS = UNIMPLEMENTED, + .C0R_1 = UNIMPLEMENTED, + .CSOB = UNIMPLEMENTED, + .WBSTAR = UNIMPLEMENTED, + .TIMER = UNIMPLEMENTED, + .BOOTSTS = UNIMPLEMENTED, + .CTL_1 = UNIMPLEMENTED, +}; + +static const struct config_registers v4_config_registers = { + .CRC = 0, + .FAR = 1, + .FDRI = 2, + .FDRO = 3, + .CMD = 4, + .CTL = 5, + .MASK = 6, + .STAT = 7, + .LOUT = 8, + .COR = 9, + .MFWR = 10, + .FLR = UNIMPLEMENTED, + .KEY = UNIMPLEMENTED, + .CBC = 11, + .IDCODE = 12, + .AXSS = 13, + .C0R_1 = UNIMPLEMENTED, + .CSOB = UNIMPLEMENTED, + .WBSTAR = UNIMPLEMENTED, + .TIMER = UNIMPLEMENTED, + .BOOTSTS = UNIMPLEMENTED, + .CTL_1 = UNIMPLEMENTED, +}; + +static const struct config_registers v5_config_registers = { + .CRC = 0, + .FAR = 1, + .FDRI = 2, + .FDRO = 3, + .CMD = 4, + .CTL = 5, + .MASK = 6, + .STAT = 7, + .LOUT = 8, + .COR = 9, + .MFWR = 10, + .FLR = UNIMPLEMENTED, + .KEY = UNIMPLEMENTED, + .CBC = 11, + .IDCODE = 12, + .AXSS = 13, + .C0R_1 = 14, + .CSOB = 15, + .WBSTAR = 16, + .TIMER = 17, + .BOOTSTS = 18, + .CTL_1 = 19, +}; + +static const struct config_registers v6_config_registers = { + .CRC = 0, + .FAR = 1, + .FDRI = 2, + .FDRO = 3, + .CMD = 4, + .CTL = 5, + .MASK = 6, + .STAT = 7, + .LOUT = 8, + .COR = 9, + .MFWR = 10, + .FLR = UNIMPLEMENTED, + .KEY = UNIMPLEMENTED, + .CBC = 11, + .IDCODE = 12, + .AXSS = 13, + .C0R_1 = 14, + .CSOB = 15, + .WBSTAR = 16, + .TIMER = 17, + .BOOTSTS = 22, + .CTL_1 = 24, +}; + +/** + * hwicap_command_desync - Send a DESYNC command to the ICAP port. + * @drvdata: a pointer to the drvdata. + * + * This command desynchronizes the ICAP After this command, a + * bitstream containing a NULL packet, followed by a SYNCH packet is + * required before the ICAP will recognize commands. + */ +static int hwicap_command_desync(struct hwicap_drvdata *drvdata) +{ + u32 buffer[4]; + u32 index = 0; + + /* + * Create the data to be written to the ICAP. 
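+ * The four words below form the complete DESYNC sequence: a type 1
+ * write header addressing the CMD register with a word count of 1,
+ * the DESYNCH command code, and two NOOP padding words.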
+ */
+	buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+	buffer[index++] = XHI_CMD_DESYNCH;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+
+	/*
+	 * Write the data to the FIFO and initiate the transfer of data present
+	 * in the FIFO to the ICAP device.
+	 */
+	return drvdata->config->set_configuration(drvdata,
+			&buffer[0], index);
+}
+
+/**
+ * hwicap_get_configuration_register - Query a configuration register.
+ * @drvdata: a pointer to the drvdata.
+ * @reg: a constant which represents the configuration
+ *		register value to be returned.
+ *		Examples: XHI_IDCODE, XHI_FLR.
+ * @reg_data: returns the value of the register.
+ *
+ * Sends a query packet to the ICAP and then receives the response.
+ * The ICAP is left in a synched state.
+ */
+static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+		u32 reg, u32 *reg_data)
+{
+	int status;
+	u32 buffer[6];
+	u32 index = 0;
+
+	/*
+	 * Create the data to be written to the ICAP.
+	 */
+	buffer[index++] = XHI_DUMMY_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_SYNC_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+
+	/*
+	 * Write the data to the FIFO and initiate the transfer of data present
+	 * in the FIFO to the ICAP device.
+	 */
+	status = drvdata->config->set_configuration(drvdata,
+			&buffer[0], index);
+	if (status)
+		return status;
+
+	/* If the syncword was not found, then we need to start over. */
+	status = drvdata->config->get_status(drvdata);
+	if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK)
+		return -EIO;
+
+	index = 0;
+	buffer[index++] = hwicap_type_1_read(reg) | 1;
+	buffer[index++] = XHI_NOOP_PACKET;
+	buffer[index++] = XHI_NOOP_PACKET;
+
+	/*
+	 * Write the data to the FIFO and initiate the transfer of data present
+	 * in the FIFO to the ICAP device.
+	 */
+	status = drvdata->config->set_configuration(drvdata,
+			&buffer[0], index);
+	if (status)
+		return status;
+
+	/*
+	 * Read the configuration register
+	 */
+	status = drvdata->config->get_configuration(drvdata, reg_data, 1);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+{
+	int status;
+	u32 idcode;
+
+	dev_dbg(drvdata->dev, "initializing\n");
+
+	/* Abort any current transaction, to make sure we have the
+	 * ICAP in a good state. */
+	dev_dbg(drvdata->dev, "Reset...\n");
+	drvdata->config->reset(drvdata);
+
+	dev_dbg(drvdata->dev, "Desync...\n");
+	status = hwicap_command_desync(drvdata);
+	if (status)
+		return status;
+
+	/* Attempt to read the IDCODE from ICAP. This
+	 * may not be returned correctly, due to the design of the
+	 * hardware.
+	 */
+	dev_dbg(drvdata->dev, "Reading IDCODE...\n");
+	status = hwicap_get_configuration_register(
+			drvdata, drvdata->config_regs->IDCODE, &idcode);
+	dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
+	if (status)
+		return status;
+
+	dev_dbg(drvdata->dev, "Desync...\n");
+	status = hwicap_command_desync(drvdata);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static ssize_t
+hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct hwicap_drvdata *drvdata = file->private_data;
+	ssize_t bytes_to_read = 0;
+	u32 *kbuf;
+	u32 words;
+	u32 bytes_remaining;
+	int status;
+
+	status = mutex_lock_interruptible(&drvdata->sem);
+	if (status)
+		return status;
+
+	if (drvdata->read_buffer_in_use) {
+		/* If there are leftover bytes in the buffer, just */
+		/* return them and don't try to read more from the */
+		/* ICAP device. */
+		bytes_to_read =
+			(count < drvdata->read_buffer_in_use) ? count :
+			drvdata->read_buffer_in_use;
+
+		/* Return the data currently in the read buffer. */
+		if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
+			status = -EFAULT;
+			goto error;
+		}
+		drvdata->read_buffer_in_use -= bytes_to_read;
+		memmove(drvdata->read_buffer,
+				drvdata->read_buffer + bytes_to_read,
+				4 - bytes_to_read);
+	} else {
+		/* Get new data from the ICAP, and return what was requested. */
+		kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
+		if (!kbuf) {
+			status = -ENOMEM;
+			goto error;
+		}
+
+		/* The ICAP device is only able to read complete */
+		/* words. If a number of bytes that do not correspond */
+		/* to complete words is requested, then we read enough */
+		/* words to get the required number of bytes, and then */
+		/* save the remaining bytes for the next read. */
+
+		/* Determine the number of words to read, rounding up */
+		/* if necessary. */
+		words = ((count + 3) >> 2);
+		bytes_to_read = words << 2;
+
+		if (bytes_to_read > PAGE_SIZE)
+			bytes_to_read = PAGE_SIZE;
+
+		/* Ensure we only read a complete number of words. */
+		bytes_remaining = bytes_to_read & 3;
+		bytes_to_read &= ~3;
+		words = bytes_to_read >> 2;
+
+		status = drvdata->config->get_configuration(drvdata,
+				kbuf, words);
+
+		/* If we didn't read correctly, then bail out. */
+		if (status) {
+			free_page((unsigned long)kbuf);
+			goto error;
+		}
+
+		/* If we fail to return the data to the user, then bail out. */
+		if (copy_to_user(buf, kbuf, bytes_to_read)) {
+			free_page((unsigned long)kbuf);
+			status = -EFAULT;
+			goto error;
+		}
+		memcpy(drvdata->read_buffer,
+				kbuf,
+				bytes_remaining);
+		drvdata->read_buffer_in_use = bytes_remaining;
+		free_page((unsigned long)kbuf);
+	}
+	status = bytes_to_read;
+ error:
+	mutex_unlock(&drvdata->sem);
+	return status;
+}
+
+static ssize_t
+hwicap_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct hwicap_drvdata *drvdata = file->private_data;
+	ssize_t written = 0;
+	ssize_t left = count;
+	u32 *kbuf;
+	ssize_t len;
+	ssize_t status;
+
+	status = mutex_lock_interruptible(&drvdata->sem);
+	if (status)
+		return status;
+
+	left += drvdata->write_buffer_in_use;
+
+	/* Only write multiples of 4 bytes. */
+	if (left < 4) {
+		status = 0;
+		goto error;
+	}
+
+	kbuf = (u32 *) __get_free_page(GFP_KERNEL);
+	if (!kbuf) {
+		status = -ENOMEM;
+		goto error;
+	}
+
+	while (left > 3) {
+		/* only write multiples of 4 bytes, so there might */
+		/* be as many as 3 bytes left (at the end).
*/ + len = left; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + len &= ~3; + + if (drvdata->write_buffer_in_use) { + memcpy(kbuf, drvdata->write_buffer, + drvdata->write_buffer_in_use); + if (copy_from_user( + (((char *)kbuf) + drvdata->write_buffer_in_use), + buf + written, + len - (drvdata->write_buffer_in_use))) { + free_page((unsigned long)kbuf); + status = -EFAULT; + goto error; + } + } else { + if (copy_from_user(kbuf, buf + written, len)) { + free_page((unsigned long)kbuf); + status = -EFAULT; + goto error; + } + } + + status = drvdata->config->set_configuration(drvdata, + kbuf, len >> 2); + + if (status) { + free_page((unsigned long)kbuf); + status = -EFAULT; + goto error; + } + if (drvdata->write_buffer_in_use) { + len -= drvdata->write_buffer_in_use; + left -= drvdata->write_buffer_in_use; + drvdata->write_buffer_in_use = 0; + } + written += len; + left -= len; + } + if ((left > 0) && (left < 4)) { + if (!copy_from_user(drvdata->write_buffer, + buf + written, left)) { + drvdata->write_buffer_in_use = left; + written += left; + left = 0; + } + } + + free_page((unsigned long)kbuf); + status = written; + error: + mutex_unlock(&drvdata->sem); + return status; +} + +static int hwicap_open(struct inode *inode, struct file *file) +{ + struct hwicap_drvdata *drvdata; + int status; + + mutex_lock(&hwicap_mutex); + drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); + + status = mutex_lock_interruptible(&drvdata->sem); + if (status) + goto out; + + if (drvdata->is_open) { + status = -EBUSY; + goto error; + } + + status = hwicap_initialize_hwicap(drvdata); + if (status) { + dev_err(drvdata->dev, "Failed to open file"); + goto error; + } + + file->private_data = drvdata; + drvdata->write_buffer_in_use = 0; + drvdata->read_buffer_in_use = 0; + drvdata->is_open = 1; + + error: + mutex_unlock(&drvdata->sem); + out: + mutex_unlock(&hwicap_mutex); + return status; +} + +static int hwicap_release(struct inode *inode, struct file *file) +{ + struct hwicap_drvdata *drvdata = file->private_data; + int i; + int status = 0; + + mutex_lock(&drvdata->sem); + + if (drvdata->write_buffer_in_use) { + /* Flush write buffer. 
*/ + for (i = drvdata->write_buffer_in_use; i < 4; i++) + drvdata->write_buffer[i] = 0; + + status = drvdata->config->set_configuration(drvdata, + (u32 *) drvdata->write_buffer, 1); + if (status) + goto error; + } + + status = hwicap_command_desync(drvdata); + if (status) + goto error; + + error: + drvdata->is_open = 0; + mutex_unlock(&drvdata->sem); + return status; +} + +static const struct file_operations hwicap_fops = { + .owner = THIS_MODULE, + .write = hwicap_write, + .read = hwicap_read, + .open = hwicap_open, + .release = hwicap_release, + .llseek = noop_llseek, +}; + +static int hwicap_setup(struct device *dev, int id, + const struct resource *regs_res, + const struct hwicap_driver_config *config, + const struct config_registers *config_regs) +{ + dev_t devt; + struct hwicap_drvdata *drvdata = NULL; + int retval = 0; + + dev_info(dev, "Xilinx icap port driver\n"); + + mutex_lock(&icap_sem); + + if (id < 0) { + for (id = 0; id < HWICAP_DEVICES; id++) + if (!probed_devices[id]) + break; + } + if (id < 0 || id >= HWICAP_DEVICES) { + mutex_unlock(&icap_sem); + dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); + return -EINVAL; + } + if (probed_devices[id]) { + mutex_unlock(&icap_sem); + dev_err(dev, "cannot assign to %s%i; it is already in use\n", + DRIVER_NAME, id); + return -EBUSY; + } + + probed_devices[id] = 1; + mutex_unlock(&icap_sem); + + devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id); + + drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); + if (!drvdata) { + dev_err(dev, "Couldn't allocate device private record\n"); + retval = -ENOMEM; + goto failed0; + } + dev_set_drvdata(dev, (void *)drvdata); + + if (!regs_res) { + dev_err(dev, "Couldn't get registers resource\n"); + retval = -EFAULT; + goto failed1; + } + + drvdata->mem_start = regs_res->start; + drvdata->mem_end = regs_res->end; + drvdata->mem_size = resource_size(regs_res); + + if (!request_mem_region(drvdata->mem_start, + drvdata->mem_size, DRIVER_NAME)) { + dev_err(dev, "Couldn't lock memory region at %Lx\n", + (unsigned long long) regs_res->start); + retval = -EBUSY; + goto failed1; + } + + drvdata->devt = devt; + drvdata->dev = dev; + drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size); + if (!drvdata->base_address) { + dev_err(dev, "ioremap() failed\n"); + retval = -ENOMEM; + goto failed2; + } + + drvdata->config = config; + drvdata->config_regs = config_regs; + + mutex_init(&drvdata->sem); + drvdata->is_open = 0; + + dev_info(dev, "ioremap %llx to %p with size %llx\n", + (unsigned long long) drvdata->mem_start, + drvdata->base_address, + (unsigned long long) drvdata->mem_size); + + cdev_init(&drvdata->cdev, &hwicap_fops); + drvdata->cdev.owner = THIS_MODULE; + retval = cdev_add(&drvdata->cdev, devt, 1); + if (retval) { + dev_err(dev, "cdev_add() failed\n"); + goto failed3; + } + + device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id); + return 0; /* success */ + + failed3: + iounmap(drvdata->base_address); + + failed2: + release_mem_region(regs_res->start, drvdata->mem_size); + + failed1: + kfree(drvdata); + + failed0: + mutex_lock(&icap_sem); + probed_devices[id] = 0; + mutex_unlock(&icap_sem); + + return retval; +} + +static struct hwicap_driver_config buffer_icap_config = { + .get_configuration = buffer_icap_get_configuration, + .set_configuration = buffer_icap_set_configuration, + .get_status = buffer_icap_get_status, + .reset = buffer_icap_reset, +}; + +static struct hwicap_driver_config fifo_icap_config = { + .get_configuration = fifo_icap_get_configuration, + 
.set_configuration = fifo_icap_set_configuration, + .get_status = fifo_icap_get_status, + .reset = fifo_icap_reset, +}; + +static int hwicap_remove(struct device *dev) +{ + struct hwicap_drvdata *drvdata; + + drvdata = dev_get_drvdata(dev); + + if (!drvdata) + return 0; + + device_destroy(icap_class, drvdata->devt); + cdev_del(&drvdata->cdev); + iounmap(drvdata->base_address); + release_mem_region(drvdata->mem_start, drvdata->mem_size); + kfree(drvdata); + + mutex_lock(&icap_sem); + probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0; + mutex_unlock(&icap_sem); + return 0; /* success */ +} + +#ifdef CONFIG_OF +static int hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) +{ + struct resource res; + const unsigned int *id; + const char *family; + int rc; + const struct config_registers *regs; + + + rc = of_address_to_resource(op->dev.of_node, 0, &res); + if (rc) { + dev_err(&op->dev, "invalid address\n"); + return rc; + } + + id = of_get_property(op->dev.of_node, "port-number", NULL); + + /* It's most likely that we're using V4, if the family is not + specified */ + regs = &v4_config_registers; + family = of_get_property(op->dev.of_node, "xlnx,family", NULL); + + if (family) { + if (!strcmp(family, "virtex2p")) { + regs = &v2_config_registers; + } else if (!strcmp(family, "virtex4")) { + regs = &v4_config_registers; + } else if (!strcmp(family, "virtex5")) { + regs = &v5_config_registers; + } else if (!strcmp(family, "virtex6")) { + regs = &v6_config_registers; + } + } + return hwicap_setup(&op->dev, id ? *id : -1, &res, config, + regs); +} +#else +static inline int hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) +{ + return -EINVAL; +} +#endif /* CONFIG_OF */ + +static const struct of_device_id hwicap_of_match[]; +static int hwicap_drv_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct resource *res; + const struct config_registers *regs; + const char *family; + + match = of_match_device(hwicap_of_match, &pdev->dev); + if (match) + return hwicap_of_probe(pdev, match->data); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + /* It's most likely that we're using V4, if the family is not + specified */ + regs = &v4_config_registers; + family = pdev->dev.platform_data; + + if (family) { + if (!strcmp(family, "virtex2p")) { + regs = &v2_config_registers; + } else if (!strcmp(family, "virtex4")) { + regs = &v4_config_registers; + } else if (!strcmp(family, "virtex5")) { + regs = &v5_config_registers; + } else if (!strcmp(family, "virtex6")) { + regs = &v6_config_registers; + } + } + + return hwicap_setup(&pdev->dev, pdev->id, res, + &buffer_icap_config, regs); +} + +static int hwicap_drv_remove(struct platform_device *pdev) +{ + return hwicap_remove(&pdev->dev); +} + +#ifdef CONFIG_OF +/* Match table for device tree binding */ +static const struct of_device_id hwicap_of_match[] = { + { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, + { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config}, + {}, +}; +MODULE_DEVICE_TABLE(of, hwicap_of_match); +#else +#define hwicap_of_match NULL +#endif + +static struct platform_driver hwicap_platform_driver = { + .probe = hwicap_drv_probe, + .remove = hwicap_drv_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = hwicap_of_match, + }, +}; + +static int __init hwicap_module_init(void) +{ + dev_t devt; + int retval; + + icap_class = class_create(THIS_MODULE, 
"xilinx_config"); + mutex_init(&icap_sem); + + devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); + retval = register_chrdev_region(devt, + HWICAP_DEVICES, + DRIVER_NAME); + if (retval < 0) + return retval; + + retval = platform_driver_register(&hwicap_platform_driver); + if (retval) + goto failed; + + return retval; + + failed: + unregister_chrdev_region(devt, HWICAP_DEVICES); + + return retval; +} + +static void __exit hwicap_module_cleanup(void) +{ + dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); + + class_destroy(icap_class); + + platform_driver_unregister(&hwicap_platform_driver); + + unregister_chrdev_region(devt, HWICAP_DEVICES); +} + +module_init(hwicap_module_init); +module_exit(hwicap_module_cleanup); + +MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group"); +MODULE_DESCRIPTION("Xilinx ICAP Port Driver"); +MODULE_LICENSE("GPL"); diff --git a/kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.h new file mode 100644 index 000000000..38b145eaf --- /dev/null +++ b/kernel/drivers/char/xilinx_hwicap/xilinx_hwicap.h @@ -0,0 +1,219 @@ +/***************************************************************************** + * + * Author: Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" + * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND + * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, + * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, + * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION + * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, + * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE + * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY + * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE + * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR + * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF + * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * (c) Copyright 2003-2007 Xilinx Inc. + * All rights reserved. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +#ifndef XILINX_HWICAP_H_ /* prevent circular inclusions */ +#define XILINX_HWICAP_H_ /* by using protection macros */ + +#include +#include +#include + +#include + +struct hwicap_drvdata { + u32 write_buffer_in_use; /* Always in [0,3] */ + u8 write_buffer[4]; + u32 read_buffer_in_use; /* Always in [0,3] */ + u8 read_buffer[4]; + resource_size_t mem_start;/* phys. address of the control registers */ + resource_size_t mem_end; /* phys. address of the control registers */ + resource_size_t mem_size; + void __iomem *base_address;/* virt. 
address of the control registers */ + + struct device *dev; + struct cdev cdev; /* Char device structure */ + dev_t devt; + + const struct hwicap_driver_config *config; + const struct config_registers *config_regs; + void *private_data; + bool is_open; + struct mutex sem; +}; + +struct hwicap_driver_config { + /* Read configuration data given by size into the data buffer. + Return 0 if successful. */ + int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data, + u32 size); + /* Write configuration data given by size from the data buffer. + Return 0 if successful. */ + int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data, + u32 size); + /* Get the status register, bit pattern given by: + * D8 - 0 = configuration error + * D7 - 1 = alignment found + * D6 - 1 = readback in progress + * D5 - 0 = abort in progress + * D4 - Always 1 + * D3 - Always 1 + * D2 - Always 1 + * D1 - Always 1 + * D0 - 1 = operation completed + */ + u32 (*get_status)(struct hwicap_drvdata *drvdata); + /* Reset the hw */ + void (*reset)(struct hwicap_drvdata *drvdata); +}; + +/* Number of times to poll the done register. This has to be large + * enough to allow an entire configuration to complete. If an entire + * page (4kb) is configured at once, that could take up to 4k cycles + * with a byte-wide icap interface. In most cases, this driver is + * used with a much smaller fifo, but this should be sufficient in the + * worst case. + */ +#define XHI_MAX_RETRIES 5000 + +/************ Constant Definitions *************/ + +#define XHI_PAD_FRAMES 0x1 + +/* Mask for calculating configuration packet headers */ +#define XHI_WORD_COUNT_MASK_TYPE_1 0x7FFUL +#define XHI_WORD_COUNT_MASK_TYPE_2 0x1FFFFFUL +#define XHI_TYPE_MASK 0x7 +#define XHI_REGISTER_MASK 0xF +#define XHI_OP_MASK 0x3 + +#define XHI_TYPE_SHIFT 29 +#define XHI_REGISTER_SHIFT 13 +#define XHI_OP_SHIFT 27 + +#define XHI_TYPE_1 1 +#define XHI_TYPE_2 2 +#define XHI_OP_WRITE 2 +#define XHI_OP_READ 1 + +/* Address Block Types */ +#define XHI_FAR_CLB_BLOCK 0 +#define XHI_FAR_BRAM_BLOCK 1 +#define XHI_FAR_BRAM_INT_BLOCK 2 + +struct config_registers { + u32 CRC; + u32 FAR; + u32 FDRI; + u32 FDRO; + u32 CMD; + u32 CTL; + u32 MASK; + u32 STAT; + u32 LOUT; + u32 COR; + u32 MFWR; + u32 FLR; + u32 KEY; + u32 CBC; + u32 IDCODE; + u32 AXSS; + u32 C0R_1; + u32 CSOB; + u32 WBSTAR; + u32 TIMER; + u32 BOOTSTS; + u32 CTL_1; +}; + +/* Configuration Commands */ +#define XHI_CMD_NULL 0 +#define XHI_CMD_WCFG 1 +#define XHI_CMD_MFW 2 +#define XHI_CMD_DGHIGH 3 +#define XHI_CMD_RCFG 4 +#define XHI_CMD_START 5 +#define XHI_CMD_RCAP 6 +#define XHI_CMD_RCRC 7 +#define XHI_CMD_AGHIGH 8 +#define XHI_CMD_SWITCH 9 +#define XHI_CMD_GRESTORE 10 +#define XHI_CMD_SHUTDOWN 11 +#define XHI_CMD_GCAPTURE 12 +#define XHI_CMD_DESYNCH 13 +#define XHI_CMD_IPROG 15 /* Only in Virtex5 */ +#define XHI_CMD_CRCC 16 /* Only in Virtex5 */ +#define XHI_CMD_LTIMER 17 /* Only in Virtex5 */ + +/* Packet constants */ +#define XHI_SYNC_PACKET 0xAA995566UL +#define XHI_DUMMY_PACKET 0xFFFFFFFFUL +#define XHI_NOOP_PACKET (XHI_TYPE_1 << XHI_TYPE_SHIFT) +#define XHI_TYPE_2_READ ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \ + (XHI_OP_READ << XHI_OP_SHIFT)) + +#define XHI_TYPE_2_WRITE ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \ + (XHI_OP_WRITE << XHI_OP_SHIFT)) + +#define XHI_TYPE2_CNT_MASK 0x07FFFFFF + +#define XHI_TYPE_1_PACKET_MAX_WORDS 2047UL +#define XHI_TYPE_1_HEADER_BYTES 4 +#define XHI_TYPE_2_HEADER_BYTES 8 + +/* Constant to use for CRC check when CRC has been disabled */ +#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL + 
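+
+/*
+ * Worked example (illustrative): with the shifts above, a type 1 write
+ * header for register 4 (CMD on all supported families) carrying one
+ * data word is
+ *
+ *	(XHI_TYPE_1 << XHI_TYPE_SHIFT) | (4 << XHI_REGISTER_SHIFT) |
+ *	(XHI_OP_WRITE << XHI_OP_SHIFT) | 1
+ *
+ * = 0x20000000 | 0x00008000 | 0x10000000 | 0x1 = 0x30008001,
+ * which is what hwicap_type_1_write() below produces once the caller
+ * ORs in the word count, as hwicap_command_desync() does.
+ */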
+/* Meanings of the bits returned by get_status */
+#define XHI_SR_CFGERR_N_MASK 0x00000100 /* Config Error Mask */
+#define XHI_SR_DALIGN_MASK 0x00000080 /* Data Alignment Mask */
+#define XHI_SR_RIP_MASK 0x00000040 /* Read back Mask */
+#define XHI_SR_IN_ABORT_N_MASK 0x00000020 /* Select Map Abort Mask */
+#define XHI_SR_DONE_MASK 0x00000001 /* Done bit Mask */
+
+/**
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @reg: is the address of the register to be read back.
+ *
+ * Generates a Type 1 read packet header, which is used to indirectly
+ * read registers in the configuration logic. This packet must then
+ * be sent through the icap device, and a return packet received with
+ * the information.
+ **/
+static inline u32 hwicap_type_1_read(u32 reg)
+{
+	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+		(reg << XHI_REGISTER_SHIFT) |
+		(XHI_OP_READ << XHI_OP_SHIFT);
+}
+
+/**
+ * hwicap_type_1_write - Generates a Type 1 write packet header.
+ * @reg: is the address of the register to be written.
+ **/
+static inline u32 hwicap_type_1_write(u32 reg)
+{
+	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+		(reg << XHI_REGISTER_SHIFT) |
+		(XHI_OP_WRITE << XHI_OP_SHIFT);
+}
+
+#endif
diff --git a/kernel/drivers/char/xillybus/Kconfig b/kernel/drivers/char/xillybus/Kconfig
new file mode 100644
index 000000000..b53bdf12d
--- /dev/null
+++ b/kernel/drivers/char/xillybus/Kconfig
@@ -0,0 +1,33 @@
+#
+# Xillybus devices
+#
+
+config XILLYBUS
+	tristate "Xillybus generic FPGA interface"
+	depends on PCI || (OF_ADDRESS && OF_IRQ)
+	select CRC32
+	help
+	  Xillybus is a generic interface for peripherals designed on
+	  programmable logic (FPGA). The driver probes the hardware for
+	  its capabilities, and creates device files accordingly.
+
+	  If unsure, say N.
+
+if XILLYBUS
+
+config XILLYBUS_PCIE
+	tristate "Xillybus over PCIe"
+	depends on PCI_MSI
+	help
+	  Set to M if you want Xillybus to use PCI Express for communicating
+	  with the FPGA.
+
+config XILLYBUS_OF
+	tristate "Xillybus over Device Tree"
+	depends on OF_ADDRESS && OF_IRQ
+	help
+	  Set to M if you want Xillybus to find its resources from the
+	  Open Firmware Flattened Device Tree. If the target is an embedded
+	  system, say M.
+
+endif # if XILLYBUS
diff --git a/kernel/drivers/char/xillybus/Makefile b/kernel/drivers/char/xillybus/Makefile
new file mode 100644
index 000000000..b68b7ebfd
--- /dev/null
+++ b/kernel/drivers/char/xillybus/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xillybus driver
+#
+
+obj-$(CONFIG_XILLYBUS) += xillybus_core.o
+obj-$(CONFIG_XILLYBUS_PCIE) += xillybus_pcie.o
+obj-$(CONFIG_XILLYBUS_OF) += xillybus_of.o
diff --git a/kernel/drivers/char/xillybus/xillybus.h b/kernel/drivers/char/xillybus/xillybus.h
new file mode 100644
index 000000000..b9a9eb6d4
--- /dev/null
+++ b/kernel/drivers/char/xillybus/xillybus.h
@@ -0,0 +1,160 @@
+/*
+ * linux/drivers/misc/xillybus.h
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Header file for the Xillybus FPGA/host framework.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */ + +#ifndef __XILLYBUS_H +#define __XILLYBUS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct xilly_endpoint_hardware; + +struct xilly_buffer { + void *addr; + dma_addr_t dma_addr; + int end_offset; /* Counting elements, not bytes */ +}; + +struct xilly_idt_handle { + unsigned char *chandesc; + unsigned char *idt; + int entries; +}; + +/* + * Read-write confusion: wr_* and rd_* notation sticks to FPGA view, so + * wr_* buffers are those consumed by read(), since the FPGA writes to them + * and vice versa. + */ + +struct xilly_channel { + struct xilly_endpoint *endpoint; + int chan_num; + int log2_element_size; + int seekable; + + struct xilly_buffer **wr_buffers; /* FPGA writes, driver reads! */ + int num_wr_buffers; + unsigned int wr_buf_size; /* In bytes */ + int wr_fpga_buf_idx; + int wr_host_buf_idx; + int wr_host_buf_pos; + int wr_empty; + int wr_ready; /* Significant only when wr_empty == 1 */ + int wr_sleepy; + int wr_eof; + int wr_hangup; + spinlock_t wr_spinlock; + struct mutex wr_mutex; + wait_queue_head_t wr_wait; + wait_queue_head_t wr_ready_wait; + int wr_ref_count; + int wr_synchronous; + int wr_allow_partial; + int wr_exclusive_open; + int wr_supports_nonempty; + + struct xilly_buffer **rd_buffers; /* FPGA reads, driver writes! */ + int num_rd_buffers; + unsigned int rd_buf_size; /* In bytes */ + int rd_fpga_buf_idx; + int rd_host_buf_pos; + int rd_host_buf_idx; + int rd_full; + spinlock_t rd_spinlock; + struct mutex rd_mutex; + wait_queue_head_t rd_wait; + int rd_ref_count; + int rd_allow_partial; + int rd_synchronous; + int rd_exclusive_open; + struct delayed_work rd_workitem; + unsigned char rd_leftovers[4]; +}; + +struct xilly_endpoint { + /* + * One of pdev and dev is always NULL, and the other is a valid + * pointer, depending on the type of device + */ + struct pci_dev *pdev; + struct device *dev; + struct xilly_endpoint_hardware *ephw; + + struct list_head ep_list; + int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. 
 */
+	__iomem void *registers;
+	int fatal_error;
+
+	struct mutex register_mutex;
+	wait_queue_head_t ep_wait;
+
+	/* Channels and message handling */
+	struct cdev cdev;
+
+	int major;
+	int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
+
+	int num_channels; /* EXCLUDING message buffer */
+	struct xilly_channel **channels;
+	int msg_counter;
+	int failed_messages;
+	int idtlen;
+
+	u32 *msgbuf_addr;
+	dma_addr_t msgbuf_dma_addr;
+	unsigned int msg_buf_size;
+};
+
+struct xilly_endpoint_hardware {
+	struct module *owner;
+	void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
+				    dma_addr_t,
+				    size_t,
+				    int);
+	void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
+				       dma_addr_t,
+				       size_t,
+				       int);
+	int (*map_single)(struct xilly_endpoint *,
+			  void *,
+			  size_t,
+			  int,
+			  dma_addr_t *);
+};
+
+struct xilly_mapping {
+	void *device;
+	dma_addr_t dma_addr;
+	size_t size;
+	int direction;
+};
+
+irqreturn_t xillybus_isr(int irq, void *data);
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+					      struct device *dev,
+					      struct xilly_endpoint_hardware
+					      *ephw);
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint);
+
+#endif /* __XILLYBUS_H */
diff --git a/kernel/drivers/char/xillybus/xillybus_core.c b/kernel/drivers/char/xillybus/xillybus_core.c
new file mode 100644
index 000000000..77d6c127e
--- /dev/null
+++ b/kernel/drivers/char/xillybus/xillybus_core.c
@@ -0,0 +1,2105 @@
+/*
+ * linux/drivers/misc/xillybus_core.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus core functions");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.07");
+MODULE_ALIAS("xillybus_core");
+MODULE_LICENSE("GPL v2");
+
+/* General timeout is 100 ms, rx timeout is 10 ms */
+#define XILLY_RX_TIMEOUT (10*HZ/1000)
+#define XILLY_TIMEOUT (100*HZ/1000)
+
+#define fpga_msg_ctrl_reg 0x0008
+#define fpga_dma_control_reg 0x0020
+#define fpga_dma_bufno_reg 0x0024
+#define fpga_dma_bufaddr_lowaddr_reg 0x0028
+#define fpga_dma_bufaddr_highaddr_reg 0x002c
+#define fpga_buf_ctrl_reg 0x0030
+#define fpga_buf_offset_reg 0x0034
+#define fpga_endian_reg 0x0040
+
+#define XILLYMSG_OPCODE_RELEASEBUF 1
+#define XILLYMSG_OPCODE_QUIESCEACK 2
+#define XILLYMSG_OPCODE_FIFOEOF 3
+#define XILLYMSG_OPCODE_FATAL_ERROR 4
+#define XILLYMSG_OPCODE_NONEMPTY 5
+
+static const char xillyname[] = "xillybus";
+
+static struct class *xillybus_class;
+
+/*
+ * ep_list_lock is the last lock to be taken; no other lock requests are
+ * allowed while holding it. It merely protects list_of_endpoints, and not
+ * the endpoints listed in it.
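+ *
+ * A sketch of the pattern used by xillybus_open() below: the list lock
+ * is taken on its own and dropped before any other lock is requested:
+ *
+ *	mutex_lock(&ep_list_lock);
+ *	...scan list_of_endpoints for the matching major/minor...
+ *	mutex_unlock(&ep_list_lock);
+ *	rc = mutex_lock_interruptible(&channel->wr_mutex);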
+ */ + +static LIST_HEAD(list_of_endpoints); +static struct mutex ep_list_lock; +static struct workqueue_struct *xillybus_wq; + +/* + * Locking scheme: Mutexes protect invocations of character device methods. + * If both locks are taken, wr_mutex is taken first, rd_mutex second. + * + * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the + * buffers' end_offset fields against changes made by IRQ handler (and in + * theory, other file request handlers, but the mutex handles that). Nothing + * else. + * They are held for short direct memory manipulations. Needless to say, + * no mutex locking is allowed when a spinlock is held. + * + * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset. + * + * register_mutex is endpoint-specific, and is held when non-atomic + * register operations are performed. wr_mutex and rd_mutex may be + * held when register_mutex is taken, but none of the spinlocks. Note that + * register_mutex doesn't protect against sporadic buf_ctrl_reg writes + * which are unrelated to buf_offset_reg, since they are harmless. + * + * Blocking on the wait queues is allowed with mutexes held, but not with + * spinlocks. + * + * Only interruptible blocking is allowed on mutexes and wait queues. + * + * All in all, the locking order goes (with skips allowed, of course): + * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock + */ + +static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf) +{ + int opcode; + int msg_channel, msg_bufno, msg_data, msg_dir; + + opcode = (buf[0] >> 24) & 0xff; + msg_dir = buf[0] & 1; + msg_channel = (buf[0] >> 1) & 0x7ff; + msg_bufno = (buf[0] >> 12) & 0x3ff; + msg_data = buf[1] & 0xfffffff; + + dev_warn(endpoint->dev, + "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n", + opcode, msg_channel, msg_dir, msg_bufno, msg_data); +} + +/* + * xillybus_isr assumes the interrupt is allocated exclusively to it, + * which is the natural case MSI and several other hardware-oriented + * interrupts. Sharing is not allowed. + */ + +irqreturn_t xillybus_isr(int irq, void *data) +{ + struct xilly_endpoint *ep = data; + u32 *buf; + unsigned int buf_size; + int i; + int opcode; + unsigned int msg_channel, msg_bufno, msg_data, msg_dir; + struct xilly_channel *channel; + + buf = ep->msgbuf_addr; + buf_size = ep->msg_buf_size/sizeof(u32); + + ep->ephw->hw_sync_sgl_for_cpu(ep, + ep->msgbuf_dma_addr, + ep->msg_buf_size, + DMA_FROM_DEVICE); + + for (i = 0; i < buf_size; i += 2) { + if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { + malformed_message(ep, &buf[i]); + dev_warn(ep->dev, + "Sending a NACK on counter %x (instead of %x) on entry %d\n", + ((buf[i+1] >> 28) & 0xf), + ep->msg_counter, + i/2); + + if (++ep->failed_messages > 10) { + dev_err(ep->dev, + "Lost sync with interrupt messages. Stopping.\n"); + } else { + ep->ephw->hw_sync_sgl_for_device( + ep, + ep->msgbuf_dma_addr, + ep->msg_buf_size, + DMA_FROM_DEVICE); + + iowrite32(0x01, /* Message NACK */ + ep->registers + fpga_msg_ctrl_reg); + } + return IRQ_HANDLED; + } else if (buf[i] & (1 << 22)) /* Last message */ + break; + } + + if (i >= buf_size) { + dev_err(ep->dev, "Bad interrupt message. 
Stopping.\n"); + return IRQ_HANDLED; + } + + buf_size = i + 2; + + for (i = 0; i < buf_size; i += 2) { /* Scan through messages */ + opcode = (buf[i] >> 24) & 0xff; + + msg_dir = buf[i] & 1; + msg_channel = (buf[i] >> 1) & 0x7ff; + msg_bufno = (buf[i] >> 12) & 0x3ff; + msg_data = buf[i+1] & 0xfffffff; + + switch (opcode) { + case XILLYMSG_OPCODE_RELEASEBUF: + if ((msg_channel > ep->num_channels) || + (msg_channel == 0)) { + malformed_message(ep, &buf[i]); + break; + } + + channel = ep->channels[msg_channel]; + + if (msg_dir) { /* Write channel */ + if (msg_bufno >= channel->num_wr_buffers) { + malformed_message(ep, &buf[i]); + break; + } + spin_lock(&channel->wr_spinlock); + channel->wr_buffers[msg_bufno]->end_offset = + msg_data; + channel->wr_fpga_buf_idx = msg_bufno; + channel->wr_empty = 0; + channel->wr_sleepy = 0; + spin_unlock(&channel->wr_spinlock); + + wake_up_interruptible(&channel->wr_wait); + + } else { + /* Read channel */ + + if (msg_bufno >= channel->num_rd_buffers) { + malformed_message(ep, &buf[i]); + break; + } + + spin_lock(&channel->rd_spinlock); + channel->rd_fpga_buf_idx = msg_bufno; + channel->rd_full = 0; + spin_unlock(&channel->rd_spinlock); + + wake_up_interruptible(&channel->rd_wait); + if (!channel->rd_synchronous) + queue_delayed_work( + xillybus_wq, + &channel->rd_workitem, + XILLY_RX_TIMEOUT); + } + + break; + case XILLYMSG_OPCODE_NONEMPTY: + if ((msg_channel > ep->num_channels) || + (msg_channel == 0) || (!msg_dir) || + !ep->channels[msg_channel]->wr_supports_nonempty) { + malformed_message(ep, &buf[i]); + break; + } + + channel = ep->channels[msg_channel]; + + if (msg_bufno >= channel->num_wr_buffers) { + malformed_message(ep, &buf[i]); + break; + } + spin_lock(&channel->wr_spinlock); + if (msg_bufno == channel->wr_host_buf_idx) + channel->wr_ready = 1; + spin_unlock(&channel->wr_spinlock); + + wake_up_interruptible(&channel->wr_ready_wait); + + break; + case XILLYMSG_OPCODE_QUIESCEACK: + ep->idtlen = msg_data; + wake_up_interruptible(&ep->ep_wait); + + break; + case XILLYMSG_OPCODE_FIFOEOF: + if ((msg_channel > ep->num_channels) || + (msg_channel == 0) || (!msg_dir) || + !ep->channels[msg_channel]->num_wr_buffers) { + malformed_message(ep, &buf[i]); + break; + } + channel = ep->channels[msg_channel]; + spin_lock(&channel->wr_spinlock); + channel->wr_eof = msg_bufno; + channel->wr_sleepy = 0; + + channel->wr_hangup = channel->wr_empty && + (channel->wr_host_buf_idx == msg_bufno); + + spin_unlock(&channel->wr_spinlock); + + wake_up_interruptible(&channel->wr_wait); + + break; + case XILLYMSG_OPCODE_FATAL_ERROR: + ep->fatal_error = 1; + wake_up_interruptible(&ep->ep_wait); /* For select() */ + dev_err(ep->dev, + "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n"); + break; + default: + malformed_message(ep, &buf[i]); + break; + } + } + + ep->ephw->hw_sync_sgl_for_device(ep, + ep->msgbuf_dma_addr, + ep->msg_buf_size, + DMA_FROM_DEVICE); + + ep->msg_counter = (ep->msg_counter + 1) & 0xf; + ep->failed_messages = 0; + iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */ + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(xillybus_isr); + +/* + * A few trivial memory management functions. + * NOTE: These functions are used only on probe and remove, and therefore + * no locks are applied! 
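+ *
+ * (Aside, a hedged worked example for xillybus_isr() above: the message
+ * word pair buf[0] = 0x01001003, buf[1] = 0xb00000ff decodes, per the
+ * shifts and masks in the handler, to opcode 1 (RELEASEBUF), write
+ * direction, channel 1, buffer 1, end offset 0xff, message counter 0xb.)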
+ */ + +static void xillybus_autoflush(struct work_struct *work); + +struct xilly_alloc_state { + void *salami; + int left_of_salami; + int nbuffer; + enum dma_data_direction direction; + u32 regdirection; +}; + +static int xilly_get_dma_buffers(struct xilly_endpoint *ep, + struct xilly_alloc_state *s, + struct xilly_buffer **buffers, + int bufnum, int bytebufsize) +{ + int i, rc; + dma_addr_t dma_addr; + struct device *dev = ep->dev; + struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */ + + if (buffers) { /* Not the message buffer */ + this_buffer = devm_kcalloc(dev, bufnum, + sizeof(struct xilly_buffer), + GFP_KERNEL); + if (!this_buffer) + return -ENOMEM; + } + + for (i = 0; i < bufnum; i++) { + /* + * Buffers are expected in descending size order, so there + * is either enough space for this buffer or none at all. + */ + + if ((s->left_of_salami < bytebufsize) && + (s->left_of_salami > 0)) { + dev_err(ep->dev, + "Corrupt buffer allocation in IDT. Aborting.\n"); + return -ENODEV; + } + + if (s->left_of_salami == 0) { + int allocorder, allocsize; + + allocsize = PAGE_SIZE; + allocorder = 0; + while (bytebufsize > allocsize) { + allocsize *= 2; + allocorder++; + } + + s->salami = (void *) devm_get_free_pages( + dev, + GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, + allocorder); + if (!s->salami) + return -ENOMEM; + + s->left_of_salami = allocsize; + } + + rc = ep->ephw->map_single(ep, s->salami, + bytebufsize, s->direction, + &dma_addr); + if (rc) + return rc; + + iowrite32((u32) (dma_addr & 0xffffffff), + ep->registers + fpga_dma_bufaddr_lowaddr_reg); + iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)), + ep->registers + fpga_dma_bufaddr_highaddr_reg); + + if (buffers) { /* Not the message buffer */ + this_buffer->addr = s->salami; + this_buffer->dma_addr = dma_addr; + buffers[i] = this_buffer++; + + iowrite32(s->regdirection | s->nbuffer++, + ep->registers + fpga_dma_bufno_reg); + } else { + ep->msgbuf_addr = s->salami; + ep->msgbuf_dma_addr = dma_addr; + ep->msg_buf_size = bytebufsize; + + iowrite32(s->regdirection, + ep->registers + fpga_dma_bufno_reg); + } + + s->left_of_salami -= bytebufsize; + s->salami += bytebufsize; + } + return 0; +} + +static int xilly_setupchannels(struct xilly_endpoint *ep, + unsigned char *chandesc, + int entries) +{ + struct device *dev = ep->dev; + int i, entry, rc; + struct xilly_channel *channel; + int channelnum, bufnum, bufsize, format, is_writebuf; + int bytebufsize; + int synchronous, allowpartial, exclusive_open, seekable; + int supports_nonempty; + int msg_buf_done = 0; + + struct xilly_alloc_state rd_alloc = { + .salami = NULL, + .left_of_salami = 0, + .nbuffer = 1, + .direction = DMA_TO_DEVICE, + .regdirection = 0, + }; + + struct xilly_alloc_state wr_alloc = { + .salami = NULL, + .left_of_salami = 0, + .nbuffer = 1, + .direction = DMA_FROM_DEVICE, + .regdirection = 0x80000000, + }; + + channel = devm_kcalloc(dev, ep->num_channels, + sizeof(struct xilly_channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + ep->channels = devm_kcalloc(dev, ep->num_channels + 1, + sizeof(struct xilly_channel *), + GFP_KERNEL); + if (!ep->channels) + return -ENOMEM; + + ep->channels[0] = NULL; /* Channel 0 is message buf. 
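+	 *
+	 * (A hedged numeric sketch of xilly_get_dma_buffers() above,
+	 * assuming PAGE_SIZE = 4096: eight 1024-byte buffers are carved
+	 * out of two pages, left_of_salami dropping 4096 -> 3072 -> 2048
+	 * -> 1024 -> 0 before a fresh page is allocated; a 6144-byte
+	 * buffer would instead trigger an order-1 (8192-byte) allocation,
+	 * since allocsize doubles until it covers bytebufsize.)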
*/ + + /* Initialize all channels with defaults */ + + for (i = 1; i <= ep->num_channels; i++) { + channel->wr_buffers = NULL; + channel->rd_buffers = NULL; + channel->num_wr_buffers = 0; + channel->num_rd_buffers = 0; + channel->wr_fpga_buf_idx = -1; + channel->wr_host_buf_idx = 0; + channel->wr_host_buf_pos = 0; + channel->wr_empty = 1; + channel->wr_ready = 0; + channel->wr_sleepy = 1; + channel->rd_fpga_buf_idx = 0; + channel->rd_host_buf_idx = 0; + channel->rd_host_buf_pos = 0; + channel->rd_full = 0; + channel->wr_ref_count = 0; + channel->rd_ref_count = 0; + + spin_lock_init(&channel->wr_spinlock); + spin_lock_init(&channel->rd_spinlock); + mutex_init(&channel->wr_mutex); + mutex_init(&channel->rd_mutex); + init_waitqueue_head(&channel->rd_wait); + init_waitqueue_head(&channel->wr_wait); + init_waitqueue_head(&channel->wr_ready_wait); + + INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush); + + channel->endpoint = ep; + channel->chan_num = i; + + channel->log2_element_size = 0; + + ep->channels[i] = channel++; + } + + for (entry = 0; entry < entries; entry++, chandesc += 4) { + struct xilly_buffer **buffers = NULL; + + is_writebuf = chandesc[0] & 0x01; + channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7); + format = (chandesc[1] >> 4) & 0x03; + allowpartial = (chandesc[1] >> 6) & 0x01; + synchronous = (chandesc[1] >> 7) & 0x01; + bufsize = 1 << (chandesc[2] & 0x1f); + bufnum = 1 << (chandesc[3] & 0x0f); + exclusive_open = (chandesc[2] >> 7) & 0x01; + seekable = (chandesc[2] >> 6) & 0x01; + supports_nonempty = (chandesc[2] >> 5) & 0x01; + + if ((channelnum > ep->num_channels) || + ((channelnum == 0) && !is_writebuf)) { + dev_err(ep->dev, + "IDT requests channel out of range. Aborting.\n"); + return -ENODEV; + } + + channel = ep->channels[channelnum]; /* NULL for msg channel */ + + if (!is_writebuf || channelnum > 0) { + channel->log2_element_size = ((format > 2) ? + 2 : format); + + bytebufsize = channel->rd_buf_size = bufsize * + (1 << channel->log2_element_size); + + buffers = devm_kcalloc(dev, bufnum, + sizeof(struct xilly_buffer *), + GFP_KERNEL); + if (!buffers) + return -ENOMEM; + } else { + bytebufsize = bufsize << 2; + } + + if (!is_writebuf) { + channel->num_rd_buffers = bufnum; + channel->rd_allow_partial = allowpartial; + channel->rd_synchronous = synchronous; + channel->rd_exclusive_open = exclusive_open; + channel->seekable = seekable; + + channel->rd_buffers = buffers; + rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers, + bufnum, bytebufsize); + } else if (channelnum > 0) { + channel->num_wr_buffers = bufnum; + + channel->seekable = seekable; + channel->wr_supports_nonempty = supports_nonempty; + + channel->wr_allow_partial = allowpartial; + channel->wr_synchronous = synchronous; + channel->wr_exclusive_open = exclusive_open; + + channel->wr_buffers = buffers; + rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers, + bufnum, bytebufsize); + } else { + rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL, + bufnum, bytebufsize); + msg_buf_done++; + } + + if (rc) + return -ENOMEM; + } + + if (!msg_buf_done) { + dev_err(ep->dev, + "Corrupt IDT: No message buffer. 
Aborting.\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int xilly_scan_idt(struct xilly_endpoint *endpoint,
+			  struct xilly_idt_handle *idt_handle)
+{
+	int count = 0;
+	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
+	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
+	unsigned char *scan;
+	int len;
+
+	scan = idt;
+	idt_handle->idt = idt;
+
+	scan++; /* Skip version number */
+
+	while ((scan <= end_of_idt) && *scan) {
+		while ((scan <= end_of_idt) && *scan++)
+			/* Do nothing, just scan through the string */;
+		count++;
+	}
+
+	scan++;
+
+	if (scan > end_of_idt) {
+		dev_err(endpoint->dev,
+			"IDT device name list overflow. Aborting.\n");
+		return -ENODEV;
+	}
+	idt_handle->chandesc = scan;
+
+	len = endpoint->idtlen - (3 + ((int) (scan - idt)));
+
+	if (len & 0x03) {
+		dev_err(endpoint->dev,
+			"Corrupt IDT device name list. Aborting.\n");
+		return -ENODEV;
+	}
+
+	idt_handle->entries = len >> 2;
+	endpoint->num_channels = count;
+
+	return 0;
+}
+
+static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
+{
+	struct xilly_channel *channel;
+	unsigned char *version;
+	long t;
+
+	channel = endpoint->channels[1]; /* This should be generated ad-hoc */
+
+	channel->wr_sleepy = 1;
+
+	iowrite32(1 |
+		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
+		  endpoint->registers + fpga_buf_ctrl_reg);
+
+	t = wait_event_interruptible_timeout(channel->wr_wait,
+					     (!channel->wr_sleepy),
+					     XILLY_TIMEOUT);
+
+	if (t <= 0) {
+		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");
+
+		if (endpoint->fatal_error)
+			return -EIO;
+
+		return -ENODEV;
+	}
+
+	endpoint->ephw->hw_sync_sgl_for_cpu(
+		channel->endpoint,
+		channel->wr_buffers[0]->dma_addr,
+		channel->wr_buf_size,
+		DMA_FROM_DEVICE);
+
+	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
+		dev_err(endpoint->dev,
+			"IDT length mismatch (%d != %d). Aborting.\n",
+			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
+		return -ENODEV;
+	}
+
+	if (crc32_le(~0, channel->wr_buffers[0]->addr,
+		     endpoint->idtlen+1) != 0) {
+		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
+		return -ENODEV;
+	}
+
+	version = channel->wr_buffers[0]->addr;
+
+	/* Check version number. Accept anything below 0x82 for now. */
+	if (*version > 0x82) {
+		dev_err(endpoint->dev,
+			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade.
Aborting.\n",
+			*version);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
+			     size_t count, loff_t *f_pos)
+{
+	ssize_t rc;
+	unsigned long flags;
+	int bytes_done = 0;
+	int no_time_left = 0;
+	long deadline, left_to_sleep;
+	struct xilly_channel *channel = filp->private_data;
+
+	int empty, reached_eof, exhausted, ready;
+	/* Initializations are there only to silence warnings */
+
+	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+	int waiting_bufidx;
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+	rc = mutex_lock_interruptible(&channel->wr_mutex);
+	if (rc)
+		return rc;
+
+	while (1) { /* Note that we may drop mutex within this loop */
+		int bytes_to_do = count - bytes_done;
+
+		spin_lock_irqsave(&channel->wr_spinlock, flags);
+
+		empty = channel->wr_empty;
+		ready = !empty || channel->wr_ready;
+
+		if (!empty) {
+			bufidx = channel->wr_host_buf_idx;
+			bufpos = channel->wr_host_buf_pos;
+			howmany = ((channel->wr_buffers[bufidx]->end_offset
+				    + 1) << channel->log2_element_size)
+				- bufpos;
+
+			/* Update wr_host_* to its post-operation state */
+			if (howmany > bytes_to_do) {
+				bufferdone = 0;
+
+				howmany = bytes_to_do;
+				channel->wr_host_buf_pos += howmany;
+			} else {
+				bufferdone = 1;
+
+				channel->wr_host_buf_pos = 0;
+
+				if (bufidx == channel->wr_fpga_buf_idx) {
+					channel->wr_empty = 1;
+					channel->wr_sleepy = 1;
+					channel->wr_ready = 0;
+				}
+
+				if (bufidx >= (channel->num_wr_buffers - 1))
+					channel->wr_host_buf_idx = 0;
+				else
+					channel->wr_host_buf_idx++;
+			}
+		}
+
+		/*
+		 * Marking our situation after the possible changes above,
+		 * for use after releasing the spinlock.
+		 *
+		 * empty = empty before change
+		 * exhausted = empty after possible change
+		 */
+
+		reached_eof = channel->wr_empty &&
+			(channel->wr_host_buf_idx == channel->wr_eof);
+		channel->wr_hangup = reached_eof;
+		exhausted = channel->wr_empty;
+		waiting_bufidx = channel->wr_host_buf_idx;
+
+		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+		if (!empty) { /* Go on, now without the spinlock */
+
+			if (bufpos == 0) /* Position zero means it's virgin */
+				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+					channel->endpoint,
+					channel->wr_buffers[bufidx]->dma_addr,
+					channel->wr_buf_size,
+					DMA_FROM_DEVICE);
+
+			if (copy_to_user(
+				    userbuf,
+				    channel->wr_buffers[bufidx]->addr
+				    + bufpos, howmany))
+				rc = -EFAULT;
+
+			userbuf += howmany;
+			bytes_done += howmany;
+
+			if (bufferdone) {
+				channel->endpoint->ephw->hw_sync_sgl_for_device(
+					channel->endpoint,
+					channel->wr_buffers[bufidx]->dma_addr,
+					channel->wr_buf_size,
+					DMA_FROM_DEVICE);
+
+				/*
+				 * Tell FPGA the buffer is done with. It's an
+				 * atomic operation to the FPGA, so what
+				 * happens with other channels doesn't matter,
+				 * and the certain channel is protected with
+				 * the channel-specific mutex.
+				 */
+
+				iowrite32(1 | (channel->chan_num << 1) |
+					  (bufidx << 12),
+					  channel->endpoint->registers +
+					  fpga_buf_ctrl_reg);
+			}
+
+			if (rc) {
+				mutex_unlock(&channel->wr_mutex);
+				return rc;
+			}
+		}
+
+		/* This includes a zero-count return = EOF */
+		if ((bytes_done >= count) || reached_eof)
+			break;
+
+		if (!exhausted)
+			continue; /* More in RAM buffer(s)? Just go on. */
+
+		if ((bytes_done > 0) &&
+		    (no_time_left ||
+		     (channel->wr_synchronous && channel->wr_allow_partial)))
+			break;
+
+		/*
+		 * Nonblocking read: The "ready" flag tells us that the FPGA
+		 * has data to send. In non-blocking mode, if it isn't on,
+		 * just return.
But if there is, we jump directly to the point + * where we ask for the FPGA to send all it has, and wait + * until that data arrives. So in a sense, we *do* block in + * nonblocking mode, but only for a very short time. + */ + + if (!no_time_left && (filp->f_flags & O_NONBLOCK)) { + if (bytes_done > 0) + break; + + if (ready) + goto desperate; + + rc = -EAGAIN; + break; + } + + if (!no_time_left || (bytes_done > 0)) { + /* + * Note that in case of an element-misaligned read + * request, offsetlimit will include the last element, + * which will be partially read from. + */ + int offsetlimit = ((count - bytes_done) - 1) >> + channel->log2_element_size; + int buf_elements = channel->wr_buf_size >> + channel->log2_element_size; + + /* + * In synchronous mode, always send an offset limit. + * Just don't send a value too big. + */ + + if (channel->wr_synchronous) { + /* Don't request more than one buffer */ + if (channel->wr_allow_partial && + (offsetlimit >= buf_elements)) + offsetlimit = buf_elements - 1; + + /* Don't request more than all buffers */ + if (!channel->wr_allow_partial && + (offsetlimit >= + (buf_elements * channel->num_wr_buffers))) + offsetlimit = buf_elements * + channel->num_wr_buffers - 1; + } + + /* + * In asynchronous mode, force early flush of a buffer + * only if that will allow returning a full count. The + * "offsetlimit < ( ... )" rather than "<=" excludes + * requesting a full buffer, which would obviously + * cause a buffer transmission anyhow + */ + + if (channel->wr_synchronous || + (offsetlimit < (buf_elements - 1))) { + mutex_lock(&channel->endpoint->register_mutex); + + iowrite32(offsetlimit, + channel->endpoint->registers + + fpga_buf_offset_reg); + + iowrite32(1 | (channel->chan_num << 1) | + (2 << 24) | /* 2 = offset limit */ + (waiting_bufidx << 12), + channel->endpoint->registers + + fpga_buf_ctrl_reg); + + mutex_unlock(&channel->endpoint-> + register_mutex); + } + } + + /* + * If partial completion is disallowed, there is no point in + * timeout sleeping. Neither if no_time_left is set and + * there's no data. + */ + + if (!channel->wr_allow_partial || + (no_time_left && (bytes_done == 0))) { + /* + * This do-loop will run more than once if another + * thread reasserted wr_sleepy before we got the mutex + * back, so we try again. + */ + + do { + mutex_unlock(&channel->wr_mutex); + + if (wait_event_interruptible( + channel->wr_wait, + (!channel->wr_sleepy))) + goto interrupted; + + if (mutex_lock_interruptible( + &channel->wr_mutex)) + goto interrupted; + } while (channel->wr_sleepy); + + continue; + +interrupted: /* Mutex is not held if got here */ + if (channel->endpoint->fatal_error) + return -EIO; + if (bytes_done) + return bytes_done; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; /* Don't admit snoozing */ + return -EINTR; + } + + left_to_sleep = deadline - ((long) jiffies); + + /* + * If our time is out, skip the waiting. We may miss wr_sleepy + * being deasserted but hey, almost missing the train is like + * missing it. + */ + + if (left_to_sleep > 0) { + left_to_sleep = + wait_event_interruptible_timeout( + channel->wr_wait, + (!channel->wr_sleepy), + left_to_sleep); + + if (left_to_sleep > 0) /* wr_sleepy deasserted */ + continue; + + if (left_to_sleep < 0) { /* Interrupt */ + mutex_unlock(&channel->wr_mutex); + if (channel->endpoint->fatal_error) + return -EIO; + if (bytes_done) + return bytes_done; + return -EINTR; + } + } + +desperate: + no_time_left = 1; /* We're out of sleeping time. Desperate! 
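+	 *
+	 * (Hedged summary of the fpga_buf_ctrl_reg words written below and
+	 * elsewhere in this file: bit 0 = direction, bits 11:1 = channel
+	 * number, bits 21:12 = buffer index, bits 31:24 = opcode; opcode 3
+	 * below asks the FPGA to flush whatever it has.)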
 */
+
+		if (bytes_done == 0) {
+			/*
+			 * Reaching here means that we allow partial return,
+			 * that we've run out of time, and that we have
+			 * nothing to return.
+			 * So tell the FPGA to send anything it has or gets.
+			 */
+
+			iowrite32(1 | (channel->chan_num << 1) |
+				  (3 << 24) |  /* Opcode 3, flush it all! */
+				  (waiting_bufidx << 12),
+				  channel->endpoint->registers +
+				  fpga_buf_ctrl_reg);
+		}
+
+		/*
+		 * Reaching here means that we *do* have data in the buffer,
+		 * but the "partial" flag disallows returning less than
+		 * required. And we don't have as much. So loop again,
+		 * which is likely to end up blocking indefinitely until
+		 * enough data has arrived.
+		 */
+	}
+
+	mutex_unlock(&channel->wr_mutex);
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+
+	if (rc)
+		return rc;
+
+	return bytes_done;
+}
+
+/*
+ * The timeout argument takes values as follows:
+ *  >0 : Flush with timeout
+ * ==0 : Flush, and wait indefinitely for the flush to complete
+ *  <0 : Autoflush: Flush only if there's a single buffer occupied
+ */
+
+static int xillybus_myflush(struct xilly_channel *channel, long timeout)
+{
+	int rc;
+	unsigned long flags;
+
+	int end_offset_plus1;
+	int bufidx, bufidx_minus1;
+	int i;
+	int empty;
+	int new_rd_host_buf_pos;
+
+	if (channel->endpoint->fatal_error)
+		return -EIO;
+	rc = mutex_lock_interruptible(&channel->rd_mutex);
+	if (rc)
+		return rc;
+
+	/*
+	 * Don't flush a closed channel. This can happen when the work queued
+	 * autoflush thread fires off after the file has closed. This is not
+	 * an error, just something to dismiss.
+	 */
+
+	if (!channel->rd_ref_count)
+		goto done;
+
+	bufidx = channel->rd_host_buf_idx;
+
+	bufidx_minus1 = (bufidx == 0) ?
+		channel->num_rd_buffers - 1 :
+		bufidx - 1;
+
+	end_offset_plus1 = channel->rd_host_buf_pos >>
+		channel->log2_element_size;
+
+	new_rd_host_buf_pos = channel->rd_host_buf_pos -
+		(end_offset_plus1 << channel->log2_element_size);
+
+	/* Submit the current buffer if it's nonempty */
+	if (end_offset_plus1) {
+		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
+			(end_offset_plus1 << channel->log2_element_size);
+
+		/* Copy unflushed data, so we can put it in next buffer */
+		for (i = 0; i < new_rd_host_buf_pos; i++)
+			channel->rd_leftovers[i] = *tail++;
+
+		spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+		/* Autoflush only if a single buffer is occupied */
+
+		if ((timeout < 0) &&
+		    (channel->rd_full ||
+		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
+			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+			/*
+			 * A new work item may be queued by the ISR exactly
+			 * now, since the execution of a work item allows the
+			 * queuing of a new one while it's running.
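+			 *
+			 * (Aside, a hedged worked example of the leftovers
+			 * logic above: with 32-bit elements
+			 * (log2_element_size = 2) and rd_host_buf_pos = 10,
+			 * end_offset_plus1 = 10 >> 2 = 2 full elements are
+			 * submitted and new_rd_host_buf_pos = 10 - (2 << 2)
+			 * = 2, so the two trailing bytes are parked in
+			 * rd_leftovers[] for the next buffer.)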
+ */ + goto done; + } + + /* The 4th element is never needed for data, so it's a flag */ + channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0); + + /* Set up rd_full to reflect a certain moment's state */ + + if (bufidx == channel->rd_fpga_buf_idx) + channel->rd_full = 1; + spin_unlock_irqrestore(&channel->rd_spinlock, flags); + + if (bufidx >= (channel->num_rd_buffers - 1)) + channel->rd_host_buf_idx = 0; + else + channel->rd_host_buf_idx++; + + channel->endpoint->ephw->hw_sync_sgl_for_device( + channel->endpoint, + channel->rd_buffers[bufidx]->dma_addr, + channel->rd_buf_size, + DMA_TO_DEVICE); + + mutex_lock(&channel->endpoint->register_mutex); + + iowrite32(end_offset_plus1 - 1, + channel->endpoint->registers + fpga_buf_offset_reg); + + iowrite32((channel->chan_num << 1) | /* Channel ID */ + (2 << 24) | /* Opcode 2, submit buffer */ + (bufidx << 12), + channel->endpoint->registers + fpga_buf_ctrl_reg); + + mutex_unlock(&channel->endpoint->register_mutex); + } else if (bufidx == 0) { + bufidx = channel->num_rd_buffers - 1; + } else { + bufidx--; + } + + channel->rd_host_buf_pos = new_rd_host_buf_pos; + + if (timeout < 0) + goto done; /* Autoflush */ + + /* + * bufidx is now the last buffer written to (or equal to + * rd_fpga_buf_idx if buffer was never written to), and + * channel->rd_host_buf_idx the one after it. + * + * If bufidx == channel->rd_fpga_buf_idx we're either empty or full. + */ + + while (1) { /* Loop waiting for draining of buffers */ + spin_lock_irqsave(&channel->rd_spinlock, flags); + + if (bufidx != channel->rd_fpga_buf_idx) + channel->rd_full = 1; /* + * Not really full, + * but needs waiting. + */ + + empty = !channel->rd_full; + + spin_unlock_irqrestore(&channel->rd_spinlock, flags); + + if (empty) + break; + + /* + * Indefinite sleep with mutex taken. With data waiting for + * flushing user should not be surprised if open() for write + * sleeps. + */ + if (timeout == 0) + wait_event_interruptible(channel->rd_wait, + (!channel->rd_full)); + + else if (wait_event_interruptible_timeout( + channel->rd_wait, + (!channel->rd_full), + timeout) == 0) { + dev_warn(channel->endpoint->dev, + "Timed out while flushing. 
Output data may be lost.\n"); + + rc = -ETIMEDOUT; + break; + } + + if (channel->rd_full) { + rc = -EINTR; + break; + } + } + +done: + mutex_unlock(&channel->rd_mutex); + + if (channel->endpoint->fatal_error) + return -EIO; + + return rc; +} + +static int xillybus_flush(struct file *filp, fl_owner_t id) +{ + if (!(filp->f_mode & FMODE_WRITE)) + return 0; + + return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */ +} + +static void xillybus_autoflush(struct work_struct *work) +{ + struct delayed_work *workitem = container_of( + work, struct delayed_work, work); + struct xilly_channel *channel = container_of( + workitem, struct xilly_channel, rd_workitem); + int rc; + + rc = xillybus_myflush(channel, -1); + if (rc == -EINTR) + dev_warn(channel->endpoint->dev, + "Autoflush failed because work queue thread got a signal.\n"); + else if (rc) + dev_err(channel->endpoint->dev, + "Autoflush failed under weird circumstances.\n"); +} + +static ssize_t xillybus_write(struct file *filp, const char __user *userbuf, + size_t count, loff_t *f_pos) +{ + ssize_t rc; + unsigned long flags; + int bytes_done = 0; + struct xilly_channel *channel = filp->private_data; + + int full, exhausted; + /* Initializations are there only to silence warnings */ + + int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; + int end_offset_plus1 = 0; + + if (channel->endpoint->fatal_error) + return -EIO; + + rc = mutex_lock_interruptible(&channel->rd_mutex); + if (rc) + return rc; + + while (1) { + int bytes_to_do = count - bytes_done; + + spin_lock_irqsave(&channel->rd_spinlock, flags); + + full = channel->rd_full; + + if (!full) { + bufidx = channel->rd_host_buf_idx; + bufpos = channel->rd_host_buf_pos; + howmany = channel->rd_buf_size - bufpos; + + /* + * Update rd_host_* to its state after this operation. + * count=0 means committing the buffer immediately, + * which is like flushing, but not necessarily block. + */ + + if ((howmany > bytes_to_do) && + (count || + ((bufpos >> channel->log2_element_size) == 0))) { + bufferdone = 0; + + howmany = bytes_to_do; + channel->rd_host_buf_pos += howmany; + } else { + bufferdone = 1; + + if (count) { + end_offset_plus1 = + channel->rd_buf_size >> + channel->log2_element_size; + channel->rd_host_buf_pos = 0; + } else { + unsigned char *tail; + int i; + + howmany = 0; + + end_offset_plus1 = bufpos >> + channel->log2_element_size; + + channel->rd_host_buf_pos -= + end_offset_plus1 << + channel->log2_element_size; + + tail = channel-> + rd_buffers[bufidx]->addr + + (end_offset_plus1 << + channel->log2_element_size); + + for (i = 0; + i < channel->rd_host_buf_pos; + i++) + channel->rd_leftovers[i] = + *tail++; + } + + if (bufidx == channel->rd_fpga_buf_idx) + channel->rd_full = 1; + + if (bufidx >= (channel->num_rd_buffers - 1)) + channel->rd_host_buf_idx = 0; + else + channel->rd_host_buf_idx++; + } + } + + /* + * Marking our situation after the possible changes above, + * for use after releasing the spinlock. 
+		 *
+		 * full = full before change
+		 * exhausted = full after possible change
+		 */
+
+		exhausted = channel->rd_full;
+
+		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+		if (!full) { /* Go on, now without the spinlock */
+			unsigned char *head =
+				channel->rd_buffers[bufidx]->addr;
+			int i;
+
+			if ((bufpos == 0) || /* Zero means it's virgin */
+			    (channel->rd_leftovers[3] != 0)) {
+				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+					channel->endpoint,
+					channel->rd_buffers[bufidx]->dma_addr,
+					channel->rd_buf_size,
+					DMA_TO_DEVICE);
+
+				/* Virgin, but leftovers are due */
+				for (i = 0; i < bufpos; i++)
+					*head++ = channel->rd_leftovers[i];
+
+				channel->rd_leftovers[3] = 0; /* Clear flag */
+			}
+
+			if (copy_from_user(
+				    channel->rd_buffers[bufidx]->addr + bufpos,
+				    userbuf, howmany))
+				rc = -EFAULT;
+
+			userbuf += howmany;
+			bytes_done += howmany;
+
+			if (bufferdone) {
+				channel->endpoint->ephw->hw_sync_sgl_for_device(
+					channel->endpoint,
+					channel->rd_buffers[bufidx]->dma_addr,
+					channel->rd_buf_size,
+					DMA_TO_DEVICE);
+
+				mutex_lock(&channel->endpoint->register_mutex);
+
+				iowrite32(end_offset_plus1 - 1,
+					  channel->endpoint->registers +
+					  fpga_buf_offset_reg);
+
+				iowrite32((channel->chan_num << 1) |
+					  (2 << 24) |  /* 2 = submit buffer */
+					  (bufidx << 12),
+					  channel->endpoint->registers +
+					  fpga_buf_ctrl_reg);
+
+				mutex_unlock(&channel->endpoint->
+					     register_mutex);
+
+				channel->rd_leftovers[3] =
+					(channel->rd_host_buf_pos != 0);
+			}
+
+			if (rc) {
+				mutex_unlock(&channel->rd_mutex);
+
+				if (channel->endpoint->fatal_error)
+					return -EIO;
+
+				if (!channel->rd_synchronous)
+					queue_delayed_work(
+						xillybus_wq,
+						&channel->rd_workitem,
+						XILLY_RX_TIMEOUT);
+
+				return rc;
+			}
+		}
+
+		if (bytes_done >= count)
+			break;
+
+		if (!exhausted)
+			continue; /* If there's more space, just go on */
+
+		if ((bytes_done > 0) && channel->rd_allow_partial)
+			break;
+
+		/*
+		 * Indefinite sleep with mutex taken. With data waiting for
+		 * flushing, user should not be surprised if open() for write
+		 * sleeps.
+ */ + + if (filp->f_flags & O_NONBLOCK) { + rc = -EAGAIN; + break; + } + + if (wait_event_interruptible(channel->rd_wait, + (!channel->rd_full))) { + mutex_unlock(&channel->rd_mutex); + + if (channel->endpoint->fatal_error) + return -EIO; + + if (bytes_done) + return bytes_done; + return -EINTR; + } + } + + mutex_unlock(&channel->rd_mutex); + + if (!channel->rd_synchronous) + queue_delayed_work(xillybus_wq, + &channel->rd_workitem, + XILLY_RX_TIMEOUT); + + if (channel->endpoint->fatal_error) + return -EIO; + + if (rc) + return rc; + + if ((channel->rd_synchronous) && (bytes_done > 0)) { + rc = xillybus_myflush(filp->private_data, 0); /* No timeout */ + + if (rc && (rc != -EINTR)) + return rc; + } + + return bytes_done; +} + +static int xillybus_open(struct inode *inode, struct file *filp) +{ + int rc = 0; + unsigned long flags; + int minor = iminor(inode); + int major = imajor(inode); + struct xilly_endpoint *ep_iter, *endpoint = NULL; + struct xilly_channel *channel; + + mutex_lock(&ep_list_lock); + + list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) { + if ((ep_iter->major == major) && + (minor >= ep_iter->lowest_minor) && + (minor < (ep_iter->lowest_minor + + ep_iter->num_channels))) { + endpoint = ep_iter; + break; + } + } + mutex_unlock(&ep_list_lock); + + if (!endpoint) { + pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n", + major, minor); + return -ENODEV; + } + + if (endpoint->fatal_error) + return -EIO; + + channel = endpoint->channels[1 + minor - endpoint->lowest_minor]; + filp->private_data = channel; + + /* + * It gets complicated because: + * 1. We don't want to take a mutex we don't have to + * 2. We don't want to open one direction if the other will fail. + */ + + if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers)) + return -ENODEV; + + if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers)) + return -ENODEV; + + if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) && + (channel->wr_synchronous || !channel->wr_allow_partial || + !channel->wr_supports_nonempty)) { + dev_err(endpoint->dev, + "open() failed: O_NONBLOCK not allowed for read on this device\n"); + return -ENODEV; + } + + if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) && + (channel->rd_synchronous || !channel->rd_allow_partial)) { + dev_err(endpoint->dev, + "open() failed: O_NONBLOCK not allowed for write on this device\n"); + return -ENODEV; + } + + /* + * Note: open() may block on getting mutexes despite O_NONBLOCK. + * This shouldn't occur normally, since multiple open of the same + * file descriptor is almost always prohibited anyhow + * (*_exclusive_open is normally set in real-life systems). 
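+ *
+ * (A hedged userspace sketch; the device name is hypothetical:
+ *
+ *	int fd = open("/dev/xillybus_somepipe", O_RDONLY | O_NONBLOCK);
+ *
+ * per the checks above, this open() succeeds only if the channel is
+ * asynchronous, allows partial reads and supports the nonempty message.)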
+ */ + + if (filp->f_mode & FMODE_READ) { + rc = mutex_lock_interruptible(&channel->wr_mutex); + if (rc) + return rc; + } + + if (filp->f_mode & FMODE_WRITE) { + rc = mutex_lock_interruptible(&channel->rd_mutex); + if (rc) + goto unlock_wr; + } + + if ((filp->f_mode & FMODE_READ) && + (channel->wr_ref_count != 0) && + (channel->wr_exclusive_open)) { + rc = -EBUSY; + goto unlock; + } + + if ((filp->f_mode & FMODE_WRITE) && + (channel->rd_ref_count != 0) && + (channel->rd_exclusive_open)) { + rc = -EBUSY; + goto unlock; + } + + if (filp->f_mode & FMODE_READ) { + if (channel->wr_ref_count == 0) { /* First open of file */ + /* Move the host to first buffer */ + spin_lock_irqsave(&channel->wr_spinlock, flags); + channel->wr_host_buf_idx = 0; + channel->wr_host_buf_pos = 0; + channel->wr_fpga_buf_idx = -1; + channel->wr_empty = 1; + channel->wr_ready = 0; + channel->wr_sleepy = 1; + channel->wr_eof = -1; + channel->wr_hangup = 0; + + spin_unlock_irqrestore(&channel->wr_spinlock, flags); + + iowrite32(1 | (channel->chan_num << 1) | + (4 << 24) | /* Opcode 4, open channel */ + ((channel->wr_synchronous & 1) << 23), + channel->endpoint->registers + + fpga_buf_ctrl_reg); + } + + channel->wr_ref_count++; + } + + if (filp->f_mode & FMODE_WRITE) { + if (channel->rd_ref_count == 0) { /* First open of file */ + /* Move the host to first buffer */ + spin_lock_irqsave(&channel->rd_spinlock, flags); + channel->rd_host_buf_idx = 0; + channel->rd_host_buf_pos = 0; + channel->rd_leftovers[3] = 0; /* No leftovers. */ + channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1; + channel->rd_full = 0; + + spin_unlock_irqrestore(&channel->rd_spinlock, flags); + + iowrite32((channel->chan_num << 1) | + (4 << 24), /* Opcode 4, open channel */ + channel->endpoint->registers + + fpga_buf_ctrl_reg); + } + + channel->rd_ref_count++; + } + +unlock: + if (filp->f_mode & FMODE_WRITE) + mutex_unlock(&channel->rd_mutex); +unlock_wr: + if (filp->f_mode & FMODE_READ) + mutex_unlock(&channel->wr_mutex); + + if (!rc && (!channel->seekable)) + return nonseekable_open(inode, filp); + + return rc; +} + +static int xillybus_release(struct inode *inode, struct file *filp) +{ + unsigned long flags; + struct xilly_channel *channel = filp->private_data; + + int buf_idx; + int eof; + + if (channel->endpoint->fatal_error) + return -EIO; + + if (filp->f_mode & FMODE_WRITE) { + mutex_lock(&channel->rd_mutex); + + channel->rd_ref_count--; + + if (channel->rd_ref_count == 0) { + /* + * We rely on the kernel calling flush() + * before we get here. 
+ */ + + iowrite32((channel->chan_num << 1) | /* Channel ID */ + (5 << 24), /* Opcode 5, close channel */ + channel->endpoint->registers + + fpga_buf_ctrl_reg); + } + mutex_unlock(&channel->rd_mutex); + } + + if (filp->f_mode & FMODE_READ) { + mutex_lock(&channel->wr_mutex); + + channel->wr_ref_count--; + + if (channel->wr_ref_count == 0) { + iowrite32(1 | (channel->chan_num << 1) | + (5 << 24), /* Opcode 5, close channel */ + channel->endpoint->registers + + fpga_buf_ctrl_reg); + + /* + * This is crazily cautious: We make sure that not + * only that we got an EOF (be it because we closed + * the channel or because of a user's EOF), but verify + * that it's one beyond the last buffer arrived, so + * we have no leftover buffers pending before wrapping + * up (which can only happen in asynchronous channels, + * BTW) + */ + + while (1) { + spin_lock_irqsave(&channel->wr_spinlock, + flags); + buf_idx = channel->wr_fpga_buf_idx; + eof = channel->wr_eof; + channel->wr_sleepy = 1; + spin_unlock_irqrestore(&channel->wr_spinlock, + flags); + + /* + * Check if eof points at the buffer after + * the last one the FPGA submitted. Note that + * no EOF is marked by negative eof. + */ + + buf_idx++; + if (buf_idx == channel->num_wr_buffers) + buf_idx = 0; + + if (buf_idx == eof) + break; + + /* + * Steal extra 100 ms if awaken by interrupt. + * This is a simple workaround for an + * interrupt pending when entering, which would + * otherwise result in declaring the hardware + * non-responsive. + */ + + if (wait_event_interruptible( + channel->wr_wait, + (!channel->wr_sleepy))) + msleep(100); + + if (channel->wr_sleepy) { + mutex_unlock(&channel->wr_mutex); + dev_warn(channel->endpoint->dev, + "Hardware failed to respond to close command, therefore left in messy state.\n"); + return -EINTR; + } + } + } + + mutex_unlock(&channel->wr_mutex); + } + + return 0; +} + +static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence) +{ + struct xilly_channel *channel = filp->private_data; + loff_t pos = filp->f_pos; + int rc = 0; + + /* + * Take both mutexes not allowing interrupts, since it seems like + * common applications don't expect an -EINTR here. Besides, multiple + * access to a single file descriptor on seekable devices is a mess + * anyhow. + */ + + if (channel->endpoint->fatal_error) + return -EIO; + + mutex_lock(&channel->wr_mutex); + mutex_lock(&channel->rd_mutex); + + switch (whence) { + case SEEK_SET: + pos = offset; + break; + case SEEK_CUR: + pos += offset; + break; + case SEEK_END: + pos = offset; /* Going to the end => to the beginning */ + break; + default: + rc = -EINVAL; + goto end; + } + + /* In any case, we must finish on an element boundary */ + if (pos & ((1 << channel->log2_element_size) - 1)) { + rc = -EINVAL; + goto end; + } + + mutex_lock(&channel->endpoint->register_mutex); + + iowrite32(pos >> channel->log2_element_size, + channel->endpoint->registers + fpga_buf_offset_reg); + + iowrite32((channel->chan_num << 1) | + (6 << 24), /* Opcode 6, set address */ + channel->endpoint->registers + fpga_buf_ctrl_reg); + + mutex_unlock(&channel->endpoint->register_mutex); + +end: + mutex_unlock(&channel->rd_mutex); + mutex_unlock(&channel->wr_mutex); + + if (rc) /* Return error after releasing mutexes */ + return rc; + + filp->f_pos = pos; + + /* + * Since seekable devices are allowed only when the channel is + * synchronous, we assume that there is no data pending in either + * direction (which holds true as long as no concurrent access on the + * file descriptor takes place). 
+ * The only thing we may need to throw away is leftovers from a partial
+ * write() flush.
+	 */
+
+	channel->rd_leftovers[3] = 0;
+
+	return pos;
+}
+
+static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+{
+	struct xilly_channel *channel = filp->private_data;
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	poll_wait(filp, &channel->endpoint->ep_wait, wait);
+
+	/*
+	 * poll() won't play ball regarding read() channels which are
+	 * synchronous or don't support the nonempty message. Allowing
+	 * that would create situations where data has been delivered at
+	 * the FPGA, with users expecting select() to wake up, which it
+	 * may not.
+	 */
+
+	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
+		poll_wait(filp, &channel->wr_wait, wait);
+		poll_wait(filp, &channel->wr_ready_wait, wait);
+
+		spin_lock_irqsave(&channel->wr_spinlock, flags);
+		if (!channel->wr_empty || channel->wr_ready)
+			mask |= POLLIN | POLLRDNORM;
+
+		if (channel->wr_hangup)
+			/*
+			 * Not POLLHUP, because its behavior is in the
+			 * mist, and POLLIN does what we want: Wake up
+			 * the read file descriptor so it sees EOF.
+			 */
+			mask |= POLLIN | POLLRDNORM;
+		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+	}
+
+	/*
+	 * If partial data write is disallowed on a write() channel,
+	 * it's pointless to ever signal OK to write, because it could
+	 * block despite some space being available.
+	 */
+
+	if (channel->rd_allow_partial) {
+		poll_wait(filp, &channel->rd_wait, wait);
+
+		spin_lock_irqsave(&channel->rd_spinlock, flags);
+		if (!channel->rd_full)
+			mask |= POLLOUT | POLLWRNORM;
+		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+	}
+
+	if (channel->endpoint->fatal_error)
+		mask |= POLLERR;
+
+	return mask;
+}
+
+static const struct file_operations xillybus_fops = {
+	.owner = THIS_MODULE,
+	.read = xillybus_read,
+	.write = xillybus_write,
+	.open = xillybus_open,
+	.flush = xillybus_flush,
+	.release = xillybus_release,
+	.llseek = xillybus_llseek,
+	.poll = xillybus_poll,
+};
+
+static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
+				const unsigned char *idt)
+{
+	int rc;
+	dev_t dev;
+	int devnum, i, minor, major;
+	char devname[48];
+	struct device *device;
+
+	rc = alloc_chrdev_region(&dev, 0, /* minor start */
+				 endpoint->num_channels,
+				 xillyname);
+	if (rc) {
+		dev_warn(endpoint->dev, "Failed to obtain major/minors\n");
+		return rc;
+	}
+
+	endpoint->major = major = MAJOR(dev);
+	endpoint->lowest_minor = minor = MINOR(dev);
+
+	cdev_init(&endpoint->cdev, &xillybus_fops);
+	endpoint->cdev.owner = endpoint->ephw->owner;
+	rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
+		      endpoint->num_channels);
+	if (rc) {
+		dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
+		goto unregister_chrdev;
+	}
+
+	idt++;
+
+	for (i = minor, devnum = 0;
+	     devnum < endpoint->num_channels;
+	     devnum++, i++) {
+		snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
+
+		devname[sizeof(devname)-1] = 0; /* Should never matter */
+
+		while (*idt++)
+			/* Skip to next */;
+
+		device = device_create(xillybus_class,
+				       NULL,
+				       MKDEV(major, i),
+				       NULL,
+				       "%s", devname);
+
+		if (IS_ERR(device)) {
+			dev_warn(endpoint->dev,
+				 "Failed to create %s device.
Aborting.\n", + devname); + rc = -ENODEV; + goto unroll_device_create; + } + } + + dev_info(endpoint->dev, "Created %d device files.\n", + endpoint->num_channels); + return 0; /* succeed */ + +unroll_device_create: + devnum--; i--; + for (; devnum >= 0; devnum--, i--) + device_destroy(xillybus_class, MKDEV(major, i)); + + cdev_del(&endpoint->cdev); +unregister_chrdev: + unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels); + + return rc; +} + +static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint) +{ + int minor; + + for (minor = endpoint->lowest_minor; + minor < (endpoint->lowest_minor + endpoint->num_channels); + minor++) + device_destroy(xillybus_class, MKDEV(endpoint->major, minor)); + cdev_del(&endpoint->cdev); + unregister_chrdev_region(MKDEV(endpoint->major, + endpoint->lowest_minor), + endpoint->num_channels); + + dev_info(endpoint->dev, "Removed %d device files.\n", + endpoint->num_channels); +} + +struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev, + struct device *dev, + struct xilly_endpoint_hardware + *ephw) +{ + struct xilly_endpoint *endpoint; + + endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL); + if (!endpoint) + return NULL; + + endpoint->pdev = pdev; + endpoint->dev = dev; + endpoint->ephw = ephw; + endpoint->msg_counter = 0x0b; + endpoint->failed_messages = 0; + endpoint->fatal_error = 0; + + init_waitqueue_head(&endpoint->ep_wait); + mutex_init(&endpoint->register_mutex); + + return endpoint; +} +EXPORT_SYMBOL(xillybus_init_endpoint); + +static int xilly_quiesce(struct xilly_endpoint *endpoint) +{ + long t; + + endpoint->idtlen = -1; + + iowrite32((u32) (endpoint->dma_using_dac & 0x0001), + endpoint->registers + fpga_dma_control_reg); + + t = wait_event_interruptible_timeout(endpoint->ep_wait, + (endpoint->idtlen >= 0), + XILLY_TIMEOUT); + if (t <= 0) { + dev_err(endpoint->dev, + "Failed to quiesce the device on exit.\n"); + return -ENODEV; + } + return 0; +} + +int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint) +{ + int rc; + long t; + + void *bootstrap_resources; + int idtbuffersize = (1 << PAGE_SHIFT); + struct device *dev = endpoint->dev; + + /* + * The bogus IDT is used during bootstrap for allocating the initial + * message buffer, and then the message buffer and space for the IDT + * itself. The initial message buffer is of a single page's size, but + * it's soon replaced with a more modest one (and memory is freed). + */ + + unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0, + 3, 192, PAGE_SHIFT, 0 }; + struct xilly_idt_handle idt_handle; + + /* + * Writing the value 0x00000001 to Endianness register signals which + * endianness this processor is using, so the FPGA can swap words as + * necessary. + */ + + iowrite32(1, endpoint->registers + fpga_endian_reg); + + /* Bootstrap phase I: Allocate temporary message buffer */ + + bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL); + if (!bootstrap_resources) + return -ENOMEM; + + endpoint->num_channels = 0; + + rc = xilly_setupchannels(endpoint, bogus_idt, 1); + if (rc) + return rc; + + /* Clear the message subsystem (and counter in particular) */ + iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg); + + endpoint->idtlen = -1; + + /* + * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT + * buffer size. 
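+	 *
+	 * (Aside, a hedged decode of bogus_idt above, per the chandesc
+	 * format parsed in xilly_setupchannels(): {1, 224, PAGE_SHIFT-2, 0}
+	 * describes the channel-0 message buffer, write direction,
+	 * synchronous, of ((1 << (PAGE_SHIFT-2)) << 2) = PAGE_SIZE bytes;
+	 * {3, 192, PAGE_SHIFT, 0} describes a single PAGE_SIZE write
+	 * buffer on channel 1, which receives the IDT.)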
+	 */
+	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+		  endpoint->registers + fpga_dma_control_reg);
+
+	t = wait_event_interruptible_timeout(endpoint->ep_wait,
+					     (endpoint->idtlen >= 0),
+					     XILLY_TIMEOUT);
+	if (t <= 0) {
+		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
+		return -ENODEV;
+	}
+
+	/* Enable DMA */
+	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
+		  endpoint->registers + fpga_dma_control_reg);
+
+	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
+	while (endpoint->idtlen >= idtbuffersize) {
+		idtbuffersize *= 2;
+		bogus_idt[6]++;
+	}
+
+	endpoint->num_channels = 1;
+
+	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
+	if (rc)
+		goto failed_idt;
+
+	rc = xilly_obtain_idt(endpoint);
+	if (rc)
+		goto failed_idt;
+
+	rc = xilly_scan_idt(endpoint, &idt_handle);
+	if (rc)
+		goto failed_idt;
+
+	devres_close_group(dev, bootstrap_resources);
+
+	/* Bootstrap phase III: Allocate buffers according to IDT */
+
+	rc = xilly_setupchannels(endpoint,
+				 idt_handle.chandesc,
+				 idt_handle.entries);
+	if (rc)
+		goto failed_idt;
+
+	/*
+	 * endpoint is now completely configured. We put it on the list
+	 * available to open() before registering the char device(s)
+	 */
+
+	mutex_lock(&ep_list_lock);
+	list_add_tail(&endpoint->ep_list, &list_of_endpoints);
+	mutex_unlock(&ep_list_lock);
+
+	rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
+	if (rc)
+		goto failed_chrdevs;
+
+	devres_release_group(dev, bootstrap_resources);
+
+	return 0;
+
+failed_chrdevs:
+	mutex_lock(&ep_list_lock);
+	list_del(&endpoint->ep_list);
+	mutex_unlock(&ep_list_lock);
+
+failed_idt:
+	xilly_quiesce(endpoint);
+	flush_workqueue(xillybus_wq);
+
+	return rc;
+}
+EXPORT_SYMBOL(xillybus_endpoint_discovery);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
+{
+	xillybus_cleanup_chrdev(endpoint);
+
+	mutex_lock(&ep_list_lock);
+	list_del(&endpoint->ep_list);
+	mutex_unlock(&ep_list_lock);
+
+	xilly_quiesce(endpoint);
+
+	/*
+	 * Flushing is done upon endpoint release to prevent access to memory
+	 * just about to be released. This makes the quiesce complete.
+	 */
+	flush_workqueue(xillybus_wq);
+}
+EXPORT_SYMBOL(xillybus_endpoint_remove);
+
+static int __init xillybus_init(void)
+{
+	mutex_init(&ep_list_lock);
+
+	xillybus_class = class_create(THIS_MODULE, xillyname);
+	if (IS_ERR(xillybus_class))
+		return PTR_ERR(xillybus_class);
+
+	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+	if (!xillybus_wq) {
+		class_destroy(xillybus_class);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __exit xillybus_exit(void)
+{
+	/* flush_workqueue() was called for each endpoint released */
+	destroy_workqueue(xillybus_wq);
+
+	class_destroy(xillybus_class);
+}
+
+module_init(xillybus_init);
+module_exit(xillybus_exit);
diff --git a/kernel/drivers/char/xillybus/xillybus_of.c b/kernel/drivers/char/xillybus/xillybus_of.c
new file mode 100644
index 000000000..781865084
--- /dev/null
+++ b/kernel/drivers/char/xillybus/xillybus_of.c
@@ -0,0 +1,186 @@
+/*
+ * linux/drivers/misc/xillybus_of.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using Open Firmware.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xillybus.h" + +MODULE_DESCRIPTION("Xillybus driver for Open Firmware"); +MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); +MODULE_VERSION("1.06"); +MODULE_ALIAS("xillybus_of"); +MODULE_LICENSE("GPL v2"); + +static const char xillyname[] = "xillybus_of"; + +/* Match table for of_platform binding */ +static const struct of_device_id xillybus_of_match[] = { + { .compatible = "xillybus,xillybus-1.00.a", }, + { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */ + {} +}; + +MODULE_DEVICE_TABLE(of, xillybus_of_match); + +static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep, + dma_addr_t dma_handle, + size_t size, + int direction) +{ + dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction); +} + +static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep, + dma_addr_t dma_handle, + size_t size, + int direction) +{ + dma_sync_single_for_device(ep->dev, dma_handle, size, direction); +} + +static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep, + dma_addr_t dma_handle, + size_t size, + int direction) +{ +} + +static void xilly_of_unmap(void *ptr) +{ + struct xilly_mapping *data = ptr; + + dma_unmap_single(data->device, data->dma_addr, + data->size, data->direction); + + kfree(ptr); +} + +static int xilly_map_single_of(struct xilly_endpoint *ep, + void *ptr, + size_t size, + int direction, + dma_addr_t *ret_dma_handle + ) +{ + dma_addr_t addr; + struct xilly_mapping *this; + int rc; + + this = kzalloc(sizeof(*this), GFP_KERNEL); + if (!this) + return -ENOMEM; + + addr = dma_map_single(ep->dev, ptr, size, direction); + + if (dma_mapping_error(ep->dev, addr)) { + kfree(this); + return -ENODEV; + } + + this->device = ep->dev; + this->dma_addr = addr; + this->size = size; + this->direction = direction; + + *ret_dma_handle = addr; + + rc = devm_add_action(ep->dev, xilly_of_unmap, this); + + if (rc) { + dma_unmap_single(ep->dev, addr, size, direction); + kfree(this); + return rc; + } + + return 0; +} + +static struct xilly_endpoint_hardware of_hw = { + .owner = THIS_MODULE, + .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of, + .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of, + .map_single = xilly_map_single_of, +}; + +static struct xilly_endpoint_hardware of_hw_coherent = { + .owner = THIS_MODULE, + .hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop, + .hw_sync_sgl_for_device = xilly_dma_sync_single_nop, + .map_single = xilly_map_single_of, +}; + +static int xilly_drv_probe(struct platform_device *op) +{ + struct device *dev = &op->dev; + struct xilly_endpoint *endpoint; + int rc; + int irq; + struct resource res; + struct xilly_endpoint_hardware *ephw = &of_hw; + + if (of_property_read_bool(dev->of_node, "dma-coherent")) + ephw = &of_hw_coherent; + + endpoint = xillybus_init_endpoint(NULL, dev, ephw); + + if (!endpoint) + return -ENOMEM; + + dev_set_drvdata(dev, endpoint); + + rc = of_address_to_resource(dev->of_node, 0, &res); + endpoint->registers = devm_ioremap_resource(dev, &res); + + if (IS_ERR(endpoint->registers)) + return PTR_ERR(endpoint->registers); + + irq = irq_of_parse_and_map(dev->of_node, 0); + + rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint); + + if (rc) { + dev_err(endpoint->dev, + "Failed to register IRQ handler. 
Aborting.\n"); + return -ENODEV; + } + + return xillybus_endpoint_discovery(endpoint); +} + +static int xilly_drv_remove(struct platform_device *op) +{ + struct device *dev = &op->dev; + struct xilly_endpoint *endpoint = dev_get_drvdata(dev); + + xillybus_endpoint_remove(endpoint); + + return 0; +} + +static struct platform_driver xillybus_platform_driver = { + .probe = xilly_drv_probe, + .remove = xilly_drv_remove, + .driver = { + .name = xillyname, + .of_match_table = xillybus_of_match, + }, +}; + +module_platform_driver(xillybus_platform_driver); diff --git a/kernel/drivers/char/xillybus/xillybus_pcie.c b/kernel/drivers/char/xillybus/xillybus_pcie.c new file mode 100644 index 000000000..d8266bc2a --- /dev/null +++ b/kernel/drivers/char/xillybus/xillybus_pcie.c @@ -0,0 +1,228 @@ +/* + * linux/drivers/misc/xillybus_pcie.c + * + * Copyright 2011 Xillybus Ltd, http://xillybus.com + * + * Driver for the Xillybus FPGA/host framework using PCI Express. + * + * This program is free software; you can redistribute it and/or modify + * it under the smems of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include +#include +#include +#include +#include "xillybus.h" + +MODULE_DESCRIPTION("Xillybus driver for PCIe"); +MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); +MODULE_VERSION("1.06"); +MODULE_ALIAS("xillybus_pcie"); +MODULE_LICENSE("GPL v2"); + +#define PCI_DEVICE_ID_XILLYBUS 0xebeb + +#define PCI_VENDOR_ID_ALTERA 0x1172 +#define PCI_VENDOR_ID_ACTEL 0x11aa +#define PCI_VENDOR_ID_LATTICE 0x1204 + +static const char xillyname[] = "xillybus_pcie"; + +static const struct pci_device_id xillyids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)}, + {PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)}, + {PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)}, + { /* End: all zeroes */ } +}; + +static int xilly_pci_direction(int direction) +{ + switch (direction) { + case DMA_TO_DEVICE: + return PCI_DMA_TODEVICE; + case DMA_FROM_DEVICE: + return PCI_DMA_FROMDEVICE; + default: + return PCI_DMA_BIDIRECTIONAL; + } +} + +static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep, + dma_addr_t dma_handle, + size_t size, + int direction) +{ + pci_dma_sync_single_for_cpu(ep->pdev, + dma_handle, + size, + xilly_pci_direction(direction)); +} + +static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep, + dma_addr_t dma_handle, + size_t size, + int direction) +{ + pci_dma_sync_single_for_device(ep->pdev, + dma_handle, + size, + xilly_pci_direction(direction)); +} + +static void xilly_pci_unmap(void *ptr) +{ + struct xilly_mapping *data = ptr; + + pci_unmap_single(data->device, data->dma_addr, + data->size, data->direction); + + kfree(ptr); +} + +/* + * Map either through the PCI DMA mapper or the non_PCI one. Behind the + * scenes exactly the same functions are called with the same parameters, + * but that can change. 
+ */
+
+static int xilly_map_single_pci(struct xilly_endpoint *ep,
+				void *ptr,
+				size_t size,
+				int direction,
+				dma_addr_t *ret_dma_handle
+	)
+{
+	int pci_direction;
+	dma_addr_t addr;
+	struct xilly_mapping *this;
+	int rc;
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this)
+		return -ENOMEM;
+
+	pci_direction = xilly_pci_direction(direction);
+
+	addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
+
+	if (pci_dma_mapping_error(ep->pdev, addr)) {
+		kfree(this);
+		return -ENODEV;
+	}
+
+	this->device = ep->pdev;
+	this->dma_addr = addr;
+	this->size = size;
+	this->direction = pci_direction;
+
+	*ret_dma_handle = addr;
+
+	rc = devm_add_action(ep->dev, xilly_pci_unmap, this);
+	if (rc) {
+		pci_unmap_single(ep->pdev, addr, size, pci_direction);
+		kfree(this);
+		return rc;
+	}
+
+	return 0;
+}
+
+static struct xilly_endpoint_hardware pci_hw = {
+	.owner = THIS_MODULE,
+	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
+	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
+	.map_single = xilly_map_single_pci,
+};
+
+static int xilly_probe(struct pci_dev *pdev,
+		       const struct pci_device_id *ent)
+{
+	struct xilly_endpoint *endpoint;
+	int rc;
+
+	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);
+
+	if (!endpoint)
+		return -ENOMEM;
+
+	pci_set_drvdata(pdev, endpoint);
+
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(endpoint->dev,
+			"pcim_enable_device() failed. Aborting.\n");
+		return rc;
+	}
+
+	/* L0s has caused packet drops. No power saving, thank you. */
+
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(endpoint->dev,
+			"Incorrect BAR configuration. Aborting.\n");
+		return -ENODEV;
+	}
+
+	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
+	if (rc) {
+		dev_err(endpoint->dev,
+			"pcim_iomap_regions() failed. Aborting.\n");
+		return rc;
+	}
+
+	endpoint->registers = pcim_iomap_table(pdev)[0];
+
+	pci_set_master(pdev);
+
+	/* Set up a single MSI interrupt */
+	if (pci_enable_msi(pdev)) {
+		dev_err(endpoint->dev,
+			"Failed to enable MSI interrupts. Aborting.\n");
+		return -ENODEV;
+	}
+	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
+			      xillyname, endpoint);
+	if (rc) {
+		dev_err(endpoint->dev,
+			"Failed to register MSI handler. Aborting.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
+	 * is the right thing. But some poorly designed PCIe devices report
+	 * success even though the hardware drops those 64-bit PCIe packets.
+	 * So trust nobody and use 32-bit DMA addressing in any case.
+	 */
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		endpoint->dma_using_dac = 0;
+	} else {
+		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
+		return -ENODEV;
+	}
+
+	return xillybus_endpoint_discovery(endpoint);
+}
+
+static void xilly_remove(struct pci_dev *pdev)
+{
+	struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);
+
+	xillybus_endpoint_remove(endpoint);
+}
+
+MODULE_DEVICE_TABLE(pci, xillyids);
+
+static struct pci_driver xillybus_driver = {
+	.name = xillyname,
+	.id_table = xillyids,
+	.probe = xilly_probe,
+	.remove = xilly_remove,
+};
+
+module_pci_driver(xillybus_driver);
--
cgit 1.2.3-korg