Diffstat (limited to 'kernel/arch/powerpc/platforms/powermac')
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/Kconfig       |   30
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/Makefile      |   19
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/backlight.c   |  220
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/bootx_init.c  |  595
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/cache.S       |  358
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/feature.c     | 3043
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/low_i2c.c     | 1515
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/nvram.c       |  648
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/pci.c         | 1255
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/pfunc_base.c  |  410
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/pfunc_core.c  | 1021
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/pic.c         |  659
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/pmac.h        |   43
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/setup.c       |  666
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/sleep.S       |  397
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/smp.c         | 1033
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/time.c        |  334
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/udbg_adb.c    |  219
-rw-r--r--  kernel/arch/powerpc/platforms/powermac/udbg_scc.c    |  184
19 files changed, 12649 insertions, 0 deletions
diff --git a/kernel/arch/powerpc/platforms/powermac/Kconfig b/kernel/arch/powerpc/platforms/powermac/Kconfig
new file mode 100644
index 000000000..607124bae
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/Kconfig
@@ -0,0 +1,30 @@
+config PPC_PMAC
+ bool "Apple PowerMac based machines"
+ depends on PPC_BOOK3S
+ select MPIC
+ select PCI
+ select PPC_INDIRECT_PCI if PPC32
+ select PPC_MPC106 if PPC32
+ select PPC_NATIVE
+ default y
+
+config PPC_PMAC64
+ bool
+ depends on PPC_PMAC && PPC64
+ select MPIC
+ select U3_DART
+ select MPIC_U3_HT_IRQS
+ select GENERIC_TBSYNC
+ select PPC_970_NAP
+ default y
+
+config PPC_PMAC32_PSURGE
+ bool "Support for powersurge upgrade cards" if EXPERT
+ depends on SMP && PPC32 && PPC_PMAC
+ select PPC_SMP_MUXED_IPI
+ default y
+ help
+ The powersurge cpu boards can be used in the generation
+ of powermacs that have a socket for an upgradeable cpu card,
+ including the 7500, 8500, 9500, 9600. Support exists for
+ both dual and quad socket upgrade cards.
diff --git a/kernel/arch/powerpc/platforms/powermac/Makefile b/kernel/arch/powerpc/platforms/powermac/Makefile
new file mode 100644
index 000000000..52c6ce1cc
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/Makefile
@@ -0,0 +1,19 @@
+CFLAGS_bootx_init.o += -fPIC
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early boot code
+CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
+endif
+
+obj-y += pic.o setup.o time.o feature.o pci.o \
+ sleep.o low_i2c.o cache.o pfunc_core.o \
+ pfunc_base.o udbg_scc.o udbg_adb.o
+obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
+# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really
+# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
+# CONFIG_NVRAM=y
+obj-$(CONFIG_NVRAM:m=y) += nvram.o
+# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
+obj-$(CONFIG_PPC64) += nvram.o
+obj-$(CONFIG_PPC32) += bootx_init.o
+obj-$(CONFIG_SMP) += smp.o
diff --git a/kernel/arch/powerpc/platforms/powermac/backlight.c b/kernel/arch/powerpc/platforms/powermac/backlight.c
new file mode 100644
index 000000000..a00096b1c
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/backlight.c
@@ -0,0 +1,220 @@
+/*
+ * Miscellaneous procedures for dealing with the PowerMac hardware.
+ * Contains support for the backlight.
+ *
+ * Copyright (C) 2000 Benjamin Herrenschmidt
+ * Copyright (C) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/atomic.h>
+#include <linux/export.h>
+#include <asm/prom.h>
+#include <asm/backlight.h>
+
+#define OLD_BACKLIGHT_MAX 15
+
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
+
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
+
+/* Although these variables are used in interrupt context, it makes no sense to
+ * protect them. No user is able to produce enough key events per second and
+ * notice the errors that might happen.
+ */
+static int pmac_backlight_key_queued;
+static int pmac_backlight_set_legacy_queued;
+
+/* The via-pmu code allows the backlight to be grabbed, in which case the
+ * in-kernel control of the brightness needs to be disabled. This should
+ * only be used by really old PowerBooks.
+ */
+static atomic_t kernel_backlight_disabled = ATOMIC_INIT(0);
+
+/* Protect the pmac_backlight variable below.
+ You should hold this lock when using the pmac_backlight pointer to
+ prevent its potential removal. */
+DEFINE_MUTEX(pmac_backlight_mutex);
+
+/* Main backlight storage
+ *
+ * Backlight drivers in this variable are required to have the "ops"
+ * attribute set and to have an update_status function.
+ *
+ * We can only store one backlight here, but since Apple laptops have only one
+ * internal display, it doesn't matter. Other backlight drivers can be used
+ * independently.
+ *
+ */
+struct backlight_device *pmac_backlight;
+
+int pmac_has_backlight_type(const char *type)
+{
+ struct device_node* bk_node = of_find_node_by_name(NULL, "backlight");
+
+ if (bk_node) {
+ const char *prop = of_get_property(bk_node,
+ "backlight-control", NULL);
+ if (prop && strncmp(prop, type, strlen(type)) == 0) {
+ of_node_put(bk_node);
+ return 1;
+ }
+ of_node_put(bk_node);
+ }
+
+ return 0;
+}
+
+int pmac_backlight_curve_lookup(struct fb_info *info, int value)
+{
+ int level = (FB_BACKLIGHT_LEVELS - 1);
+
+ if (info && info->bl_dev) {
+ int i, max = 0;
+
+ /* Look for biggest value */
+ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)
+ max = max((int)info->bl_curve[i], max);
+
+ /* Look for nearest value */
+ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) {
+ int diff = abs(info->bl_curve[i] - value);
+ if (diff < max) {
+ max = diff;
+ level = i;
+ }
+ }
+
+ }
+
+ return level;
+}
+
+static void pmac_backlight_key_worker(struct work_struct *work)
+{
+ if (atomic_read(&kernel_backlight_disabled))
+ return;
+
+ mutex_lock(&pmac_backlight_mutex);
+ if (pmac_backlight) {
+ struct backlight_properties *props;
+ int brightness;
+
+ props = &pmac_backlight->props;
+
+ brightness = props->brightness +
+ ((pmac_backlight_key_queued?-1:1) *
+ (props->max_brightness / 15));
+
+ if (brightness < 0)
+ brightness = 0;
+ else if (brightness > props->max_brightness)
+ brightness = props->max_brightness;
+
+ props->brightness = brightness;
+ backlight_update_status(pmac_backlight);
+ }
+ mutex_unlock(&pmac_backlight_mutex);
+}
+
+/* This function is called in interrupt context */
+void pmac_backlight_key(int direction)
+{
+ if (atomic_read(&kernel_backlight_disabled))
+ return;
+
+ /* we can receive multiple interrupts here, but the scheduled work
+ * will run only once, with the last value
+ */
+ pmac_backlight_key_queued = direction;
+ schedule_work(&pmac_backlight_key_work);
+}
+
+static int __pmac_backlight_set_legacy_brightness(int brightness)
+{
+ int error = -ENXIO;
+
+ mutex_lock(&pmac_backlight_mutex);
+ if (pmac_backlight) {
+ struct backlight_properties *props;
+
+ props = &pmac_backlight->props;
+ props->brightness = brightness *
+ (props->max_brightness + 1) /
+ (OLD_BACKLIGHT_MAX + 1);
+
+ if (props->brightness > props->max_brightness)
+ props->brightness = props->max_brightness;
+ else if (props->brightness < 0)
+ props->brightness = 0;
+
+ backlight_update_status(pmac_backlight);
+
+ error = 0;
+ }
+ mutex_unlock(&pmac_backlight_mutex);
+
+ return error;
+}
+
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
+{
+ if (atomic_read(&kernel_backlight_disabled))
+ return;
+
+ __pmac_backlight_set_legacy_brightness(pmac_backlight_set_legacy_queued);
+}
+
+/* This function is called in interrupt context */
+void pmac_backlight_set_legacy_brightness_pmu(int brightness) {
+ if (atomic_read(&kernel_backlight_disabled))
+ return;
+
+ pmac_backlight_set_legacy_queued = brightness;
+ schedule_work(&pmac_backlight_set_legacy_work);
+}
+
+int pmac_backlight_set_legacy_brightness(int brightness)
+{
+ return __pmac_backlight_set_legacy_brightness(brightness);
+}
+
+int pmac_backlight_get_legacy_brightness()
+{
+ int result = -ENXIO;
+
+ mutex_lock(&pmac_backlight_mutex);
+ if (pmac_backlight) {
+ struct backlight_properties *props;
+
+ props = &pmac_backlight->props;
+
+ result = props->brightness *
+ (OLD_BACKLIGHT_MAX + 1) /
+ (props->max_brightness + 1);
+ }
+ mutex_unlock(&pmac_backlight_mutex);
+
+ return result;
+}
+
+void pmac_backlight_disable()
+{
+ atomic_inc(&kernel_backlight_disabled);
+}
+
+void pmac_backlight_enable()
+{
+ atomic_dec(&kernel_backlight_disabled);
+}
+
+EXPORT_SYMBOL_GPL(pmac_backlight);
+EXPORT_SYMBOL_GPL(pmac_backlight_mutex);
+EXPORT_SYMBOL_GPL(pmac_has_backlight_type);
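
Note on the pattern above: pmac_backlight_key() and pmac_backlight_set_legacy_brightness_pmu() run in interrupt context, so they only record the requested value and schedule_work() a handler, which then does the real update under pmac_backlight_mutex in process context; repeated interrupts simply overwrite the pending value. A stripped-down sketch of that same deferral idiom, with illustrative names that are not part of this patch:

#include <linux/workqueue.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

/* Written from interrupt context without a lock: only the most recent
 * value matters, exactly as with pmac_backlight_key_queued above. */
static int example_last_value;

static void example_worker(struct work_struct *work)
{
	mutex_lock(&example_lock);
	/* consume example_last_value in process context here */
	mutex_unlock(&example_lock);
}
static DECLARE_WORK(example_work, example_worker);

/* May be called from interrupt context; the worker runs later, once,
 * with whatever value was stored last. */
void example_event(int value)
{
	example_last_value = value;
	schedule_work(&example_work);
}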
diff --git a/kernel/arch/powerpc/platforms/powermac/bootx_init.c b/kernel/arch/powerpc/platforms/powermac/bootx_init.c
new file mode 100644
index 000000000..76f5013c3
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/bootx_init.c
@@ -0,0 +1,595 @@
+/*
+ * Early boot support code for BootX bootloader
+ *
+ * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <generated/utsrelease.h>
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/page.h>
+#include <asm/bootx.h>
+#include <asm/btext.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+#undef DEBUG
+#define SET_BOOT_BAT
+
+#ifdef DEBUG
+#define DBG(fmt...) do { bootx_printf(fmt); } while(0)
+#else
+#define DBG(fmt...) do { } while(0)
+#endif
+
+extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
+
+static unsigned long __initdata bootx_dt_strbase;
+static unsigned long __initdata bootx_dt_strend;
+static unsigned long __initdata bootx_node_chosen;
+static boot_infos_t * __initdata bootx_info;
+static char __initdata bootx_disp_path[256];
+
+/* Is boot-info compatible ? */
+#define BOOT_INFO_IS_COMPATIBLE(bi) \
+ ((bi)->compatible_version <= BOOT_INFO_VERSION)
+#define BOOT_INFO_IS_V2_COMPATIBLE(bi) ((bi)->version >= 2)
+#define BOOT_INFO_IS_V4_COMPATIBLE(bi) ((bi)->version >= 4)
+
+#ifdef CONFIG_BOOTX_TEXT
+static void __init bootx_printf(const char *format, ...)
+{
+ const char *p, *q, *s;
+ va_list args;
+ unsigned long v;
+
+ va_start(args, format);
+ for (p = format; *p != 0; p = q) {
+ for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
+ ;
+ if (q > p)
+ btext_drawtext(p, q - p);
+ if (*q == 0)
+ break;
+ if (*q == '\n') {
+ ++q;
+ btext_flushline();
+ btext_drawstring("\r\n");
+ btext_flushline();
+ continue;
+ }
+ ++q;
+ if (*q == 0)
+ break;
+ switch (*q) {
+ case 's':
+ ++q;
+ s = va_arg(args, const char *);
+ if (s == NULL)
+ s = "<NULL>";
+ btext_drawstring(s);
+ break;
+ case 'x':
+ ++q;
+ v = va_arg(args, unsigned long);
+ btext_drawhex(v);
+ break;
+ }
+ }
+}
+#else /* CONFIG_BOOTX_TEXT */
+static void __init bootx_printf(const char *format, ...) {}
+#endif /* CONFIG_BOOTX_TEXT */
+
+static void * __init bootx_early_getprop(unsigned long base,
+ unsigned long node,
+ char *prop)
+{
+ struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
+ u32 *ppp = &np->properties;
+
+ while(*ppp) {
+ struct bootx_dt_prop *pp =
+ (struct bootx_dt_prop *)(base + *ppp);
+
+ if (strcmp((char *)((unsigned long)pp->name + base),
+ prop) == 0) {
+ return (void *)((unsigned long)pp->value + base);
+ }
+ ppp = &pp->next;
+ }
+ return NULL;
+}
+
+#define dt_push_token(token, mem) \
+ do { \
+ *(mem) = _ALIGN_UP(*(mem),4); \
+ *((u32 *)*(mem)) = token; \
+ *(mem) += 4; \
+ } while(0)
+
+static unsigned long __init bootx_dt_find_string(char *str)
+{
+ char *s, *os;
+
+ s = os = (char *)bootx_dt_strbase;
+ s += 4;
+ while (s < (char *)bootx_dt_strend) {
+ if (strcmp(s, str) == 0)
+ return s - os;
+ s += strlen(s) + 1;
+ }
+ return 0;
+}
+
+static void __init bootx_dt_add_prop(char *name, void *data, int size,
+ unsigned long *mem_end)
+{
+ unsigned long soff = bootx_dt_find_string(name);
+ if (data == NULL)
+ size = 0;
+ if (soff == 0) {
+ bootx_printf("WARNING: Can't find string index for <%s>\n",
+ name);
+ return;
+ }
+ if (size > 0x20000) {
+ bootx_printf("WARNING: ignoring large property ");
+ bootx_printf("%s length 0x%x\n", name, size);
+ return;
+ }
+ dt_push_token(OF_DT_PROP, mem_end);
+ dt_push_token(size, mem_end);
+ dt_push_token(soff, mem_end);
+
+ /* push property content */
+ if (size && data) {
+ memcpy((void *)*mem_end, data, size);
+ *mem_end = _ALIGN_UP(*mem_end + size, 4);
+ }
+}
+
+static void __init bootx_add_chosen_props(unsigned long base,
+ unsigned long *mem_end)
+{
+ u32 val;
+
+ bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end);
+
+ if (bootx_info->kernelParamsOffset) {
+ char *args = (char *)((unsigned long)bootx_info) +
+ bootx_info->kernelParamsOffset;
+ bootx_dt_add_prop("bootargs", args, strlen(args) + 1, mem_end);
+ }
+ if (bootx_info->ramDisk) {
+ val = ((unsigned long)bootx_info) + bootx_info->ramDisk;
+ bootx_dt_add_prop("linux,initrd-start", &val, 4, mem_end);
+ val += bootx_info->ramDiskSize;
+ bootx_dt_add_prop("linux,initrd-end", &val, 4, mem_end);
+ }
+ if (strlen(bootx_disp_path))
+ bootx_dt_add_prop("linux,stdout-path", bootx_disp_path,
+ strlen(bootx_disp_path) + 1, mem_end);
+}
+
+static void __init bootx_add_display_props(unsigned long base,
+ unsigned long *mem_end,
+ int has_real_node)
+{
+ boot_infos_t *bi = bootx_info;
+ u32 tmp;
+
+ if (has_real_node) {
+ bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end);
+ bootx_dt_add_prop("linux,opened", NULL, 0, mem_end);
+ } else
+ bootx_dt_add_prop("linux,bootx-noscreen", NULL, 0, mem_end);
+
+ tmp = bi->dispDeviceDepth;
+ bootx_dt_add_prop("linux,bootx-depth", &tmp, 4, mem_end);
+ tmp = bi->dispDeviceRect[2] - bi->dispDeviceRect[0];
+ bootx_dt_add_prop("linux,bootx-width", &tmp, 4, mem_end);
+ tmp = bi->dispDeviceRect[3] - bi->dispDeviceRect[1];
+ bootx_dt_add_prop("linux,bootx-height", &tmp, 4, mem_end);
+ tmp = bi->dispDeviceRowBytes;
+ bootx_dt_add_prop("linux,bootx-linebytes", &tmp, 4, mem_end);
+ tmp = (u32)bi->dispDeviceBase;
+ if (tmp == 0)
+ tmp = (u32)bi->logicalDisplayBase;
+ tmp += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;
+ tmp += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);
+ bootx_dt_add_prop("linux,bootx-addr", &tmp, 4, mem_end);
+}
+
+static void __init bootx_dt_add_string(char *s, unsigned long *mem_end)
+{
+ unsigned int l = strlen(s) + 1;
+ memcpy((void *)*mem_end, s, l);
+ bootx_dt_strend = *mem_end = *mem_end + l;
+}
+
+static void __init bootx_scan_dt_build_strings(unsigned long base,
+ unsigned long node,
+ unsigned long *mem_end)
+{
+ struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
+ u32 *cpp, *ppp = &np->properties;
+ unsigned long soff;
+ char *namep;
+
+ /* Keep refs to known nodes */
+ namep = np->full_name ? (char *)(base + np->full_name) : NULL;
+ if (namep == NULL) {
+ bootx_printf("Node without a full name !\n");
+ namep = "";
+ }
+ DBG("* strings: %s\n", namep);
+
+ if (!strcmp(namep, "/chosen")) {
+ DBG(" detected /chosen ! adding properties names !\n");
+ bootx_dt_add_string("linux,bootx", mem_end);
+ bootx_dt_add_string("linux,stdout-path", mem_end);
+ bootx_dt_add_string("linux,initrd-start", mem_end);
+ bootx_dt_add_string("linux,initrd-end", mem_end);
+ bootx_dt_add_string("bootargs", mem_end);
+ bootx_node_chosen = node;
+ }
+ if (node == bootx_info->dispDeviceRegEntryOffset) {
+ DBG(" detected display ! adding properties names !\n");
+ bootx_dt_add_string("linux,boot-display", mem_end);
+ bootx_dt_add_string("linux,opened", mem_end);
+ strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
+ }
+
+ /* get and store all property names */
+ while (*ppp) {
+ struct bootx_dt_prop *pp =
+ (struct bootx_dt_prop *)(base + *ppp);
+
+ namep = pp->name ? (char *)(base + pp->name) : NULL;
+ if (namep == NULL || strcmp(namep, "name") == 0)
+ goto next;
+ /* get/create string entry */
+ soff = bootx_dt_find_string(namep);
+ if (soff == 0)
+ bootx_dt_add_string(namep, mem_end);
+ next:
+ ppp = &pp->next;
+ }
+
+ /* do all our children */
+ cpp = &np->child;
+ while(*cpp) {
+ np = (struct bootx_dt_node *)(base + *cpp);
+ bootx_scan_dt_build_strings(base, *cpp, mem_end);
+ cpp = &np->sibling;
+ }
+}
+
+static void __init bootx_scan_dt_build_struct(unsigned long base,
+ unsigned long node,
+ unsigned long *mem_end)
+{
+ struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
+ u32 *cpp, *ppp = &np->properties;
+ char *namep, *p, *ep, *lp;
+ int l;
+
+ dt_push_token(OF_DT_BEGIN_NODE, mem_end);
+
+ /* get the node's full name */
+ namep = np->full_name ? (char *)(base + np->full_name) : NULL;
+ if (namep == NULL)
+ namep = "";
+ l = strlen(namep);
+
+ DBG("* struct: %s\n", namep);
+
+ /* Fixup an Apple bug where they have bogus \0 chars in the
+ * middle of the path in some properties, and extract
+ * the unit name (everything after the last '/').
+ */
+ memcpy((void *)*mem_end, namep, l + 1);
+ namep = (char *)*mem_end;
+ for (lp = p = namep, ep = namep + l; p < ep; p++) {
+ if (*p == '/')
+ lp = namep;
+ else if (*p != 0)
+ *lp++ = *p;
+ }
+ *lp = 0;
+ *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
+
+ /* get and store all properties */
+ while (*ppp) {
+ struct bootx_dt_prop *pp =
+ (struct bootx_dt_prop *)(base + *ppp);
+
+ namep = pp->name ? (char *)(base + pp->name) : NULL;
+ /* Skip "name" */
+ if (namep == NULL || !strcmp(namep, "name"))
+ goto next;
+ /* Skip "bootargs" in /chosen too as we replace it */
+ if (node == bootx_node_chosen && !strcmp(namep, "bootargs"))
+ goto next;
+
+ /* push property head */
+ bootx_dt_add_prop(namep,
+ pp->value ? (void *)(base + pp->value): NULL,
+ pp->length, mem_end);
+ next:
+ ppp = &pp->next;
+ }
+
+ if (node == bootx_node_chosen) {
+ bootx_add_chosen_props(base, mem_end);
+ if (bootx_info->dispDeviceRegEntryOffset == 0)
+ bootx_add_display_props(base, mem_end, 0);
+ }
+ else if (node == bootx_info->dispDeviceRegEntryOffset)
+ bootx_add_display_props(base, mem_end, 1);
+
+ /* do all our children */
+ cpp = &np->child;
+ while(*cpp) {
+ np = (struct bootx_dt_node *)(base + *cpp);
+ bootx_scan_dt_build_struct(base, *cpp, mem_end);
+ cpp = &np->sibling;
+ }
+
+ dt_push_token(OF_DT_END_NODE, mem_end);
+}
+
+static unsigned long __init bootx_flatten_dt(unsigned long start)
+{
+ boot_infos_t *bi = bootx_info;
+ unsigned long mem_start, mem_end;
+ struct boot_param_header *hdr;
+ unsigned long base;
+ u64 *rsvmap;
+
+ /* Start using memory after the big blob passed by BootX, get
+ * some space for the header
+ */
+ mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
+ DBG("Boot params header at: %x\n", mem_start);
+ hdr = (struct boot_param_header *)mem_start;
+ mem_end += sizeof(struct boot_param_header);
+ rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
+ hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
+ mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
+
+ /* Get base of tree */
+ base = ((unsigned long)bi) + bi->deviceTreeOffset;
+
+ /* Build string array */
+ DBG("Building string array at: %x\n", mem_end);
+ DBG("Device Tree Base=%x\n", base);
+ bootx_dt_strbase = mem_end;
+ mem_end += 4;
+ bootx_dt_strend = mem_end;
+ bootx_scan_dt_build_strings(base, 4, &mem_end);
+ /* Add some strings */
+ bootx_dt_add_string("linux,bootx-noscreen", &mem_end);
+ bootx_dt_add_string("linux,bootx-depth", &mem_end);
+ bootx_dt_add_string("linux,bootx-width", &mem_end);
+ bootx_dt_add_string("linux,bootx-height", &mem_end);
+ bootx_dt_add_string("linux,bootx-linebytes", &mem_end);
+ bootx_dt_add_string("linux,bootx-addr", &mem_end);
+ /* Wrap up strings */
+ hdr->off_dt_strings = bootx_dt_strbase - mem_start;
+ hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase;
+
+ /* Build structure */
+ mem_end = _ALIGN(mem_end, 16);
+ DBG("Building device tree structure at: %x\n", mem_end);
+ hdr->off_dt_struct = mem_end - mem_start;
+ bootx_scan_dt_build_struct(base, 4, &mem_end);
+ dt_push_token(OF_DT_END, &mem_end);
+
+ /* Finish header */
+ hdr->boot_cpuid_phys = 0;
+ hdr->magic = OF_DT_HEADER;
+ hdr->totalsize = mem_end - mem_start;
+ hdr->version = OF_DT_VERSION;
+ /* Version 16 is not backward compatible */
+ hdr->last_comp_version = 0x10;
+
+ /* Reserve the whole thing and copy the reserve map in, we
+ * also bump mem_reserve_cnt to cause further reservations to
+ * fail since it's too late.
+ */
+ mem_end = _ALIGN(mem_end, PAGE_SIZE);
+ DBG("End of boot params: %x\n", mem_end);
+ rsvmap[0] = mem_start;
+ rsvmap[1] = mem_end;
+ if (bootx_info->ramDisk) {
+ rsvmap[2] = ((unsigned long)bootx_info) + bootx_info->ramDisk;
+ rsvmap[3] = rsvmap[2] + bootx_info->ramDiskSize;
+ rsvmap[4] = 0;
+ rsvmap[5] = 0;
+ } else {
+ rsvmap[2] = 0;
+ rsvmap[3] = 0;
+ }
+
+ return (unsigned long)hdr;
+}
+
+
+#ifdef CONFIG_BOOTX_TEXT
+static void __init btext_welcome(boot_infos_t *bi)
+{
+ unsigned long flags;
+ unsigned long pvr;
+
+ bootx_printf("Welcome to Linux, kernel " UTS_RELEASE "\n");
+ bootx_printf("\nlinked at : 0x%x", KERNELBASE);
+ bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
+ bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
+ bootx_printf(" (log)");
+ bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
+ bootx_printf("\nboot_info at : 0x%x", bi);
+ __asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
+ bootx_printf("\nMSR : 0x%x", flags);
+ __asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr));
+ bootx_printf("\nPVR : 0x%x", pvr);
+ pvr >>= 16;
+ if (pvr > 1) {
+ __asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags));
+ bootx_printf("\nHID0 : 0x%x", flags);
+ }
+ if (pvr == 8 || pvr == 12 || pvr == 0x800c) {
+ __asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags));
+ bootx_printf("\nICTC : 0x%x", flags);
+ }
+#ifdef DEBUG
+ bootx_printf("\n\n");
+ bootx_printf("bi->deviceTreeOffset : 0x%x\n",
+ bi->deviceTreeOffset);
+ bootx_printf("bi->deviceTreeSize : 0x%x\n",
+ bi->deviceTreeSize);
+#endif
+ bootx_printf("\n\n");
+}
+#endif /* CONFIG_BOOTX_TEXT */
+
+void __init bootx_init(unsigned long r3, unsigned long r4)
+{
+ boot_infos_t *bi = (boot_infos_t *) r4;
+ unsigned long hdr;
+ unsigned long space;
+ unsigned long ptr, x;
+ char *model;
+ unsigned long offset = reloc_offset();
+
+ reloc_got2(offset);
+
+ bootx_info = bi;
+
+ /* We haven't cleared any bss at this point, make sure
+ * what we need is initialized
+ */
+ bootx_dt_strbase = bootx_dt_strend = 0;
+ bootx_node_chosen = 0;
+ bootx_disp_path[0] = 0;
+
+ if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
+ bi->logicalDisplayBase = bi->dispDeviceBase;
+
+ /* Fixup depth 16 -> 15 as that's what MacOS calls 16bpp */
+ if (bi->dispDeviceDepth == 16)
+ bi->dispDeviceDepth = 15;
+
+
+#ifdef CONFIG_BOOTX_TEXT
+ ptr = (unsigned long)bi->logicalDisplayBase;
+ ptr += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;
+ ptr += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);
+ btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0],
+ bi->dispDeviceRect[3] - bi->dispDeviceRect[1],
+ bi->dispDeviceDepth, bi->dispDeviceRowBytes,
+ (unsigned long)bi->logicalDisplayBase);
+ btext_clearscreen();
+ btext_flushscreen();
+#endif /* CONFIG_BOOTX_TEXT */
+
+ /*
+ * Test if boot-info is compatible. Done only in config
+ * CONFIG_BOOTX_TEXT since there is nothing much we can do
+ * with an incompatible version, except display a message
+ * and eventually hang the processor...
+ *
+ * I'll try to keep enough of boot-info compatible in the
+ * future to always allow display of this message;
+ */
+ if (!BOOT_INFO_IS_COMPATIBLE(bi)) {
+ bootx_printf(" !!! WARNING - Incompatible version"
+ " of BootX !!!\n\n\n");
+ for (;;)
+ ;
+ }
+ if (bi->architecture != BOOT_ARCH_PCI) {
+ bootx_printf(" !!! WARNING - Usupported machine"
+ " architecture !\n");
+ for (;;)
+ ;
+ }
+
+#ifdef CONFIG_BOOTX_TEXT
+ btext_welcome(bi);
+#endif
+
+ /* New BootX enters kernel with MMU off, i/os are not allowed
+ * here. This hack will have been done by the bootstrap anyway.
+ */
+ if (bi->version < 4) {
+ /*
+ * XXX If this is an iMac, turn off the USB controller.
+ */
+ model = (char *) bootx_early_getprop(r4 + bi->deviceTreeOffset,
+ 4, "model");
+ if (model
+ && (strcmp(model, "iMac,1") == 0
+ || strcmp(model, "PowerMac1,1") == 0)) {
+ bootx_printf("iMac,1 detected, shutting down USB\n");
+ out_le32((unsigned __iomem *)0x80880008, 1); /* XXX */
+ }
+ }
+
+ /* Get a pointer that points above the device tree, args, ramdisk,
+ * etc... to use for generating the flattened tree
+ */
+ if (bi->version < 5) {
+ space = bi->deviceTreeOffset + bi->deviceTreeSize;
+ if (bi->ramDisk >= space)
+ space = bi->ramDisk + bi->ramDiskSize;
+ } else
+ space = bi->totalParamsSize;
+
+ bootx_printf("Total space used by parameters & ramdisk: 0x%x\n", space);
+
+ /* New BootX will have flushed all TLBs and enters kernel with
+ * MMU switched OFF, so this should not be useful anymore.
+ */
+ if (bi->version < 4) {
+ bootx_printf("Touching pages...\n");
+
+ /*
+ * Touch each page to make sure the PTEs for them
+ * are in the hash table - the aim is to try to avoid
+ * getting DSI exceptions while copying the kernel image.
+ */
+ for (ptr = ((unsigned long) &_stext) & PAGE_MASK;
+ ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
+ x = *(volatile unsigned long *)ptr;
+ }
+
+ /* Ok, now we need to generate a flattened device-tree to pass
+ * to the kernel
+ */
+ bootx_printf("Preparing boot params...\n");
+
+ hdr = bootx_flatten_dt(space);
+
+#ifdef CONFIG_BOOTX_TEXT
+#ifdef SET_BOOT_BAT
+ bootx_printf("Preparing BAT...\n");
+ btext_prepare_BAT();
+#else
+ btext_unmap();
+#endif
+#endif
+
+ reloc_got2(-offset);
+
+ __start(hdr, KERNELBASE + offset, 0);
+}
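
Aside on bootx_flatten_dt() above: it emits the standard flattened device-tree layout, a boot_param_header, a 64-bit reserve map, a strings block, then a structure block built from OF_DT_BEGIN_NODE / OF_DT_PROP / OF_DT_END_NODE tokens, everything 4-byte aligned (dt_push_token and bootx_dt_add_prop do the alignment). A minimal, userspace-style walker over such a structure block, for orientation only; the token values are the usual flat-device-tree ones and byte swapping is skipped since producer and consumer are both big-endian here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OF_DT_BEGIN_NODE 0x1	/* followed by the NUL-terminated node name */
#define OF_DT_END_NODE   0x2
#define OF_DT_PROP       0x3	/* followed by value length and name offset */
#define OF_DT_END        0x9

#define ALIGN4(p) ((const char *)(((uintptr_t)(p) + 3) & ~(uintptr_t)3))

/* 'p' points at off_dt_struct, 'strings' at off_dt_strings. */
static void walk_dt_struct(const char *p, const char *strings)
{
	int depth = 0;

	for (;;) {
		uint32_t tok = *(const uint32_t *)p;

		p += 4;
		switch (tok) {
		case OF_DT_BEGIN_NODE:
			printf("%*snode '%s'\n", depth * 2, "", p);
			p = ALIGN4(p + strlen(p) + 1);
			depth++;
			break;
		case OF_DT_PROP: {
			uint32_t len = *(const uint32_t *)p;
			uint32_t off = *(const uint32_t *)(p + 4);

			p += 8;
			printf("%*s%s (%u bytes)\n", depth * 2, "",
			       strings + off, len);
			p = ALIGN4(p + len);
			break;
		}
		case OF_DT_END_NODE:
			depth--;
			break;
		case OF_DT_END:
		default:
			return;
		}
	}
}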
diff --git a/kernel/arch/powerpc/platforms/powermac/cache.S b/kernel/arch/powerpc/platforms/powermac/cache.S
new file mode 100644
index 000000000..6be1a4af3
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/cache.S
@@ -0,0 +1,358 @@
+/*
+ * This file contains low-level cache management functions
+ * used for sleep and CPU speed changes on Apple machines.
+ * (In fact the only thing that is Apple-specific is that we assume
+ * that we can read from ROM at physical address 0xfff00000.)
+ *
+ * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
+ * Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/cputable.h>
+
+/*
+ * Flush and disable all data caches (dL1, L2, L3). This is used
+ * when going to sleep, when doing a PMU based cpufreq transition,
+ * or when "offlining" a CPU on SMP machines. This code is over
+ * paranoid, but I've had enough issues with various CPU revs and
+ * bugs that I decided it was worth being over-cautious
+ */
+
+_GLOBAL(flush_disable_caches)
+#ifndef CONFIG_6xx
+ blr
+#else
+BEGIN_FTR_SECTION
+ b flush_disable_745x
+END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
+BEGIN_FTR_SECTION
+ b flush_disable_75x
+END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
+ b __flush_disable_L1
+
+/* This is the code for G3 and 74[01]0 */
+flush_disable_75x:
+ mflr r10
+
+ /* Turn off EE and DR in MSR */
+ mfmsr r11
+ rlwinm r0,r11,0,~MSR_EE
+ rlwinm r0,r0,0,~MSR_DR
+ sync
+ mtmsr r0
+ isync
+
+ /* Stop DST streams */
+BEGIN_FTR_SECTION
+ DSSALL
+ sync
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+
+ /* Stop DPM */
+ mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
+ rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
+ sync
+ mtspr SPRN_HID0,r4 /* Disable DPM */
+ sync
+
+ /* Disp-flush L1. We have a weird problem here that I never
+ * totally figured out. On 750FX, using the ROM for the flush
+ * results in a non-working flush. We use that workaround for
+ * now until I finally understand what's going on. --BenH
+ */
+
+ /* ROM base by default */
+ lis r4,0xfff0
+ mfpvr r3
+ srwi r3,r3,16
+ cmplwi cr0,r3,0x7000
+ bne+ 1f
+ /* RAM base on 750FX */
+ li r4,0
+1: li r4,0x4000
+ mtctr r4
+1: lwz r0,0(r4)
+ addi r4,r4,32
+ bdnz 1b
+ sync
+ isync
+
+ /* Disable / invalidate / enable L1 data */
+ mfspr r3,SPRN_HID0
+ rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE)
+ mtspr SPRN_HID0,r3
+ sync
+ isync
+ ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
+ sync
+ isync
+ mtspr SPRN_HID0,r3
+ xori r3,r3,(HID0_DCI|HID0_ICFI)
+ mtspr SPRN_HID0,r3
+ sync
+
+ /* Get the current enable bit of the L2CR into r4 */
+ mfspr r5,SPRN_L2CR
+ /* Set to data-only (pre-745x bit) */
+ oris r3,r5,L2CR_L2DO@h
+ b 2f
+ /* When disabling L2, code must be in L1 */
+ .balign 32
+1: mtspr SPRN_L2CR,r3
+3: sync
+ isync
+ b 1f
+2: b 3f
+3: sync
+ isync
+ b 1b
+1: /* disp-flush L2. The interesting thing here is that the L2 can be
+ * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
+ * but that is probably fine. We disp-flush over 4Mb to be safe
+ */
+ lis r4,2
+ mtctr r4
+ lis r4,0xfff0
+1: lwz r0,0(r4)
+ addi r4,r4,32
+ bdnz 1b
+ sync
+ isync
+ lis r4,2
+ mtctr r4
+ lis r4,0xfff0
+1: dcbf 0,r4
+ addi r4,r4,32
+ bdnz 1b
+ sync
+ isync
+
+ /* now disable L2 */
+ rlwinm r5,r5,0,~L2CR_L2E
+ b 2f
+ /* When disabling L2, code must be in L1 */
+ .balign 32
+1: mtspr SPRN_L2CR,r5
+3: sync
+ isync
+ b 1f
+2: b 3f
+3: sync
+ isync
+ b 1b
+1: sync
+ isync
+ /* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
+ oris r4,r5,L2CR_L2I@h
+ mtspr SPRN_L2CR,r4
+ sync
+ isync
+
+ /* Wait for the invalidation to complete */
+1: mfspr r3,SPRN_L2CR
+ rlwinm. r0,r3,0,31,31
+ bne 1b
+
+ /* Clear L2I */
+ xoris r4,r4,L2CR_L2I@h
+ sync
+ mtspr SPRN_L2CR,r4
+ sync
+
+ /* now disable the L1 data cache */
+ mfspr r0,SPRN_HID0
+ rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE)
+ mtspr SPRN_HID0,r0
+ sync
+ isync
+
+ /* Restore HID0[DPM] to whatever it was before */
+ sync
+ mfspr r0,SPRN_HID0
+ rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */
+ mtspr SPRN_HID0,r0
+ sync
+
+ /* restore DR and EE */
+ sync
+ mtmsr r11
+ isync
+
+ mtlr r10
+ blr
+
+/* This code is for 745x processors */
+flush_disable_745x:
+ /* Turn off EE and DR in MSR */
+ mfmsr r11
+ rlwinm r0,r11,0,~MSR_EE
+ rlwinm r0,r0,0,~MSR_DR
+ sync
+ mtmsr r0
+ isync
+
+ /* Stop prefetch streams */
+ DSSALL
+ sync
+
+ /* Disable L2 prefetching */
+ mfspr r0,SPRN_MSSCR0
+ rlwinm r0,r0,0,0,29
+ mtspr SPRN_MSSCR0,r0
+ sync
+ isync
+ lis r4,0
+ dcbf 0,r4
+ dcbf 0,r4
+ dcbf 0,r4
+ dcbf 0,r4
+ dcbf 0,r4
+ dcbf 0,r4
+ dcbf 0,r4
+ dcbf 0,r4
+
+ /* Due to a bug with the HW flush on some CPU revs, we occasionally
+ * experience data corruption. I'm adding a displacement flush along
+ * with a dcbf loop over a few Mb to "help". The problem isn't totally
+ * fixed by this in theory, but at least, in practice, I couldn't reproduce
+ * it even with a big hammer...
+ */
+
+ lis r4,0x0002
+ mtctr r4
+ li r4,0
+1:
+ lwz r0,0(r4)
+ addi r4,r4,32 /* Go to start of next cache line */
+ bdnz 1b
+ isync
+
+ /* Now, flush the first 4MB of memory */
+ lis r4,0x0002
+ mtctr r4
+ li r4,0
+ sync
+1:
+ dcbf 0,r4
+ addi r4,r4,32 /* Go to start of next cache line */
+ bdnz 1b
+
+ /* Flush and disable the L1 data cache */
+ mfspr r6,SPRN_LDSTCR
+ lis r3,0xfff0 /* read from ROM for displacement flush */
+ li r4,0xfe /* start with only way 0 unlocked */
+ li r5,128 /* 128 lines in each way */
+1: mtctr r5
+ rlwimi r6,r4,0,24,31
+ mtspr SPRN_LDSTCR,r6
+ sync
+ isync
+2: lwz r0,0(r3) /* touch each cache line */
+ addi r3,r3,32
+ bdnz 2b
+ rlwinm r4,r4,1,24,30 /* move on to the next way */
+ ori r4,r4,1
+ cmpwi r4,0xff /* all done? */
+ bne 1b
+ /* now unlock the L1 data cache */
+ li r4,0
+ rlwimi r6,r4,0,24,31
+ sync
+ mtspr SPRN_LDSTCR,r6
+ sync
+ isync
+
+ /* Flush the L2 cache using the hardware assist */
+ mfspr r3,SPRN_L2CR
+ cmpwi r3,0 /* check if it is enabled first */
+ bge 4f
+ oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
+ b 2f
+ /* When disabling/locking L2, code must be in L1 */
+ .balign 32
+1: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
+3: sync
+ isync
+ b 1f
+2: b 3f
+3: sync
+ isync
+ b 1b
+1: sync
+ isync
+ ori r0,r3,L2CR_L2HWF_745x
+ sync
+ mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
+3: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
+ andi. r0,r0,L2CR_L2HWF_745x
+ bne 3b
+ sync
+ rlwinm r3,r3,0,~L2CR_L2E
+ b 2f
+ /* When disabling L2, code must be in L1 */
+ .balign 32
+1: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
+3: sync
+ isync
+ b 1f
+2: b 3f
+3: sync
+ isync
+ b 1b
+1: sync
+ isync
+ oris r4,r3,L2CR_L2I@h
+ mtspr SPRN_L2CR,r4
+ sync
+ isync
+1: mfspr r4,SPRN_L2CR
+ andis. r0,r4,L2CR_L2I@h
+ bne 1b
+ sync
+
+BEGIN_FTR_SECTION
+ /* Flush the L3 cache using the hardware assist */
+4: mfspr r3,SPRN_L3CR
+ cmpwi r3,0 /* check if it is enabled */
+ bge 6f
+ oris r0,r3,L3CR_L3IO@h
+ ori r0,r0,L3CR_L3DO
+ sync
+ mtspr SPRN_L3CR,r0 /* lock the L3 cache */
+ sync
+ isync
+ ori r0,r0,L3CR_L3HWF
+ sync
+ mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
+5: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
+ andi. r0,r0,L3CR_L3HWF
+ bne 5b
+ rlwinm r3,r3,0,~L3CR_L3E
+ sync
+ mtspr SPRN_L3CR,r3 /* disable the L3 cache */
+ sync
+ ori r4,r3,L3CR_L3I
+ mtspr SPRN_L3CR,r4
+1: mfspr r4,SPRN_L3CR
+ andi. r0,r4,L3CR_L3I
+ bne 1b
+ sync
+END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
+
+6: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
+ rlwinm r0,r0,0,~HID0_DCE
+ mtspr SPRN_HID0,r0
+ sync
+ isync
+ mtmsr r11 /* restore DR and EE */
+ isync
+ blr
+#endif /* CONFIG_6xx */
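
Aside on the flush technique in cache.S: both flush_disable_75x and flush_disable_745x rely on displacement flushing, i.e. with the cache still enabled they load one word from every 32-byte line of a region larger than the cache (ROM at 0xfff00000, or RAM), so every resident line, dirty or not, is pushed out before the cache is disabled and invalidated. Roughly the same loop in C, as a sketch only; the real code also masks MSR[EE]/MSR[DR], stops dynamic power management and brackets everything with sync/isync, which has no C equivalent:

#include <stdint.h>

#define CACHE_LINE_BYTES 32
#define FLUSH_SPAN (4 * 1024 * 1024)	/* the assembly sweeps 4Mb "to be safe" */

/* Touch one word per cache line across a span larger than the cache,
 * forcing every line currently resident to be displaced (written back
 * if dirty). 'base' would be the ROM or RAM window used above. */
static void displacement_flush(const volatile uint8_t *base)
{
	unsigned long off;

	for (off = 0; off < FLUSH_SPAN; off += CACHE_LINE_BYTES)
		(void)*(const volatile uint32_t *)(base + off);
}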
diff --git a/kernel/arch/powerpc/platforms/powermac/feature.c b/kernel/arch/powerpc/platforms/powermac/feature.c
new file mode 100644
index 000000000..4882bfd90
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/feature.c
@@ -0,0 +1,3043 @@
+/*
+ * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
+ * Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * TODO:
+ *
+ * - Replace mdelay with some schedule loop if possible
+ * - Shorten some obfuscated delays on some routines (like modem
+ * power)
+ * - Refcount some clocks (see darwin)
+ * - Split split split...
+ *
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <asm/sections.h>
+#include <asm/errno.h>
+#include <asm/ohare.h>
+#include <asm/heathrow.h>
+#include <asm/keylargo.h>
+#include <asm/uninorth.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/dbdma.h>
+#include <asm/pci-bridge.h>
+#include <asm/pmac_low_i2c.h>
+
+#undef DEBUG_FEATURE
+
+#ifdef DEBUG_FEATURE
+#define DBG(fmt...) printk(KERN_DEBUG fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#ifdef CONFIG_6xx
+extern int powersave_lowspeed;
+#endif
+
+extern int powersave_nap;
+extern struct device_node *k2_skiplist[2];
+
+/*
+ * We use a single global lock to protect accesses. Each driver has
+ * to take care of its own locking
+ */
+DEFINE_RAW_SPINLOCK(feature_lock);
+
+#define LOCK(flags) raw_spin_lock_irqsave(&feature_lock, flags);
+#define UNLOCK(flags) raw_spin_unlock_irqrestore(&feature_lock, flags);
+
+
+/*
+ * Instance of some macio stuffs
+ */
+struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+struct macio_chip *macio_find(struct device_node *child, int type)
+{
+ while(child) {
+ int i;
+
+ for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
+ if (child == macio_chips[i].of_node &&
+ (!type || macio_chips[i].type == type))
+ return &macio_chips[i];
+ child = child->parent;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(macio_find);
+
+static const char *macio_names[] =
+{
+ "Unknown",
+ "Grand Central",
+ "OHare",
+ "OHareII",
+ "Heathrow",
+ "Gatwick",
+ "Paddington",
+ "Keylargo",
+ "Pangea",
+ "Intrepid",
+ "K2",
+ "Shasta",
+};
+
+
+struct device_node *uninorth_node;
+u32 __iomem *uninorth_base;
+
+static u32 uninorth_rev;
+static int uninorth_maj;
+static void __iomem *u3_ht_base;
+
+/*
+ * For each motherboard family, we have a table of functions pointers
+ * that handle the various features.
+ */
+
+typedef long (*feature_call)(struct device_node *node, long param, long value);
+
+struct feature_table_entry {
+ unsigned int selector;
+ feature_call function;
+};
+
+struct pmac_mb_def
+{
+ const char* model_string;
+ const char* model_name;
+ int model_id;
+ struct feature_table_entry* features;
+ unsigned long board_flags;
+};
+static struct pmac_mb_def pmac_mb;
+
+/*
+ * Here are the chip specific feature functions
+ */
+
+static inline int simple_feature_tweak(struct device_node *node, int type,
+ int reg, u32 mask, int value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+
+ macio = macio_find(node, type);
+ if (!macio)
+ return -ENODEV;
+ LOCK(flags);
+ if (value)
+ MACIO_BIS(reg, mask);
+ else
+ MACIO_BIC(reg, mask);
+ (void)MACIO_IN32(reg);
+ UNLOCK(flags);
+
+ return 0;
+}
+
+#ifndef CONFIG_PPC64
+
+static long ohare_htw_scc_enable(struct device_node *node, long param,
+ long value)
+{
+ struct macio_chip* macio;
+ unsigned long chan_mask;
+ unsigned long fcr;
+ unsigned long flags;
+ int htw, trans;
+ unsigned long rmask;
+
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+ if (!strcmp(node->name, "ch-a"))
+ chan_mask = MACIO_FLAG_SCCA_ON;
+ else if (!strcmp(node->name, "ch-b"))
+ chan_mask = MACIO_FLAG_SCCB_ON;
+ else
+ return -ENODEV;
+
+ htw = (macio->type == macio_heathrow || macio->type == macio_paddington
+ || macio->type == macio_gatwick);
+ /* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
+ trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
+ pmac_mb.model_id != PMAC_TYPE_YIKES);
+ if (value) {
+#ifdef CONFIG_ADB_PMU
+ if ((param & 0xfff) == PMAC_SCC_IRDA)
+ pmu_enable_irled(1);
+#endif /* CONFIG_ADB_PMU */
+ LOCK(flags);
+ fcr = MACIO_IN32(OHARE_FCR);
+ /* Check if scc cell need enabling */
+ if (!(fcr & OH_SCC_ENABLE)) {
+ fcr |= OH_SCC_ENABLE;
+ if (htw) {
+ /* Side effect: this will also power up the
+ * modem, but it's too messy to figure out on which
+ * ports this controls the transceiver and on which
+ * it controls the modem
+ */
+ if (trans)
+ fcr &= ~HRW_SCC_TRANS_EN_N;
+ MACIO_OUT32(OHARE_FCR, fcr);
+ fcr |= (rmask = HRW_RESET_SCC);
+ MACIO_OUT32(OHARE_FCR, fcr);
+ } else {
+ fcr |= (rmask = OH_SCC_RESET);
+ MACIO_OUT32(OHARE_FCR, fcr);
+ }
+ UNLOCK(flags);
+ (void)MACIO_IN32(OHARE_FCR);
+ mdelay(15);
+ LOCK(flags);
+ fcr &= ~rmask;
+ MACIO_OUT32(OHARE_FCR, fcr);
+ }
+ if (chan_mask & MACIO_FLAG_SCCA_ON)
+ fcr |= OH_SCCA_IO;
+ if (chan_mask & MACIO_FLAG_SCCB_ON)
+ fcr |= OH_SCCB_IO;
+ MACIO_OUT32(OHARE_FCR, fcr);
+ macio->flags |= chan_mask;
+ UNLOCK(flags);
+ if (param & PMAC_SCC_FLAG_XMON)
+ macio->flags |= MACIO_FLAG_SCC_LOCKED;
+ } else {
+ if (macio->flags & MACIO_FLAG_SCC_LOCKED)
+ return -EPERM;
+ LOCK(flags);
+ fcr = MACIO_IN32(OHARE_FCR);
+ if (chan_mask & MACIO_FLAG_SCCA_ON)
+ fcr &= ~OH_SCCA_IO;
+ if (chan_mask & MACIO_FLAG_SCCB_ON)
+ fcr &= ~OH_SCCB_IO;
+ MACIO_OUT32(OHARE_FCR, fcr);
+ if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
+ fcr &= ~OH_SCC_ENABLE;
+ if (htw && trans)
+ fcr |= HRW_SCC_TRANS_EN_N;
+ MACIO_OUT32(OHARE_FCR, fcr);
+ }
+ macio->flags &= ~(chan_mask);
+ UNLOCK(flags);
+ mdelay(10);
+#ifdef CONFIG_ADB_PMU
+ if ((param & 0xfff) == PMAC_SCC_IRDA)
+ pmu_enable_irled(0);
+#endif /* CONFIG_ADB_PMU */
+ }
+ return 0;
+}
+
+static long ohare_floppy_enable(struct device_node *node, long param,
+ long value)
+{
+ return simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_FLOPPY_ENABLE, value);
+}
+
+static long ohare_mesh_enable(struct device_node *node, long param, long value)
+{
+ return simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_MESH_ENABLE, value);
+}
+
+static long ohare_ide_enable(struct device_node *node, long param, long value)
+{
+ switch(param) {
+ case 0:
+ /* For some reason, setting the bit in set_initial_features()
+ * doesn't stick. I'm still investigating... --BenH.
+ */
+ if (value)
+ simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_IOBUS_ENABLE, 1);
+ return simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_IDE0_ENABLE, value);
+ case 1:
+ return simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_BAY_IDE_ENABLE, value);
+ default:
+ return -ENODEV;
+ }
+}
+
+static long ohare_ide_reset(struct device_node *node, long param, long value)
+{
+ switch(param) {
+ case 0:
+ return simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_IDE0_RESET_N, !value);
+ case 1:
+ return simple_feature_tweak(node, macio_ohare,
+ OHARE_FCR, OH_IDE1_RESET_N, !value);
+ default:
+ return -ENODEV;
+ }
+}
+
+static long ohare_sleep_state(struct device_node *node, long param, long value)
+{
+ struct macio_chip* macio = &macio_chips[0];
+
+ if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+ return -EPERM;
+ if (value == 1) {
+ MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
+ } else if (value == 0) {
+ MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+ }
+
+ return 0;
+}
+
+static long heathrow_modem_enable(struct device_node *node, long param,
+ long value)
+{
+ struct macio_chip* macio;
+ u8 gpio;
+ unsigned long flags;
+
+ macio = macio_find(node, macio_unknown);
+ if (!macio)
+ return -ENODEV;
+ gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
+ if (!value) {
+ LOCK(flags);
+ MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
+ UNLOCK(flags);
+ (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+ mdelay(250);
+ }
+ if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
+ pmac_mb.model_id != PMAC_TYPE_YIKES) {
+ LOCK(flags);
+ if (value)
+ MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+ else
+ MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+ UNLOCK(flags);
+ (void)MACIO_IN32(HEATHROW_FCR);
+ mdelay(250);
+ }
+ if (value) {
+ LOCK(flags);
+ MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
+ (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250); LOCK(flags);
+ MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
+ (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250); LOCK(flags);
+ MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
+ (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250);
+ }
+ return 0;
+}
+
+static long heathrow_floppy_enable(struct device_node *node, long param,
+ long value)
+{
+ return simple_feature_tweak(node, macio_unknown,
+ HEATHROW_FCR,
+ HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE,
+ value);
+}
+
+static long heathrow_mesh_enable(struct device_node *node, long param,
+ long value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+
+ macio = macio_find(node, macio_unknown);
+ if (!macio)
+ return -ENODEV;
+ LOCK(flags);
+ /* Set clear mesh cell enable */
+ if (value)
+ MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
+ else
+ MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
+ (void)MACIO_IN32(HEATHROW_FCR);
+ udelay(10);
+ /* Set/Clear termination power */
+ if (value)
+ MACIO_BIC(HEATHROW_MBCR, 0x04000000);
+ else
+ MACIO_BIS(HEATHROW_MBCR, 0x04000000);
+ (void)MACIO_IN32(HEATHROW_MBCR);
+ udelay(10);
+ UNLOCK(flags);
+
+ return 0;
+}
+
+static long heathrow_ide_enable(struct device_node *node, long param,
+ long value)
+{
+ switch(param) {
+ case 0:
+ return simple_feature_tweak(node, macio_unknown,
+ HEATHROW_FCR, HRW_IDE0_ENABLE, value);
+ case 1:
+ return simple_feature_tweak(node, macio_unknown,
+ HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value);
+ default:
+ return -ENODEV;
+ }
+}
+
+static long heathrow_ide_reset(struct device_node *node, long param,
+ long value)
+{
+ switch(param) {
+ case 0:
+ return simple_feature_tweak(node, macio_unknown,
+ HEATHROW_FCR, HRW_IDE0_RESET_N, !value);
+ case 1:
+ return simple_feature_tweak(node, macio_unknown,
+ HEATHROW_FCR, HRW_IDE1_RESET_N, !value);
+ default:
+ return -ENODEV;
+ }
+}
+
+static long heathrow_bmac_enable(struct device_node *node, long param,
+ long value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+ if (value) {
+ LOCK(flags);
+ MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
+ MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
+ UNLOCK(flags);
+ (void)MACIO_IN32(HEATHROW_FCR);
+ mdelay(10);
+ LOCK(flags);
+ MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
+ UNLOCK(flags);
+ (void)MACIO_IN32(HEATHROW_FCR);
+ mdelay(10);
+ } else {
+ LOCK(flags);
+ MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
+ UNLOCK(flags);
+ }
+ return 0;
+}
+
+static long heathrow_sound_enable(struct device_node *node, long param,
+ long value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+
+ /* B&W G3 and Yikes don't support that properly (the
+ * sound appears to never come back after being shut down).
+ */
+ if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
+ pmac_mb.model_id == PMAC_TYPE_YIKES)
+ return 0;
+
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+ if (value) {
+ LOCK(flags);
+ MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+ MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
+ UNLOCK(flags);
+ (void)MACIO_IN32(HEATHROW_FCR);
+ } else {
+ LOCK(flags);
+ MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
+ MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+ UNLOCK(flags);
+ }
+ return 0;
+}
+
+static u32 save_fcr[6];
+static u32 save_mbcr;
+static struct dbdma_regs save_dbdma[13];
+static struct dbdma_regs save_alt_dbdma[13];
+
+static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
+{
+ int i;
+
+ /* Save state & config of DBDMA channels */
+ for (i = 0; i < 13; i++) {
+ volatile struct dbdma_regs __iomem * chan = (void __iomem *)
+ (macio->base + ((0x8000+i*0x100)>>2));
+ save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
+ save[i].cmdptr = in_le32(&chan->cmdptr);
+ save[i].intr_sel = in_le32(&chan->intr_sel);
+ save[i].br_sel = in_le32(&chan->br_sel);
+ save[i].wait_sel = in_le32(&chan->wait_sel);
+ }
+}
+
+static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
+{
+ int i;
+
+ /* Save state & config of DBDMA channels */
+ for (i = 0; i < 13; i++) {
+ volatile struct dbdma_regs __iomem * chan = (void __iomem *)
+ (macio->base + ((0x8000+i*0x100)>>2));
+ out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
+ while (in_le32(&chan->status) & ACTIVE)
+ mb();
+ out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
+ out_le32(&chan->cmdptr, save[i].cmdptr);
+ out_le32(&chan->intr_sel, save[i].intr_sel);
+ out_le32(&chan->br_sel, save[i].br_sel);
+ out_le32(&chan->wait_sel, save[i].wait_sel);
+ }
+}
+
+static void heathrow_sleep(struct macio_chip *macio, int secondary)
+{
+ if (secondary) {
+ dbdma_save(macio, save_alt_dbdma);
+ save_fcr[2] = MACIO_IN32(0x38);
+ save_fcr[3] = MACIO_IN32(0x3c);
+ } else {
+ dbdma_save(macio, save_dbdma);
+ save_fcr[0] = MACIO_IN32(0x38);
+ save_fcr[1] = MACIO_IN32(0x3c);
+ save_mbcr = MACIO_IN32(0x34);
+ /* Make sure sound is shut down */
+ MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
+ MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+ /* This seems to be necessary as well or the fan
+ * keeps coming up and battery drains fast */
+ MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
+ MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
+ /* Make sure eth is down even if module or sleep
+ * won't work properly */
+ MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
+ }
+ /* Make sure modem is shut down */
+ MACIO_OUT8(HRW_GPIO_MODEM_RESET,
+ MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
+ MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+ MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);
+
+ /* Let things settle */
+ (void)MACIO_IN32(HEATHROW_FCR);
+}
+
+static void heathrow_wakeup(struct macio_chip *macio, int secondary)
+{
+ if (secondary) {
+ MACIO_OUT32(0x38, save_fcr[2]);
+ (void)MACIO_IN32(0x38);
+ mdelay(1);
+ MACIO_OUT32(0x3c, save_fcr[3]);
+ (void)MACIO_IN32(0x38);
+ mdelay(10);
+ dbdma_restore(macio, save_alt_dbdma);
+ } else {
+ MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
+ (void)MACIO_IN32(0x38);
+ mdelay(1);
+ MACIO_OUT32(0x3c, save_fcr[1]);
+ (void)MACIO_IN32(0x38);
+ mdelay(1);
+ MACIO_OUT32(0x34, save_mbcr);
+ (void)MACIO_IN32(0x38);
+ mdelay(10);
+ dbdma_restore(macio, save_dbdma);
+ }
+}
+
+static long heathrow_sleep_state(struct device_node *node, long param,
+ long value)
+{
+ if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+ return -EPERM;
+ if (value == 1) {
+ if (macio_chips[1].type == macio_gatwick)
+ heathrow_sleep(&macio_chips[0], 1);
+ heathrow_sleep(&macio_chips[0], 0);
+ } else if (value == 0) {
+ heathrow_wakeup(&macio_chips[0], 0);
+ if (macio_chips[1].type == macio_gatwick)
+ heathrow_wakeup(&macio_chips[0], 1);
+ }
+ return 0;
+}
+
+static long core99_scc_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+ unsigned long chan_mask;
+ u32 fcr;
+
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+ if (!strcmp(node->name, "ch-a"))
+ chan_mask = MACIO_FLAG_SCCA_ON;
+ else if (!strcmp(node->name, "ch-b"))
+ chan_mask = MACIO_FLAG_SCCB_ON;
+ else
+ return -ENODEV;
+
+ if (value) {
+ int need_reset_scc = 0;
+ int need_reset_irda = 0;
+
+ LOCK(flags);
+ fcr = MACIO_IN32(KEYLARGO_FCR0);
+ /* Check if scc cell need enabling */
+ if (!(fcr & KL0_SCC_CELL_ENABLE)) {
+ fcr |= KL0_SCC_CELL_ENABLE;
+ need_reset_scc = 1;
+ }
+ if (chan_mask & MACIO_FLAG_SCCA_ON) {
+ fcr |= KL0_SCCA_ENABLE;
+ /* Don't enable line drivers for I2S modem */
+ if ((param & 0xfff) == PMAC_SCC_I2S1)
+ fcr &= ~KL0_SCC_A_INTF_ENABLE;
+ else
+ fcr |= KL0_SCC_A_INTF_ENABLE;
+ }
+ if (chan_mask & MACIO_FLAG_SCCB_ON) {
+ fcr |= KL0_SCCB_ENABLE;
+ /* Perform irda specific inits */
+ if ((param & 0xfff) == PMAC_SCC_IRDA) {
+ fcr &= ~KL0_SCC_B_INTF_ENABLE;
+ fcr |= KL0_IRDA_ENABLE;
+ fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
+ fcr |= KL0_IRDA_SOURCE1_SEL;
+ fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
+ fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
+ need_reset_irda = 1;
+ } else
+ fcr |= KL0_SCC_B_INTF_ENABLE;
+ }
+ MACIO_OUT32(KEYLARGO_FCR0, fcr);
+ macio->flags |= chan_mask;
+ if (need_reset_scc) {
+ MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ UNLOCK(flags);
+ mdelay(15);
+ LOCK(flags);
+ MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
+ }
+ if (need_reset_irda) {
+ MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ UNLOCK(flags);
+ mdelay(15);
+ LOCK(flags);
+ MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
+ }
+ UNLOCK(flags);
+ if (param & PMAC_SCC_FLAG_XMON)
+ macio->flags |= MACIO_FLAG_SCC_LOCKED;
+ } else {
+ if (macio->flags & MACIO_FLAG_SCC_LOCKED)
+ return -EPERM;
+ LOCK(flags);
+ fcr = MACIO_IN32(KEYLARGO_FCR0);
+ if (chan_mask & MACIO_FLAG_SCCA_ON)
+ fcr &= ~KL0_SCCA_ENABLE;
+ if (chan_mask & MACIO_FLAG_SCCB_ON) {
+ fcr &= ~KL0_SCCB_ENABLE;
+ /* Perform irda specific clears */
+ if ((param & 0xfff) == PMAC_SCC_IRDA) {
+ fcr &= ~KL0_IRDA_ENABLE;
+ fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
+ fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
+ fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
+ }
+ }
+ MACIO_OUT32(KEYLARGO_FCR0, fcr);
+ if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
+ fcr &= ~KL0_SCC_CELL_ENABLE;
+ MACIO_OUT32(KEYLARGO_FCR0, fcr);
+ }
+ macio->flags &= ~(chan_mask);
+ UNLOCK(flags);
+ mdelay(10);
+ }
+ return 0;
+}
+
+static long
+core99_modem_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip* macio;
+ u8 gpio;
+ unsigned long flags;
+
+ /* Hack for internal USB modem */
+ if (node == NULL) {
+ if (macio_chips[0].type != macio_keylargo)
+ return -ENODEV;
+ node = macio_chips[0].of_node;
+ }
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+ gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
+ gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
+ gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
+
+ if (!value) {
+ LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+ UNLOCK(flags);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ mdelay(250);
+ }
+ LOCK(flags);
+ if (value) {
+ MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+ UNLOCK(flags);
+ (void)MACIO_IN32(KEYLARGO_FCR2);
+ mdelay(250);
+ } else {
+ MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+ UNLOCK(flags);
+ }
+ if (value) {
+ LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250); LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250); LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250);
+ }
+ return 0;
+}
+
+static long
+pangea_modem_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip* macio;
+ u8 gpio;
+ unsigned long flags;
+
+ /* Hack for internal USB modem */
+ if (node == NULL) {
+ if (macio_chips[0].type != macio_pangea &&
+ macio_chips[0].type != macio_intrepid)
+ return -ENODEV;
+ node = macio_chips[0].of_node;
+ }
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+ gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
+ gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
+ gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
+
+ if (!value) {
+ LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+ UNLOCK(flags);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ mdelay(250);
+ }
+ LOCK(flags);
+ if (value) {
+ MACIO_OUT8(KL_GPIO_MODEM_POWER,
+ KEYLARGO_GPIO_OUTPUT_ENABLE);
+ UNLOCK(flags);
+ (void)MACIO_IN32(KEYLARGO_FCR2);
+ mdelay(250);
+ } else {
+ MACIO_OUT8(KL_GPIO_MODEM_POWER,
+ KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
+ UNLOCK(flags);
+ }
+ if (value) {
+ LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250); LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250); LOCK(flags);
+ MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+ (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+ UNLOCK(flags); mdelay(250);
+ }
+ return 0;
+}
+
+static long
+core99_ata100_enable(struct device_node *node, long value)
+{
+ unsigned long flags;
+ struct pci_dev *pdev = NULL;
+ u8 pbus, pid;
+ int rc;
+
+ if (uninorth_rev < 0x24)
+ return -ENODEV;
+
+ LOCK(flags);
+ if (value)
+ UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
+ else
+ UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
+ (void)UN_IN(UNI_N_CLOCK_CNTL);
+ UNLOCK(flags);
+ udelay(20);
+
+ if (value) {
+ if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
+ pdev = pci_get_bus_and_slot(pbus, pid);
+ if (pdev == NULL)
+ return 0;
+ rc = pci_enable_device(pdev);
+ if (rc == 0)
+ pci_set_master(pdev);
+ pci_dev_put(pdev);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static long
+core99_ide_enable(struct device_node *node, long param, long value)
+{
+ /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
+ * based ata-100
+ */
+ switch(param) {
+ case 0:
+ return simple_feature_tweak(node, macio_unknown,
+ KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
+ case 1:
+ return simple_feature_tweak(node, macio_unknown,
+ KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
+ case 2:
+ return simple_feature_tweak(node, macio_unknown,
+ KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
+ case 3:
+ return core99_ata100_enable(node, value);
+ default:
+ return -ENODEV;
+ }
+}
+
+static long
+core99_ide_reset(struct device_node *node, long param, long value)
+{
+ switch(param) {
+ case 0:
+ return simple_feature_tweak(node, macio_unknown,
+ KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
+ case 1:
+ return simple_feature_tweak(node, macio_unknown,
+ KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
+ case 2:
+ return simple_feature_tweak(node, macio_unknown,
+ KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
+ default:
+ return -ENODEV;
+ }
+}
+
+static long
+core99_gmac_enable(struct device_node *node, long param, long value)
+{
+ unsigned long flags;
+
+ LOCK(flags);
+ if (value)
+ UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
+ else
+ UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
+ (void)UN_IN(UNI_N_CLOCK_CNTL);
+ UNLOCK(flags);
+ udelay(20);
+
+ return 0;
+}
+
+static long
+core99_gmac_phy_reset(struct device_node *node, long param, long value)
+{
+ unsigned long flags;
+ struct macio_chip *macio;
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+ macio->type != macio_intrepid)
+ return -ENODEV;
+
+ LOCK(flags);
+ MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
+ (void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
+ UNLOCK(flags);
+ mdelay(10);
+ LOCK(flags);
+ MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
+ KEYLARGO_GPIO_OUTOUT_DATA);
+ UNLOCK(flags);
+ mdelay(10);
+
+ return 0;
+}
+
+static long
+core99_sound_chip_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+
+	/* TODO: do better probing; Screamer-based G4 desktops &
+	 * iMacs can do this too, and add a recalibrate step in
+	 * the driver as well
+	 */
+ if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
+ pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
+ LOCK(flags);
+ if (value)
+ MACIO_OUT8(KL_GPIO_SOUND_POWER,
+ KEYLARGO_GPIO_OUTPUT_ENABLE |
+ KEYLARGO_GPIO_OUTOUT_DATA);
+ else
+ MACIO_OUT8(KL_GPIO_SOUND_POWER,
+ KEYLARGO_GPIO_OUTPUT_ENABLE);
+ (void)MACIO_IN8(KL_GPIO_SOUND_POWER);
+ UNLOCK(flags);
+ }
+ return 0;
+}
+
+static long
+core99_airport_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip* macio;
+ unsigned long flags;
+ int state;
+
+ macio = macio_find(node, 0);
+ if (!macio)
+ return -ENODEV;
+
+ /* Hint: we allow passing of macio itself for the sake of the
+ * sleep code
+ */
+ if (node != macio->of_node &&
+ (!node->parent || node->parent != macio->of_node))
+ return -ENODEV;
+ state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
+ if (value == state)
+ return 0;
+ if (value) {
+ /* This code is a reproduction of OF enable-cardslot
+ * and init-wireless methods, slightly hacked until
+ * I got it working.
+ */
+ LOCK(flags);
+ MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
+ (void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
+ UNLOCK(flags);
+ mdelay(10);
+ LOCK(flags);
+ MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
+ (void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
+ UNLOCK(flags);
+
+ mdelay(10);
+
+ LOCK(flags);
+ MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
+ (void)MACIO_IN32(KEYLARGO_FCR2);
+ udelay(10);
+ MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
+ (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
+ udelay(10);
+ MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
+ (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
+ udelay(10);
+ MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
+ (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
+ udelay(10);
+ MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
+ (void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
+ udelay(10);
+ MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
+ (void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
+ UNLOCK(flags);
+ udelay(10);
+ MACIO_OUT32(0x1c000, 0);
+ mdelay(1);
+ MACIO_OUT8(0x1a3e0, 0x41);
+ (void)MACIO_IN8(0x1a3e0);
+ udelay(10);
+ LOCK(flags);
+ MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
+ (void)MACIO_IN32(KEYLARGO_FCR2);
+ UNLOCK(flags);
+ mdelay(100);
+
+ macio->flags |= MACIO_FLAG_AIRPORT_ON;
+ } else {
+ LOCK(flags);
+ MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
+ (void)MACIO_IN32(KEYLARGO_FCR2);
+ MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
+ MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
+ MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
+ MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
+ MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
+ (void)MACIO_IN8(KL_GPIO_AIRPORT_4);
+ UNLOCK(flags);
+
+ macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+static long
+core99_reset_cpu(struct device_node *node, long param, long value)
+{
+ unsigned int reset_io = 0;
+ unsigned long flags;
+ struct macio_chip *macio;
+ struct device_node *np;
+ struct device_node *cpus;
+ const int dflt_reset_lines[] = { KL_GPIO_RESET_CPU0,
+ KL_GPIO_RESET_CPU1,
+ KL_GPIO_RESET_CPU2,
+ KL_GPIO_RESET_CPU3 };
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo)
+ return -ENODEV;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (cpus == NULL)
+ return -ENODEV;
+ for (np = cpus->child; np != NULL; np = np->sibling) {
+ const u32 *num = of_get_property(np, "reg", NULL);
+ const u32 *rst = of_get_property(np, "soft-reset", NULL);
+ if (num == NULL || rst == NULL)
+ continue;
+ if (param == *num) {
+ reset_io = *rst;
+ break;
+ }
+ }
+ of_node_put(cpus);
+ if (np == NULL || reset_io == 0)
+ reset_io = dflt_reset_lines[param];
+
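+	/* Pulse the soft-reset GPIO of the selected CPU */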
+ LOCK(flags);
+ MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
+ (void)MACIO_IN8(reset_io);
+ udelay(1);
+ MACIO_OUT8(reset_io, 0);
+ (void)MACIO_IN8(reset_io);
+ UNLOCK(flags);
+
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+static long
+core99_usb_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip *macio;
+ unsigned long flags;
+ const char *prop;
+ int number;
+ u32 reg;
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+ macio->type != macio_intrepid)
+ return -ENODEV;
+
+ prop = of_get_property(node, "AAPL,clock-id", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (strncmp(prop, "usb0u048", 8) == 0)
+ number = 0;
+ else if (strncmp(prop, "usb1u148", 8) == 0)
+ number = 2;
+ else if (strncmp(prop, "usb2u248", 8) == 0)
+ number = 4;
+ else
+ return -ENODEV;
+
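+	/* "number" selects the USB cell by the index of its first port:
+	 * 0 = USB0, 2 = USB1, 4 = USB2. Each cell drives two ports.
+	 */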
+	/* Sorry for the brute-force locking, but this is only used during
+	 * sleep and the timing seems to be critical
+	 */
+ LOCK(flags);
+ if (value) {
+ /* Turn ON */
+ if (number == 0) {
+ MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ UNLOCK(flags);
+ mdelay(1);
+ LOCK(flags);
+ MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
+ } else if (number == 2) {
+ MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
+ UNLOCK(flags);
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ mdelay(1);
+ LOCK(flags);
+ MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
+ } else if (number == 4) {
+ MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
+ UNLOCK(flags);
+ (void)MACIO_IN32(KEYLARGO_FCR1);
+ mdelay(1);
+ LOCK(flags);
+ MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
+ }
+ if (number < 4) {
+ reg = MACIO_IN32(KEYLARGO_FCR4);
+ reg &= ~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
+ KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
+ reg &= ~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
+ KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
+ MACIO_OUT32(KEYLARGO_FCR4, reg);
+ (void)MACIO_IN32(KEYLARGO_FCR4);
+ udelay(10);
+ } else {
+ reg = MACIO_IN32(KEYLARGO_FCR3);
+ reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
+ KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
+ reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
+ KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
+ MACIO_OUT32(KEYLARGO_FCR3, reg);
+ (void)MACIO_IN32(KEYLARGO_FCR3);
+ udelay(10);
+ }
+ if (macio->type == macio_intrepid) {
+ /* wait for clock stopped bits to clear */
+ u32 test0 = 0, test1 = 0;
+ u32 status0, status1;
+ int timeout = 1000;
+
+ UNLOCK(flags);
+ switch (number) {
+ case 0:
+ test0 = UNI_N_CLOCK_STOPPED_USB0;
+ test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
+ break;
+ case 2:
+ test0 = UNI_N_CLOCK_STOPPED_USB1;
+ test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
+ break;
+ case 4:
+ test0 = UNI_N_CLOCK_STOPPED_USB2;
+ test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
+ break;
+ }
+ do {
+ if (--timeout <= 0) {
+ printk(KERN_ERR "core99_usb_enable: "
+ "Timeout waiting for clocks\n");
+ break;
+ }
+ mdelay(1);
+ status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
+ status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
+ } while ((status0 & test0) | (status1 & test1));
+ LOCK(flags);
+ }
+ } else {
+ /* Turn OFF */
+ if (number < 4) {
+ reg = MACIO_IN32(KEYLARGO_FCR4);
+ reg |= KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
+ KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
+ reg |= KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
+ KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
+ MACIO_OUT32(KEYLARGO_FCR4, reg);
+ (void)MACIO_IN32(KEYLARGO_FCR4);
+ udelay(1);
+ } else {
+ reg = MACIO_IN32(KEYLARGO_FCR3);
+ reg |= KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
+ KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
+ reg |= KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
+ KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
+ MACIO_OUT32(KEYLARGO_FCR3, reg);
+ (void)MACIO_IN32(KEYLARGO_FCR3);
+ udelay(1);
+ }
+ if (number == 0) {
+ if (macio->type != macio_intrepid)
+ MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ udelay(1);
+ MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ } else if (number == 2) {
+ if (macio->type != macio_intrepid)
+ MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ udelay(1);
+ MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ } else if (number == 4) {
+ udelay(1);
+ MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
+ (void)MACIO_IN32(KEYLARGO_FCR1);
+ }
+ udelay(1);
+ }
+ UNLOCK(flags);
+
+ return 0;
+}
+
+static long
+core99_firewire_enable(struct device_node *node, long param, long value)
+{
+ unsigned long flags;
+ struct macio_chip *macio;
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+ macio->type != macio_intrepid)
+ return -ENODEV;
+ if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
+ return -ENODEV;
+
+ LOCK(flags);
+ if (value) {
+ UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+ (void)UN_IN(UNI_N_CLOCK_CNTL);
+ } else {
+ UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+ (void)UN_IN(UNI_N_CLOCK_CNTL);
+ }
+ UNLOCK(flags);
+ mdelay(1);
+
+ return 0;
+}
+
+static long
+core99_firewire_cable_power(struct device_node *node, long param, long value)
+{
+ unsigned long flags;
+ struct macio_chip *macio;
+
+ /* Trick: we allow NULL node */
+ if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
+ return -ENODEV;
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+ macio->type != macio_intrepid)
+ return -ENODEV;
+ if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
+ return -ENODEV;
+
+ LOCK(flags);
+ if (value) {
+ MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
+ MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
+ udelay(10);
+ } else {
+ MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
+ MACIO_IN8(KL_GPIO_FW_CABLE_POWER); udelay(10);
+ }
+ UNLOCK(flags);
+ mdelay(1);
+
+ return 0;
+}
+
+static long
+intrepid_aack_delay_enable(struct device_node *node, long param, long value)
+{
+ unsigned long flags;
+
+ if (uninorth_rev < 0xd2)
+ return -ENODEV;
+
+ LOCK(flags);
+ if (param)
+ UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
+ else
+ UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
+ UNLOCK(flags);
+
+ return 0;
+}
+
+
+#endif /* CONFIG_PPC64 */
+
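+/* Raw GPIO accessors: "param" is the GPIO register offset within the
+ * mac-io chip.
+ */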
+static long
+core99_read_gpio(struct device_node *node, long param, long value)
+{
+ struct macio_chip *macio = &macio_chips[0];
+
+ return MACIO_IN8(param);
+}
+
+
+static long
+core99_write_gpio(struct device_node *node, long param, long value)
+{
+ struct macio_chip *macio = &macio_chips[0];
+
+ MACIO_OUT8(param, (u8)(value & 0xff));
+ return 0;
+}
+
+#ifdef CONFIG_PPC64
+static long g5_gmac_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip *macio = &macio_chips[0];
+ unsigned long flags;
+
+ if (node == NULL)
+ return -ENODEV;
+
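+	/* While the GMAC clock is off we put the device on k2_skiplist so
+	 * that PCI config space accesses to it are skipped (they could hang
+	 * the machine with the clock stopped).
+	 */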
+ LOCK(flags);
+ if (value) {
+ MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
+ mb();
+ k2_skiplist[0] = NULL;
+ } else {
+ k2_skiplist[0] = node;
+ mb();
+ MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
+ }
+
+ UNLOCK(flags);
+ mdelay(1);
+
+ return 0;
+}
+
+static long g5_fw_enable(struct device_node *node, long param, long value)
+{
+ struct macio_chip *macio = &macio_chips[0];
+ unsigned long flags;
+
+ if (node == NULL)
+ return -ENODEV;
+
+ LOCK(flags);
+ if (value) {
+ MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
+ mb();
+ k2_skiplist[1] = NULL;
+ } else {
+ k2_skiplist[1] = node;
+ mb();
+ MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
+ }
+
+ UNLOCK(flags);
+ mdelay(1);
+
+ return 0;
+}
+
+static long g5_mpic_enable(struct device_node *node, long param, long value)
+{
+ unsigned long flags;
+ struct device_node *parent = of_get_parent(node);
+ int is_u3;
+
+ if (parent == NULL)
+ return 0;
+ is_u3 = strcmp(parent->name, "u3") == 0 ||
+ strcmp(parent->name, "u4") == 0;
+ of_node_put(parent);
+ if (!is_u3)
+ return 0;
+
+ LOCK(flags);
+ UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
+ UNLOCK(flags);
+
+ return 0;
+}
+
+static long g5_eth_phy_reset(struct device_node *node, long param, long value)
+{
+ struct macio_chip *macio = &macio_chips[0];
+ struct device_node *phy;
+ int need_reset;
+
+ /*
+ * We must not reset the combo PHYs, only the BCM5221 found in
+ * the iMac G5.
+ */
+ phy = of_get_next_child(node, NULL);
+ if (!phy)
+ return -ENODEV;
+ need_reset = of_device_is_compatible(phy, "B5221");
+ of_node_put(phy);
+ if (!need_reset)
+ return 0;
+
+ /* PHY reset is GPIO 29, not in device-tree unfortunately */
+ MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
+ KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
+	/* Thankfully, this is now always called by sungem at a time when
+	 * we can schedule.
+	 */
+ msleep(10);
+ MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
+
+ return 0;
+}
+
+static long g5_i2s_enable(struct device_node *node, long param, long value)
+{
+ /* Very crude implementation for now */
+ struct macio_chip *macio = &macio_chips[0];
+ unsigned long flags;
+ int cell;
+ u32 fcrs[3][3] = {
+ { 0,
+ K2_FCR1_I2S0_CELL_ENABLE |
+ K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE,
+ KL3_I2S0_CLK18_ENABLE
+ },
+ { KL0_SCC_A_INTF_ENABLE,
+ K2_FCR1_I2S1_CELL_ENABLE |
+ K2_FCR1_I2S1_CLK_ENABLE_BIT | K2_FCR1_I2S1_ENABLE,
+ KL3_I2S1_CLK18_ENABLE
+ },
+ { KL0_SCC_B_INTF_ENABLE,
+ SH_FCR1_I2S2_CELL_ENABLE |
+ SH_FCR1_I2S2_CLK_ENABLE_BIT | SH_FCR1_I2S2_ENABLE,
+ SH_FCR3_I2S2_CLK18_ENABLE
+ },
+ };
+
+ if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
+ return -ENODEV;
+ if (strncmp(node->name, "i2s-", 4))
+ return -ENODEV;
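+	/* Node names are "i2s-a", "i2s-b", "i2s-c"; map the suffix to cell 0/1/2 */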
+ cell = node->name[4] - 'a';
+ switch(cell) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ if (macio->type == macio_shasta)
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ LOCK(flags);
+ if (value) {
+ MACIO_BIC(KEYLARGO_FCR0, fcrs[cell][0]);
+ MACIO_BIS(KEYLARGO_FCR1, fcrs[cell][1]);
+ MACIO_BIS(KEYLARGO_FCR3, fcrs[cell][2]);
+ } else {
+ MACIO_BIC(KEYLARGO_FCR3, fcrs[cell][2]);
+ MACIO_BIC(KEYLARGO_FCR1, fcrs[cell][1]);
+ MACIO_BIS(KEYLARGO_FCR0, fcrs[cell][0]);
+ }
+ udelay(10);
+ UNLOCK(flags);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_SMP
+static long g5_reset_cpu(struct device_node *node, long param, long value)
+{
+ unsigned int reset_io = 0;
+ unsigned long flags;
+ struct macio_chip *macio;
+ struct device_node *np;
+ struct device_node *cpus;
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
+ return -ENODEV;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (cpus == NULL)
+ return -ENODEV;
+ for (np = cpus->child; np != NULL; np = np->sibling) {
+ const u32 *num = of_get_property(np, "reg", NULL);
+ const u32 *rst = of_get_property(np, "soft-reset", NULL);
+ if (num == NULL || rst == NULL)
+ continue;
+ if (param == *num) {
+ reset_io = *rst;
+ break;
+ }
+ }
+ of_node_put(cpus);
+ if (np == NULL || reset_io == 0)
+ return -ENODEV;
+
+ LOCK(flags);
+ MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
+ (void)MACIO_IN8(reset_io);
+ udelay(1);
+ MACIO_OUT8(reset_io, 0);
+ (void)MACIO_IN8(reset_io);
+ UNLOCK(flags);
+
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * This can be called from pmac_smp so isn't static
+ *
+ * This takes the second CPU off the bus on dual CPU machines
+ * running UP
+ */
+void g5_phy_disable_cpu1(void)
+{
+ if (uninorth_maj == 3)
+ UN_OUT(U3_API_PHY_CONFIG_1, 0);
+}
+#endif /* CONFIG_PPC64 */
+
+#ifndef CONFIG_PPC64
+
+
+#ifdef CONFIG_PM
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
+
+static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+ u32 temp;
+
+ if (sleep_mode) {
+ mdelay(1);
+ MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ mdelay(1);
+ }
+
+ MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+ KL0_SCC_CELL_ENABLE |
+ KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
+ KL0_IRDA_CLK19_ENABLE);
+
+ MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
+ MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);
+
+ MACIO_BIC(KEYLARGO_FCR1,
+ KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
+ KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
+ KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+ KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+ KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+ KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
+ KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
+ KL1_UIDE_ENABLE);
+
+ MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+ MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);
+
+ temp = MACIO_IN32(KEYLARGO_FCR3);
+ if (macio->rev >= 2) {
+ temp |= KL3_SHUTDOWN_PLL2X;
+ if (sleep_mode)
+ temp |= KL3_SHUTDOWN_PLL_TOTAL;
+ }
+
+ temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
+ KL3_SHUTDOWN_PLLKW35;
+ if (sleep_mode)
+ temp |= KL3_SHUTDOWN_PLLKW12;
+ temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
+ | KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
+ if (sleep_mode)
+ temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
+ MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+ /* Flush posted writes & wait a bit */
+ (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
+}
+
+static void pangea_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+ u32 temp;
+
+ MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+ KL0_SCC_CELL_ENABLE |
+ KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);
+
+ MACIO_BIC(KEYLARGO_FCR1,
+ KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
+ KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
+ KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+ KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+ KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+ KL1_UIDE_ENABLE);
+ if (pmac_mb.board_flags & PMAC_MB_MOBILE)
+ MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
+
+ MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+
+ temp = MACIO_IN32(KEYLARGO_FCR3);
+ temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
+ KL3_SHUTDOWN_PLLKW35;
+ temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
+ | KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
+ if (sleep_mode)
+ temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
+ MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+ /* Flush posted writes & wait a bit */
+ (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
+}
+
+static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+ u32 temp;
+
+ MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+ KL0_SCC_CELL_ENABLE);
+
+ MACIO_BIC(KEYLARGO_FCR1,
+ KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+ KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+ KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+ KL1_EIDE0_ENABLE);
+ if (pmac_mb.board_flags & PMAC_MB_MOBILE)
+ MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
+
+ temp = MACIO_IN32(KEYLARGO_FCR3);
+ temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
+ KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
+ if (sleep_mode)
+ temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
+ MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+ /* Flush posted writes & wait a bit */
+ (void)MACIO_IN32(KEYLARGO_FCR0);
+ mdelay(10);
+}
+
+
+static int
+core99_sleep(void)
+{
+ struct macio_chip *macio;
+ int i;
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+ macio->type != macio_intrepid)
+ return -ENODEV;
+
+ /* We power off the wireless slot in case it was not done
+ * by the driver. We don't power it on automatically however
+ */
+ if (macio->flags & MACIO_FLAG_AIRPORT_ON)
+ core99_airport_enable(macio->of_node, 0, 0);
+
+ /* We power off the FW cable. Should be done by the driver... */
+ if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
+ core99_firewire_enable(NULL, 0, 0);
+ core99_firewire_cable_power(NULL, 0, 0);
+ }
+
+	/* We make sure the internal modem is off (in case the driver lost it) */
+ if (macio->type == macio_keylargo)
+ core99_modem_enable(macio->of_node, 0, 0);
+ else
+ pangea_modem_enable(macio->of_node, 0, 0);
+
+ /* We make sure the sound is off as well */
+ core99_sound_chip_enable(macio->of_node, 0, 0);
+
+ /*
+ * Save various bits of KeyLargo
+ */
+
+ /* Save the state of the various GPIOs */
+ save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
+ save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
+ for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
+ save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
+ for (i=0; i<KEYLARGO_GPIO_CNT; i++)
+ save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);
+
+ /* Save the FCRs */
+ if (macio->type == macio_keylargo)
+ save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
+ save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
+ save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
+ save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
+ save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
+ save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
+ if (macio->type == macio_pangea || macio->type == macio_intrepid)
+ save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);
+
+ /* Save state & config of DBDMA channels */
+ dbdma_save(macio, save_dbdma);
+
+ /*
+ * Turn off as much as we can
+ */
+ if (macio->type == macio_pangea)
+ pangea_shutdown(macio, 1);
+ else if (macio->type == macio_intrepid)
+ intrepid_shutdown(macio, 1);
+ else if (macio->type == macio_keylargo)
+ keylargo_shutdown(macio, 1);
+
+ /*
+ * Put the host bridge to sleep
+ */
+
+ save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
+	/* Note: do not switch GMAC off; the driver does it when necessary.
+	 * WOL must keep it enabled!
+	 */
+ UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
+ ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
+ udelay(100);
+ UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
+ UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
+ mdelay(10);
+
+ /*
+ * FIXME: A bit of black magic with OpenPIC (don't ask me why)
+ */
+ if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
+ MACIO_BIS(0x506e0, 0x00400000);
+ MACIO_BIS(0x506e0, 0x80000000);
+ }
+ return 0;
+}
+
+static int
+core99_wake_up(void)
+{
+ struct macio_chip *macio;
+ int i;
+
+ macio = &macio_chips[0];
+ if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+ macio->type != macio_intrepid)
+ return -ENODEV;
+
+ /*
+ * Wakeup the host bridge
+ */
+ UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
+ udelay(10);
+ UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
+ udelay(10);
+
+ /*
+ * Restore KeyLargo
+ */
+
+ if (macio->type == macio_keylargo) {
+ MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
+ (void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
+ }
+ MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
+ (void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
+ MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
+ (void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
+ MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
+ (void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
+ MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
+ (void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
+ MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
+ (void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
+ if (macio->type == macio_pangea || macio->type == macio_intrepid) {
+ MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
+ (void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
+ }
+
+ dbdma_restore(macio, save_dbdma);
+
+ MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
+ MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
+ for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
+ MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
+ for (i=0; i<KEYLARGO_GPIO_CNT; i++)
+ MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);
+
+ /* FIXME more black magic with OpenPIC ... */
+ if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
+ MACIO_BIC(0x506e0, 0x00400000);
+ MACIO_BIC(0x506e0, 0x80000000);
+ }
+
+ UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
+ udelay(100);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static long
+core99_sleep_state(struct device_node *node, long param, long value)
+{
+	/* Param == 1 means to enter the "fake sleep" mode that is
+	 * used for CPU speed switching
+	 */
+ if (param == 1) {
+ if (value == 1) {
+ UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
+ UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
+ } else {
+ UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
+ udelay(10);
+ UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
+ udelay(10);
+ }
+ return 0;
+ }
+ if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+ return -EPERM;
+
+#ifdef CONFIG_PM
+ if (value == 1)
+ return core99_sleep();
+ else if (value == 0)
+ return core99_wake_up();
+
+#endif /* CONFIG_PM */
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+static long
+generic_dev_can_wake(struct device_node *node, long param, long value)
+{
+	/* TODO: eventually check that we are really dealing with the
+	 * on-board video device ...
+	 */
+
+ if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP)
+ pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
+ return 0;
+}
+
+static long generic_get_mb_info(struct device_node *node, long param, long value)
+{
+ switch(param) {
+ case PMAC_MB_INFO_MODEL:
+ return pmac_mb.model_id;
+ case PMAC_MB_INFO_FLAGS:
+ return pmac_mb.board_flags;
+ case PMAC_MB_INFO_NAME:
+ /* hack hack hack... but should work */
+ *((const char **)value) = pmac_mb.model_name;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+
+/*
+ * Table definitions
+ */
+
+/* Used on any machine
+ */
+static struct feature_table_entry any_features[] = {
+ { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
+ { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake },
+ { 0, NULL }
+};
+
+#ifndef CONFIG_PPC64
+
+/* OHare-based motherboards. Currently, we only use these on the
+ * 2400, 3400 and 3500 series PowerBooks. Some older desktops seem
+ * to have issues with turning those ASIC cells on/off.
+ */
+static struct feature_table_entry ohare_features[] = {
+ { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
+ { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable },
+ { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable },
+ { PMAC_FTR_IDE_ENABLE, ohare_ide_enable},
+ { PMAC_FTR_IDE_RESET, ohare_ide_reset},
+ { PMAC_FTR_SLEEP_STATE, ohare_sleep_state },
+ { 0, NULL }
+};
+
+/* Heathrow desktop machines (Beige G3).
+ * Separated as some features couldn't be properly tested
+ * and the serial port control bits appear to confuse the chip.
+ */
+static struct feature_table_entry heathrow_desktop_features[] = {
+ { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
+ { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
+ { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
+ { PMAC_FTR_IDE_RESET, heathrow_ide_reset },
+ { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
+ { 0, NULL }
+};
+
+/* Heathrow-based laptops, that is the Wallstreet and Mainstreet
+ * PowerBooks.
+ */
+static struct feature_table_entry heathrow_laptop_features[] = {
+ { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
+ { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
+ { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
+ { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
+ { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
+ { PMAC_FTR_IDE_RESET, heathrow_ide_reset },
+ { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
+ { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable },
+ { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state },
+ { 0, NULL }
+};
+
+/* Paddington-based machines:
+ * the Lombard (101) PowerBook, first iMac models, B&W G3 and Yikes G4.
+ */
+static struct feature_table_entry paddington_features[] = {
+ { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
+ { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
+ { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
+ { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
+ { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
+ { PMAC_FTR_IDE_RESET, heathrow_ide_reset },
+ { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
+ { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable },
+ { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state },
+ { 0, NULL }
+};
+
+/* Core99 & MacRISC 2 machines (all machines released since the
+ * original iBook included, that is all AGP machines), except the
+ * Pangea chipset. The Pangea chipset is the "combo" UniNorth/KeyLargo
+ * used on the iBook2 & iMac "Flower Power".
+ */
+static struct feature_table_entry core99_features[] = {
+ { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
+ { PMAC_FTR_MODEM_ENABLE, core99_modem_enable },
+ { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
+ { PMAC_FTR_IDE_RESET, core99_ide_reset },
+ { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
+ { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
+ { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
+ { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
+ { PMAC_FTR_USB_ENABLE, core99_usb_enable },
+ { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
+ { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
+#ifdef CONFIG_PM
+ { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
+#endif
+#ifdef CONFIG_SMP
+ { PMAC_FTR_RESET_CPU, core99_reset_cpu },
+#endif /* CONFIG_SMP */
+ { PMAC_FTR_READ_GPIO, core99_read_gpio },
+ { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
+ { 0, NULL }
+};
+
+/* RackMac
+ */
+static struct feature_table_entry rackmac_features[] = {
+ { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
+ { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
+ { PMAC_FTR_IDE_RESET, core99_ide_reset },
+ { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
+ { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
+ { PMAC_FTR_USB_ENABLE, core99_usb_enable },
+ { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
+ { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
+ { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
+#ifdef CONFIG_SMP
+ { PMAC_FTR_RESET_CPU, core99_reset_cpu },
+#endif /* CONFIG_SMP */
+ { PMAC_FTR_READ_GPIO, core99_read_gpio },
+ { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
+ { 0, NULL }
+};
+
+/* Pangea features
+ */
+static struct feature_table_entry pangea_features[] = {
+ { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
+ { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
+ { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
+ { PMAC_FTR_IDE_RESET, core99_ide_reset },
+ { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
+ { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
+ { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
+ { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
+ { PMAC_FTR_USB_ENABLE, core99_usb_enable },
+ { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
+ { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
+ { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
+ { PMAC_FTR_READ_GPIO, core99_read_gpio },
+ { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
+ { 0, NULL }
+};
+
+/* Intrepid features
+ */
+static struct feature_table_entry intrepid_features[] = {
+ { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
+ { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
+ { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
+ { PMAC_FTR_IDE_RESET, core99_ide_reset },
+ { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
+ { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
+ { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
+ { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
+ { PMAC_FTR_USB_ENABLE, core99_usb_enable },
+ { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
+ { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
+ { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
+ { PMAC_FTR_READ_GPIO, core99_read_gpio },
+ { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
+ { PMAC_FTR_AACK_DELAY_ENABLE, intrepid_aack_delay_enable },
+ { 0, NULL }
+};
+
+#else /* CONFIG_PPC64 */
+
+/* G5 features
+ */
+static struct feature_table_entry g5_features[] = {
+ { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
+ { PMAC_FTR_1394_ENABLE, g5_fw_enable },
+ { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
+ { PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset },
+ { PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable },
+#ifdef CONFIG_SMP
+ { PMAC_FTR_RESET_CPU, g5_reset_cpu },
+#endif /* CONFIG_SMP */
+ { PMAC_FTR_READ_GPIO, core99_read_gpio },
+ { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
+ { 0, NULL }
+};
+
+#endif /* CONFIG_PPC64 */
+
+static struct pmac_mb_def pmac_mb_defs[] = {
+#ifndef CONFIG_PPC64
+ /*
+ * Desktops
+ */
+
+ { "AAPL,8500", "PowerMac 8500/8600",
+ PMAC_TYPE_PSURGE, NULL,
+ 0
+ },
+ { "AAPL,9500", "PowerMac 9500/9600",
+ PMAC_TYPE_PSURGE, NULL,
+ 0
+ },
+ { "AAPL,7200", "PowerMac 7200",
+ PMAC_TYPE_PSURGE, NULL,
+ 0
+ },
+ { "AAPL,7300", "PowerMac 7200/7300",
+ PMAC_TYPE_PSURGE, NULL,
+ 0
+ },
+ { "AAPL,7500", "PowerMac 7500",
+ PMAC_TYPE_PSURGE, NULL,
+ 0
+ },
+ { "AAPL,ShinerESB", "Apple Network Server",
+ PMAC_TYPE_ANS, NULL,
+ 0
+ },
+ { "AAPL,e407", "Alchemy",
+ PMAC_TYPE_ALCHEMY, NULL,
+ 0
+ },
+ { "AAPL,e411", "Gazelle",
+ PMAC_TYPE_GAZELLE, NULL,
+ 0
+ },
+ { "AAPL,Gossamer", "PowerMac G3 (Gossamer)",
+ PMAC_TYPE_GOSSAMER, heathrow_desktop_features,
+ 0
+ },
+ { "AAPL,PowerMac G3", "PowerMac G3 (Silk)",
+ PMAC_TYPE_SILK, heathrow_desktop_features,
+ 0
+ },
+ { "PowerMac1,1", "Blue&White G3",
+ PMAC_TYPE_YOSEMITE, paddington_features,
+ 0
+ },
+ { "PowerMac1,2", "PowerMac G4 PCI Graphics",
+ PMAC_TYPE_YIKES, paddington_features,
+ 0
+ },
+ { "PowerMac2,1", "iMac FireWire",
+ PMAC_TYPE_FW_IMAC, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+ },
+ { "PowerMac2,2", "iMac FireWire",
+ PMAC_TYPE_FW_IMAC, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+ },
+ { "PowerMac3,1", "PowerMac G4 AGP Graphics",
+ PMAC_TYPE_SAWTOOTH, core99_features,
+ PMAC_MB_OLD_CORE99
+ },
+ { "PowerMac3,2", "PowerMac G4 AGP Graphics",
+ PMAC_TYPE_SAWTOOTH, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+ },
+ { "PowerMac3,3", "PowerMac G4 AGP Graphics",
+ PMAC_TYPE_SAWTOOTH, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+ },
+ { "PowerMac3,4", "PowerMac G4 Silver",
+ PMAC_TYPE_QUICKSILVER, core99_features,
+ PMAC_MB_MAY_SLEEP
+ },
+ { "PowerMac3,5", "PowerMac G4 Silver",
+ PMAC_TYPE_QUICKSILVER, core99_features,
+ PMAC_MB_MAY_SLEEP
+ },
+ { "PowerMac3,6", "PowerMac G4 Windtunnel",
+ PMAC_TYPE_WINDTUNNEL, core99_features,
+ PMAC_MB_MAY_SLEEP,
+ },
+ { "PowerMac4,1", "iMac \"Flower Power\"",
+ PMAC_TYPE_PANGEA_IMAC, pangea_features,
+ PMAC_MB_MAY_SLEEP
+ },
+ { "PowerMac4,2", "Flat panel iMac",
+ PMAC_TYPE_FLAT_PANEL_IMAC, pangea_features,
+ PMAC_MB_CAN_SLEEP
+ },
+ { "PowerMac4,4", "eMac",
+ PMAC_TYPE_EMAC, core99_features,
+ PMAC_MB_MAY_SLEEP
+ },
+ { "PowerMac5,1", "PowerMac G4 Cube",
+ PMAC_TYPE_CUBE, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+ },
+ { "PowerMac6,1", "Flat panel iMac",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP,
+ },
+ { "PowerMac6,3", "Flat panel iMac",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP,
+ },
+ { "PowerMac6,4", "eMac",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP,
+ },
+ { "PowerMac10,1", "Mac mini",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP,
+ },
+ { "PowerMac10,2", "Mac mini (Late 2005)",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP,
+ },
+ { "iMac,1", "iMac (first generation)",
+ PMAC_TYPE_ORIG_IMAC, paddington_features,
+ 0
+ },
+
+	/*
+	 * Xserves
+	 */
+
+ { "RackMac1,1", "XServe",
+ PMAC_TYPE_RACKMAC, rackmac_features,
+ 0,
+ },
+ { "RackMac1,2", "XServe rev. 2",
+ PMAC_TYPE_RACKMAC, rackmac_features,
+ 0,
+ },
+
+ /*
+ * Laptops
+ */
+
+ { "AAPL,3400/2400", "PowerBook 3400",
+ PMAC_TYPE_HOOPER, ohare_features,
+ PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+ },
+ { "AAPL,3500", "PowerBook 3500",
+ PMAC_TYPE_KANGA, ohare_features,
+ PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+ },
+ { "AAPL,PowerBook1998", "PowerBook Wallstreet",
+ PMAC_TYPE_WALLSTREET, heathrow_laptop_features,
+ PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+ },
+ { "PowerBook1,1", "PowerBook 101 (Lombard)",
+ PMAC_TYPE_101_PBOOK, paddington_features,
+ PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+ },
+ { "PowerBook2,1", "iBook (first generation)",
+ PMAC_TYPE_ORIG_IBOOK, core99_features,
+ PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+ },
+ { "PowerBook2,2", "iBook FireWire",
+ PMAC_TYPE_FW_IBOOK, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
+ PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+ },
+ { "PowerBook3,1", "PowerBook Pismo",
+ PMAC_TYPE_PISMO, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
+ PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+ },
+ { "PowerBook3,2", "PowerBook Titanium",
+ PMAC_TYPE_TITANIUM, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook3,3", "PowerBook Titanium II",
+ PMAC_TYPE_TITANIUM2, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook3,4", "PowerBook Titanium III",
+ PMAC_TYPE_TITANIUM3, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook3,5", "PowerBook Titanium IV",
+ PMAC_TYPE_TITANIUM4, core99_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook4,1", "iBook 2",
+ PMAC_TYPE_IBOOK2, pangea_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook4,2", "iBook 2",
+ PMAC_TYPE_IBOOK2, pangea_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook4,3", "iBook 2 rev. 2",
+ PMAC_TYPE_IBOOK2, pangea_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+ },
+ { "PowerBook5,1", "PowerBook G4 17\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,2", "PowerBook G4 15\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,3", "PowerBook G4 17\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,4", "PowerBook G4 15\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,5", "PowerBook G4 17\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,6", "PowerBook G4 15\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,7", "PowerBook G4 17\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,8", "PowerBook G4 15\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE,
+ },
+ { "PowerBook5,9", "PowerBook G4 17\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,1", "PowerBook G4 12\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,2", "PowerBook G4",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,3", "iBook G4",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,4", "PowerBook G4 12\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,5", "iBook G4",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,7", "iBook G4",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+ { "PowerBook6,8", "PowerBook G4 12\"",
+ PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
+ PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+ },
+#else /* CONFIG_PPC64 */
+ { "PowerMac7,2", "PowerMac G5",
+ PMAC_TYPE_POWERMAC_G5, g5_features,
+ 0,
+ },
+#ifdef CONFIG_PPC64
+ { "PowerMac7,3", "PowerMac G5",
+ PMAC_TYPE_POWERMAC_G5, g5_features,
+ 0,
+ },
+ { "PowerMac8,1", "iMac G5",
+ PMAC_TYPE_IMAC_G5, g5_features,
+ 0,
+ },
+ { "PowerMac9,1", "PowerMac G5",
+ PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
+ 0,
+ },
+ { "PowerMac11,2", "PowerMac G5 Dual Core",
+ PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
+ 0,
+ },
+ { "PowerMac12,1", "iMac G5 (iSight)",
+ PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
+ 0,
+ },
+ { "RackMac3,1", "XServe G5",
+ PMAC_TYPE_XSERVE_G5, g5_features,
+ 0,
+ },
+#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC64 */
+};
+
+/*
+ * The toplevel feature_call callback
+ */
+long pmac_do_feature_call(unsigned int selector, ...)
+{
+ struct device_node *node;
+ long param, value;
+ int i;
+ feature_call func = NULL;
+ va_list args;
+
+ if (pmac_mb.features)
+ for (i=0; pmac_mb.features[i].function; i++)
+ if (pmac_mb.features[i].selector == selector) {
+ func = pmac_mb.features[i].function;
+ break;
+ }
+ if (!func)
+ for (i=0; any_features[i].function; i++)
+ if (any_features[i].selector == selector) {
+ func = any_features[i].function;
+ break;
+ }
+ if (!func)
+ return -ENODEV;
+
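+	/* All feature functions take the same (node, param, value) triplet */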
+ va_start(args, selector);
+ node = (struct device_node*)va_arg(args, void*);
+ param = va_arg(args, long);
+ value = va_arg(args, long);
+ va_end(args);
+
+ return func(node, param, value);
+}
+
+static int __init probe_motherboard(void)
+{
+ int i;
+ struct macio_chip *macio = &macio_chips[0];
+ const char *model = NULL;
+ struct device_node *dt;
+ int ret = 0;
+
+	/* Look up the known motherboard type in the device-tree. First try
+	 * an exact match on the "model" property, then try a "compatible"
+	 * match if none is found.
+	 */
+ dt = of_find_node_by_name(NULL, "device-tree");
+ if (dt != NULL)
+ model = of_get_property(dt, "model", NULL);
+ for(i=0; model && i<ARRAY_SIZE(pmac_mb_defs); i++) {
+ if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
+ pmac_mb = pmac_mb_defs[i];
+ goto found;
+ }
+ }
+ for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) {
+ if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) {
+ pmac_mb = pmac_mb_defs[i];
+ goto found;
+ }
+ }
+
+ /* Fallback to selection depending on mac-io chip type */
+ switch(macio->type) {
+#ifndef CONFIG_PPC64
+ case macio_grand_central:
+ pmac_mb.model_id = PMAC_TYPE_PSURGE;
+ pmac_mb.model_name = "Unknown PowerSurge";
+ break;
+ case macio_ohare:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
+ pmac_mb.model_name = "Unknown OHare-based";
+ break;
+ case macio_heathrow:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
+ pmac_mb.model_name = "Unknown Heathrow-based";
+ pmac_mb.features = heathrow_desktop_features;
+ break;
+ case macio_paddington:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
+ pmac_mb.model_name = "Unknown Paddington-based";
+ pmac_mb.features = paddington_features;
+ break;
+ case macio_keylargo:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
+ pmac_mb.model_name = "Unknown Keylargo-based";
+ pmac_mb.features = core99_features;
+ break;
+ case macio_pangea:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
+ pmac_mb.model_name = "Unknown Pangea-based";
+ pmac_mb.features = pangea_features;
+ break;
+ case macio_intrepid:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
+ pmac_mb.model_name = "Unknown Intrepid-based";
+ pmac_mb.features = intrepid_features;
+ break;
+#else /* CONFIG_PPC64 */
+ case macio_keylargo2:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
+ pmac_mb.model_name = "Unknown K2-based";
+ pmac_mb.features = g5_features;
+ break;
+ case macio_shasta:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_SHASTA;
+ pmac_mb.model_name = "Unknown Shasta-based";
+ pmac_mb.features = g5_features;
+ break;
+#endif /* CONFIG_PPC64 */
+ default:
+ ret = -ENODEV;
+ goto done;
+ }
+found:
+#ifndef CONFIG_PPC64
+ /* Fixup Hooper vs. Comet */
+ if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
+ u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
+ if (!mach_id_ptr) {
+ ret = -ENODEV;
+ goto done;
+ }
+ /* Here, I used to disable the media-bay on comet. It
+ * appears this is wrong, the floppy connector is actually
+ * a kind of media-bay and works with the current driver.
+ */
+ if (__raw_readl(mach_id_ptr) & 0x20000000UL)
+ pmac_mb.model_id = PMAC_TYPE_COMET;
+ iounmap(mach_id_ptr);
+ }
+
+	/* Set the default value of powersave_nap on machines that support it.
+	 * It appears that uninorth rev 3 has a problem with it, so we don't
+	 * enable it on those. In theory, the flush-on-lock property is
+	 * supposed to be set when nap is not supported, but I'm not very
+	 * confident that all Apple OF revs did it properly, so I do it the
+	 * paranoid way.
+	 */
+ while (uninorth_base && uninorth_rev > 3) {
+ struct device_node *cpus = of_find_node_by_path("/cpus");
+ struct device_node *np;
+
+ if (!cpus || !cpus->child) {
+ printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
+ of_node_put(cpus);
+ break;
+ }
+ np = cpus->child;
+ /* Nap mode not supported on SMP */
+ if (np->sibling) {
+ of_node_put(cpus);
+ break;
+ }
+ /* Nap mode not supported if flush-on-lock property is present */
+ if (of_get_property(np, "flush-on-lock", NULL)) {
+ of_node_put(cpus);
+ break;
+ }
+ of_node_put(cpus);
+ powersave_nap = 1;
+ printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n");
+ break;
+ }
+
+	/* On CPUs that support it (750FX), default to low speed during
+	 * NAP mode
+	 */
+ powersave_lowspeed = 1;
+
+#else /* CONFIG_PPC64 */
+ powersave_nap = 1;
+#endif /* CONFIG_PPC64 */
+
+ /* Check for "mobile" machine */
+ if (model && (strncmp(model, "PowerBook", 9) == 0
+ || strncmp(model, "iBook", 5) == 0))
+ pmac_mb.board_flags |= PMAC_MB_MOBILE;
+
+
+ printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
+done:
+ of_node_put(dt);
+ return ret;
+}
+
+/* Initialize the Core99 UniNorth host bridge and memory controller
+ */
+static void __init probe_uninorth(void)
+{
+ const u32 *addrp;
+ phys_addr_t address;
+ unsigned long actrl;
+
+ /* Locate core99 Uni-N */
+ uninorth_node = of_find_node_by_name(NULL, "uni-n");
+ uninorth_maj = 1;
+
+ /* Locate G5 u3 */
+ if (uninorth_node == NULL) {
+ uninorth_node = of_find_node_by_name(NULL, "u3");
+ uninorth_maj = 3;
+ }
+ /* Locate G5 u4 */
+ if (uninorth_node == NULL) {
+ uninorth_node = of_find_node_by_name(NULL, "u4");
+ uninorth_maj = 4;
+ }
+ if (uninorth_node == NULL) {
+ uninorth_maj = 0;
+ return;
+ }
+
+ addrp = of_get_property(uninorth_node, "reg", NULL);
+ if (addrp == NULL)
+ return;
+ address = of_translate_address(uninorth_node, addrp);
+ if (address == 0)
+ return;
+ uninorth_base = ioremap(address, 0x40000);
+ if (uninorth_base == NULL)
+ return;
+ uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
+ if (uninorth_maj == 3 || uninorth_maj == 4) {
+ u3_ht_base = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
+ if (u3_ht_base == NULL) {
+ iounmap(uninorth_base);
+ return;
+ }
+ }
+
+ printk(KERN_INFO "Found %s memory controller & host bridge"
+ " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" :
+ uninorth_maj == 4 ? "U4" : "UniNorth",
+ (unsigned int)address, uninorth_rev);
+ printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
+
+	/* Set the arbiter QAck delay according to what Apple does
+	 */
+ if (uninorth_rev < 0x11) {
+ actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
+ actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
+ UNI_N_ARB_CTRL_QACK_DELAY) <<
+ UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
+ UN_OUT(UNI_N_ARB_CTRL, actrl);
+ }
+
+	/* Some more magic as done by Apple in recent MacOS X on UniNorth
+	 * revs 1.5 to 2.0 and Pangea. Seems to toggle the UniN Maxbus/PCI
+	 * memory timeout
+	 */
+ if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) ||
+ uninorth_rev == 0xc0)
+ UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
+}
+
+static void __init probe_one_macio(const char *name, const char *compat, int type)
+{
+ struct device_node* node;
+ int i;
+ volatile u32 __iomem *base;
+ const u32 *addrp, *revp;
+ phys_addr_t addr;
+ u64 size;
+
+ for (node = NULL; (node = of_find_node_by_name(node, name)) != NULL;) {
+ if (!compat)
+ break;
+ if (of_device_is_compatible(node, compat))
+ break;
+ }
+ if (!node)
+ return;
+ for(i=0; i<MAX_MACIO_CHIPS; i++) {
+ if (!macio_chips[i].of_node)
+ break;
+ if (macio_chips[i].of_node == node)
+ return;
+ }
+
+ if (i >= MAX_MACIO_CHIPS) {
+ printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
+ printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
+ return;
+ }
+ addrp = of_get_pci_address(node, 0, &size, NULL);
+ if (addrp == NULL) {
+ printk(KERN_ERR "pmac_feature: %s: can't find base !\n",
+ node->full_name);
+ return;
+ }
+ addr = of_translate_address(node, addrp);
+ if (addr == 0) {
+ printk(KERN_ERR "pmac_feature: %s, can't translate base !\n",
+ node->full_name);
+ return;
+ }
+ base = ioremap(addr, (unsigned long)size);
+ if (!base) {
+ printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n",
+ node->full_name);
+ return;
+ }
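+	/* Pangea, Intrepid and Shasta probe as KeyLargo-compatible; tell
+	 * them apart by their PCI device-id.
+	 */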
+ if (type == macio_keylargo || type == macio_keylargo2) {
+		const u32 *did = of_get_property(node, "device-id", NULL);
+		if (did && *did == 0x00000025)
+			type = macio_pangea;
+		if (did && *did == 0x0000003e)
+			type = macio_intrepid;
+		if (did && *did == 0x0000004f)
+			type = macio_shasta;
+ }
+ macio_chips[i].of_node = node;
+ macio_chips[i].type = type;
+ macio_chips[i].base = base;
+ macio_chips[i].flags = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON;
+ macio_chips[i].name = macio_names[type];
+ revp = of_get_property(node, "revision-id", NULL);
+ if (revp)
+ macio_chips[i].rev = *revp;
+ printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
+ macio_names[type], macio_chips[i].rev, macio_chips[i].base);
+}
+
+static int __init
+probe_macios(void)
+{
+ /* Warning, ordering is important */
+ probe_one_macio("gc", NULL, macio_grand_central);
+ probe_one_macio("ohare", NULL, macio_ohare);
+ probe_one_macio("pci106b,7", NULL, macio_ohareII);
+ probe_one_macio("mac-io", "keylargo", macio_keylargo);
+ probe_one_macio("mac-io", "paddington", macio_paddington);
+ probe_one_macio("mac-io", "gatwick", macio_gatwick);
+ probe_one_macio("mac-io", "heathrow", macio_heathrow);
+ probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
+
+	/* Make sure the "main" macio chip appears first */
+ if (macio_chips[0].type == macio_gatwick
+ && macio_chips[1].type == macio_heathrow) {
+ struct macio_chip temp = macio_chips[0];
+ macio_chips[0] = macio_chips[1];
+ macio_chips[1] = temp;
+ }
+ if (macio_chips[0].type == macio_ohareII
+ && macio_chips[1].type == macio_ohare) {
+ struct macio_chip temp = macio_chips[0];
+ macio_chips[0] = macio_chips[1];
+ macio_chips[1] = temp;
+ }
+ macio_chips[0].lbus.index = 0;
+ macio_chips[1].lbus.index = 1;
+
+ return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
+}
+
+static void __init
+initial_serial_shutdown(struct device_node *np)
+{
+ int len;
+ const struct slot_names_prop {
+ int count;
+ char name[1];
+ } *slots;
+ const char *conn;
+ int port_type = PMAC_SCC_ASYNC;
+ int modem = 0;
+
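+	/* Identify the port (IrDA, modem or plain serial) from the
+	 * "AAPL,connector" and "slot-names" properties, then shut it down.
+	 */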
+ slots = of_get_property(np, "slot-names", &len);
+ conn = of_get_property(np, "AAPL,connector", &len);
+ if (conn && (strcmp(conn, "infrared") == 0))
+ port_type = PMAC_SCC_IRDA;
+ else if (of_device_is_compatible(np, "cobalt"))
+ modem = 1;
+ else if (slots && slots->count > 0) {
+ if (strcmp(slots->name, "IrDA") == 0)
+ port_type = PMAC_SCC_IRDA;
+ else if (strcmp(slots->name, "Modem") == 0)
+ modem = 1;
+ }
+ if (modem)
+ pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
+ pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0);
+}
+
+static void __init
+set_initial_features(void)
+{
+ struct device_node *np;
+
+	/* That hack appears to be necessary for some StarMax motherboards
+	 * but I'm not too sure it was audited for side-effects on other
+	 * ohare-based machines...
+	 * Since I still have difficulties figuring out the right way to
+	 * differentiate them all and since that hack was there for a long
+	 * time, I'll keep it around
+	 */
+ if (macio_chips[0].type == macio_ohare) {
+ struct macio_chip *macio = &macio_chips[0];
+ np = of_find_node_by_name(NULL, "via-pmu");
+ if (np)
+ MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+ else
+ MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
+ of_node_put(np);
+ } else if (macio_chips[1].type == macio_ohare) {
+ struct macio_chip *macio = &macio_chips[1];
+ MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+ }
+
+#ifdef CONFIG_PPC64
+ if (macio_chips[0].type == macio_keylargo2 ||
+ macio_chips[0].type == macio_shasta) {
+#ifndef CONFIG_SMP
+		/* On dual-CPU machines running a UP kernel, the second CPU
+		 * eats bus cycles. We need to take it off the bus. This is
+		 * done from pmac_smp for SMP kernels running on one CPU
+		 */
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np != NULL)
+ np = of_find_node_by_type(np, "cpu");
+ if (np != NULL) {
+ g5_phy_disable_cpu1();
+ of_node_put(np);
+ }
+#endif /* CONFIG_SMP */
+ /* Enable GMAC for now for PCI probing. It will be disabled
+ * later on after PCI probe
+ */
+ for_each_node_by_name(np, "ethernet")
+ if (of_device_is_compatible(np, "K2-GMAC"))
+ g5_gmac_enable(np, 0, 1);
+
+		/* Enable FW before PCI probe. Will be disabled later on.
+		 * Note: We should have a better way to check that we are
+		 * dealing with the uninorth internal cell and not a PCI cell
+		 * on the external PCI. The code below works though.
+		 */
+ for_each_node_by_name(np, "firewire") {
+ if (of_device_is_compatible(np, "pci106b,5811")) {
+ macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
+ g5_fw_enable(np, 0, 1);
+ }
+ }
+ }
+#else /* CONFIG_PPC64 */
+
+ if (macio_chips[0].type == macio_keylargo ||
+ macio_chips[0].type == macio_pangea ||
+ macio_chips[0].type == macio_intrepid) {
+ /* Enable GMAC for now for PCI probing. It will be disabled
+ * later on after PCI probe
+ */
+ for_each_node_by_name(np, "ethernet") {
+ if (np->parent
+ && of_device_is_compatible(np->parent, "uni-north")
+ && of_device_is_compatible(np, "gmac"))
+ core99_gmac_enable(np, 0, 1);
+ }
+
+		/* Enable FW before PCI probe. Will be disabled later on.
+		 * Note: We should have a better way to check that we are
+		 * dealing with the uninorth internal cell and not a PCI cell
+		 * on the external PCI. The code below works though.
+		 */
+ for_each_node_by_name(np, "firewire") {
+ if (np->parent
+ && of_device_is_compatible(np->parent, "uni-north")
+ && (of_device_is_compatible(np, "pci106b,18") ||
+ of_device_is_compatible(np, "pci106b,30") ||
+ of_device_is_compatible(np, "pci11c1,5811"))) {
+ macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
+ core99_firewire_enable(np, 0, 1);
+ }
+ }
+
+		/* Enable ATA-100 before PCI probe. */
+		for_each_node_by_name(np, "ata-6") {
+ if (np->parent
+ && of_device_is_compatible(np->parent, "uni-north")
+ && of_device_is_compatible(np, "kauai-ata")) {
+ core99_ata100_enable(np, 1);
+ }
+ }
+
+ /* Switch airport off */
+ for_each_node_by_name(np, "radio") {
+ if (np->parent == macio_chips[0].of_node) {
+ macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
+ core99_airport_enable(np, 0, 0);
+ }
+ }
+ }
+
+ /* On all machines that support sound PM, switch sound off */
+ if (macio_chips[0].of_node)
+ pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
+ macio_chips[0].of_node, 0, 0);
+
+	/* On some desktop G3s, however, we turn it back on */
+ if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
+ && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
+ pmac_mb.model_id == PMAC_TYPE_SILK)) {
+ struct macio_chip *macio = &macio_chips[0];
+ MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+ MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
+ }
+
+#endif /* CONFIG_PPC64 */
+
+ /* On all machines, switch modem & serial ports off */
+ for_each_node_by_name(np, "ch-a")
+ initial_serial_shutdown(np);
+ of_node_put(np);
+ for_each_node_by_name(np, "ch-b")
+ initial_serial_shutdown(np);
+ of_node_put(np);
+}
+
+void __init
+pmac_feature_init(void)
+{
+ /* Detect the UniNorth memory controller */
+ probe_uninorth();
+
+ /* Probe mac-io controllers */
+ if (probe_macios()) {
+ printk(KERN_WARNING "No mac-io chip found\n");
+ return;
+ }
+
+ /* Probe machine type */
+ if (probe_motherboard())
+ printk(KERN_WARNING "Unknown PowerMac !\n");
+
+ /* Set some initial features (turn off some chips that will
+ * be later turned on)
+ */
+ set_initial_features();
+}
+
+#if 0
+static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
+{
+ int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
+ int bits[8] = { 8,16,0,32,2,4,0,0 };
+ int freq = (frq >> 8) & 0xf;
+
+ if (freqs[freq] == 0)
+ printk("%s: Unknown HT link frequency %x\n", name, freq);
+ else
+ printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
+ name, freqs[freq],
+ bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
+}
+
+void __init pmac_check_ht_link(void)
+{
+ u32 ufreq, freq, ucfg, cfg;
+ struct device_node *pcix_node;
+ u8 px_bus, px_devfn;
+ struct pci_controller *px_hose;
+
+ (void)in_be32(u3_ht_base + U3_HT_LINK_COMMAND);
+ ucfg = cfg = in_be32(u3_ht_base + U3_HT_LINK_CONFIG);
+ ufreq = freq = in_be32(u3_ht_base + U3_HT_LINK_FREQ);
+ dump_HT_speeds("U3 HyperTransport", cfg, freq);
+
+ pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
+ if (pcix_node == NULL) {
+ printk("No PCI-X bridge found\n");
+ return;
+ }
+ if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
+ printk("PCI-X bridge found but not matched to pci\n");
+ return;
+ }
+ px_hose = pci_find_hose_for_OF_device(pcix_node);
+ if (px_hose == NULL) {
+ printk("PCI-X bridge found but not matched to host\n");
+ return;
+ }
+ early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
+ early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
+ dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
+ early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
+ early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
+ dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
+}
+#endif /* 0 */
+
+/*
+ * Early video resume hook
+ */
+
+static void (*pmac_early_vresume_proc)(void *data);
+static void *pmac_early_vresume_data;
+
+void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
+{
+ if (!machine_is(powermac))
+ return;
+ preempt_disable();
+ pmac_early_vresume_proc = proc;
+ pmac_early_vresume_data = data;
+ preempt_enable();
+}
+EXPORT_SYMBOL(pmac_set_early_video_resume);
+
+void pmac_call_early_video_resume(void)
+{
+ if (pmac_early_vresume_proc)
+ pmac_early_vresume_proc(pmac_early_vresume_data);
+}
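For illustration, a hedged sketch of how a client hooks into this (the driver-side names here are hypothetical, not part of this file):

/* Hypothetical framebuffer driver using the early video resume hook */
static void my_fb_early_resume(void *data)
{
	/* data is the driver-private pointer registered below; reprogram
	 * the video hardware here, very early during wakeup, before the
	 * regular device resume callbacks are run */
}

static void my_fb_register_early_resume(void *driver_data)
{
	pmac_set_early_video_resume(my_fb_early_resume, driver_data);
}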
+
+/*
+ * AGP related suspend/resume code
+ */
+
+static struct pci_dev *pmac_agp_bridge;
+static int (*pmac_agp_suspend)(struct pci_dev *bridge);
+static int (*pmac_agp_resume)(struct pci_dev *bridge);
+
+void pmac_register_agp_pm(struct pci_dev *bridge,
+ int (*suspend)(struct pci_dev *bridge),
+ int (*resume)(struct pci_dev *bridge))
+{
+ if (suspend || resume) {
+ pmac_agp_bridge = bridge;
+ pmac_agp_suspend = suspend;
+ pmac_agp_resume = resume;
+ return;
+ }
+ if (bridge != pmac_agp_bridge)
+ return;
+ pmac_agp_suspend = pmac_agp_resume = NULL;
+ return;
+}
+EXPORT_SYMBOL(pmac_register_agp_pm);
+
+void pmac_suspend_agp_for_card(struct pci_dev *dev)
+{
+ if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
+ return;
+ if (pmac_agp_bridge->bus != dev->bus)
+ return;
+ pmac_agp_suspend(pmac_agp_bridge);
+}
+EXPORT_SYMBOL(pmac_suspend_agp_for_card);
+
+void pmac_resume_agp_for_card(struct pci_dev *dev)
+{
+ if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
+ return;
+ if (pmac_agp_bridge->bus != dev->bus)
+ return;
+ pmac_agp_resume(pmac_agp_bridge);
+}
+EXPORT_SYMBOL(pmac_resume_agp_for_card);
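A hedged sketch of how these three exports fit together: the AGP bridge driver registers its callbacks once, and a graphics driver sitting on the same bus brackets its own sleep path with the suspend/resume helpers (all names below are hypothetical):

/* Hypothetical callbacks an AGP bridge driver would register */
static int my_agp_suspend(struct pci_dev *bridge)
{
	/* quiesce the AGP bridge before the card is powered down */
	return 0;
}

static int my_agp_resume(struct pci_dev *bridge)
{
	/* re-enable AGP once the card is powered back up */
	return 0;
}

static void example_agp_pm(struct pci_dev *bridge, struct pci_dev *card)
{
	pmac_register_agp_pm(bridge, my_agp_suspend, my_agp_resume);

	/* graphics driver side, around its own suspend/resume code: */
	pmac_suspend_agp_for_card(card);
	/* ... card asleep, then woken back up ... */
	pmac_resume_agp_for_card(card);
}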
+
+int pmac_get_uninorth_variant(void)
+{
+ return uninorth_maj;
+}
diff --git a/kernel/arch/powerpc/platforms/powermac/low_i2c.c b/kernel/arch/powerpc/platforms/powermac/low_i2c.c
new file mode 100644
index 000000000..7553b6a77
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/low_i2c.c
@@ -0,0 +1,1515 @@
+/*
+ * arch/powerpc/platforms/powermac/low_i2c.c
+ *
+ * Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The linux i2c layer isn't completely suitable for our needs for various
+ * reasons ranging from too late initialisation to semantics not perfectly
+ * matching some requirements of the apple platform functions etc...
+ *
+ * This file thus provides a simple low level unified i2c interface for
+ * powermac that covers the various types of i2c busses used in Apple machines.
+ * For now, keywest, PMU and SMU, though we could add Cuda, or other bit
+ * banging busses found on older chipsets in earlier machines if we ever need
+ * one of them.
+ *
+ * The drivers in this file are synchronous/blocking. In addition, the
+ * keywest one is fairly slow due to the use of msleep instead of interrupts
+ * as the interrupt is currently used by i2c-keywest. In the long run, we
+ * might want to get rid of those high-level interfaces to linux i2c layer
+ * either completely (converting all drivers) or replacing them all with a
+ * single stub driver on top of this one. Once done, the interrupt will be
+ * available for our use.
+ */
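As a concrete illustration of the interface implemented below, a hedged sketch of a client doing a combined-mode register read; the device node, sub-address and function name are hypothetical:

/* Hypothetical client: read one register from an i2c device node */
static int example_i2c_read_reg(struct device_node *dev, u8 subaddr, u8 *val)
{
	struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
	u8 addrdir;
	int rc;

	if (bus == NULL)
		return -ENODEV;
	/* low byte of "reg": shifted 7-bit address, bit 0 = direction */
	addrdir = pmac_i2c_get_dev_addr(dev) | pmac_i2c_read;

	rc = pmac_i2c_open(bus, 0);		/* blocking, may sleep */
	if (rc)
		return rc;
	pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(bus, addrdir, 1, subaddr, val, 1);
	pmac_i2c_close(bus);
	return rc;
}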
+
+#undef DEBUG
+#undef DEBUG_LOW
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <asm/keylargo.h>
+#include <asm/uninorth.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/smu.h>
+#include <asm/pmac_pfunc.h>
+#include <asm/pmac_low_i2c.h>
+
+#ifdef DEBUG
+#define DBG(x...) do {\
+ printk(KERN_DEBUG "low_i2c:" x); \
+ } while(0)
+#else
+#define DBG(x...)
+#endif
+
+#ifdef DEBUG_LOW
+#define DBG_LOW(x...) do {\
+ printk(KERN_DEBUG "low_i2c:" x); \
+ } while(0)
+#else
+#define DBG_LOW(x...)
+#endif
+
+
+static int pmac_i2c_force_poll = 1;
+
+/*
+ * A bus structure. Each bus in the system has such a structure associated.
+ */
+struct pmac_i2c_bus
+{
+ struct list_head link;
+ struct device_node *controller;
+ struct device_node *busnode;
+ int type;
+ int flags;
+ struct i2c_adapter adapter;
+ void *hostdata;
+ int channel; /* some hosts have multiple */
+ int mode; /* current mode */
+ struct mutex mutex;
+ int opened;
+ int polled; /* open mode */
+ struct platform_device *platform_dev;
+
+ /* ops */
+ int (*open)(struct pmac_i2c_bus *bus);
+ void (*close)(struct pmac_i2c_bus *bus);
+ int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len);
+};
+
+static LIST_HEAD(pmac_i2c_busses);
+
+/*
+ * Keywest implementation
+ */
+
+struct pmac_i2c_host_kw
+{
+ struct mutex mutex; /* Access mutex for use by
+ * i2c-keywest */
+ void __iomem *base; /* register base address */
+ int bsteps; /* register stepping */
+ int speed; /* speed */
+ int irq;
+ u8 *data;
+ unsigned len;
+ int state;
+ int rw;
+ int polled;
+ int result;
+ struct completion complete;
+ spinlock_t lock;
+ struct timer_list timeout_timer;
+};
+
+/* Register indices */
+typedef enum {
+ reg_mode = 0,
+ reg_control,
+ reg_status,
+ reg_isr,
+ reg_ier,
+ reg_addr,
+ reg_subaddr,
+ reg_data
+} reg_t;
+
+/* The Tumbler audio equalizer can be really slow sometimes */
+#define KW_POLL_TIMEOUT (2*HZ)
+
+/* Mode register */
+#define KW_I2C_MODE_100KHZ 0x00
+#define KW_I2C_MODE_50KHZ 0x01
+#define KW_I2C_MODE_25KHZ 0x02
+#define KW_I2C_MODE_DUMB 0x00
+#define KW_I2C_MODE_STANDARD 0x04
+#define KW_I2C_MODE_STANDARDSUB 0x08
+#define KW_I2C_MODE_COMBINED 0x0C
+#define KW_I2C_MODE_MODE_MASK 0x0C
+#define KW_I2C_MODE_CHAN_MASK 0xF0
+
+/* Control register */
+#define KW_I2C_CTL_AAK 0x01
+#define KW_I2C_CTL_XADDR 0x02
+#define KW_I2C_CTL_STOP 0x04
+#define KW_I2C_CTL_START 0x08
+
+/* Status register */
+#define KW_I2C_STAT_BUSY 0x01
+#define KW_I2C_STAT_LAST_AAK 0x02
+#define KW_I2C_STAT_LAST_RW 0x04
+#define KW_I2C_STAT_SDA 0x08
+#define KW_I2C_STAT_SCL 0x10
+
+/* IER & ISR registers */
+#define KW_I2C_IRQ_DATA 0x01
+#define KW_I2C_IRQ_ADDR 0x02
+#define KW_I2C_IRQ_STOP 0x04
+#define KW_I2C_IRQ_START 0x08
+#define KW_I2C_IRQ_MASK 0x0F
+
+/* State machine states */
+enum {
+ state_idle,
+ state_addr,
+ state_read,
+ state_write,
+ state_stop,
+ state_dead
+};
+
+#define WRONG_STATE(name) do {\
+ printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
+ "(isr: %02x)\n", \
+ name, __kw_state_names[host->state], isr); \
+ } while(0)
+
+static const char *__kw_state_names[] = {
+ "state_idle",
+ "state_addr",
+ "state_read",
+ "state_write",
+ "state_stop",
+ "state_dead"
+};
+
+static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
+{
+ return readb(host->base + (((unsigned int)reg) << host->bsteps));
+}
+
+static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
+ reg_t reg, u8 val)
+{
+ writeb(val, host->base + (((unsigned)reg) << host->bsteps));
+ (void)__kw_read_reg(host, reg_subaddr);
+}
+
+#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
+#define kw_read_reg(reg) __kw_read_reg(host, reg)
+
+static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
+{
+ int i, j;
+ u8 isr;
+
+ for (i = 0; i < 1000; i++) {
+ isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
+ if (isr != 0)
+ return isr;
+
+ /* This code is used with the timebase frozen, we cannot rely
+ * on udelay nor schedule when in polled mode !
+ * For now, just use a bogus loop....
+ */
+ if (host->polled) {
+ for (j = 1; j < 100000; j++)
+ mb();
+ } else
+ msleep(1);
+ }
+ return isr;
+}
+
+static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result)
+{
+ kw_write_reg(reg_control, KW_I2C_CTL_STOP);
+ host->state = state_stop;
+ host->result = result;
+}
+
+
+static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
+{
+ u8 ack;
+
+ DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
+ __kw_state_names[host->state], isr);
+
+ if (host->state == state_idle) {
+ printk(KERN_WARNING "low_i2c: Keywest got an out of state"
+ " interrupt, ignoring\n");
+ kw_write_reg(reg_isr, isr);
+ return;
+ }
+
+ if (isr == 0) {
+ printk(KERN_WARNING "low_i2c: Timeout in i2c transfer"
+ " on keywest !\n");
+ if (host->state != state_stop) {
+ kw_i2c_do_stop(host, -EIO);
+ return;
+ }
+ ack = kw_read_reg(reg_status);
+ if (ack & KW_I2C_STAT_BUSY)
+ kw_write_reg(reg_status, 0);
+ host->state = state_idle;
+ kw_write_reg(reg_ier, 0x00);
+ if (!host->polled)
+ complete(&host->complete);
+ return;
+ }
+
+ if (isr & KW_I2C_IRQ_ADDR) {
+ ack = kw_read_reg(reg_status);
+ if (host->state != state_addr) {
+ WRONG_STATE("KW_I2C_IRQ_ADDR");
+ kw_i2c_do_stop(host, -EIO);
+ }
+ if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
+ host->result = -ENXIO;
+ host->state = state_stop;
+ DBG_LOW("KW: NAK on address\n");
+ } else {
+ if (host->len == 0)
+ kw_i2c_do_stop(host, 0);
+ else if (host->rw) {
+ host->state = state_read;
+ if (host->len > 1)
+ kw_write_reg(reg_control,
+ KW_I2C_CTL_AAK);
+ } else {
+ host->state = state_write;
+ kw_write_reg(reg_data, *(host->data++));
+ host->len--;
+ }
+ }
+ kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
+ }
+
+ if (isr & KW_I2C_IRQ_DATA) {
+ if (host->state == state_read) {
+ *(host->data++) = kw_read_reg(reg_data);
+ host->len--;
+ kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
+ if (host->len == 0)
+ host->state = state_stop;
+ else if (host->len == 1)
+ kw_write_reg(reg_control, 0);
+ } else if (host->state == state_write) {
+ ack = kw_read_reg(reg_status);
+ if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
+ DBG_LOW("KW: nack on data write\n");
+ host->result = -EFBIG;
+ host->state = state_stop;
+ } else if (host->len) {
+ kw_write_reg(reg_data, *(host->data++));
+ host->len--;
+ } else
+ kw_i2c_do_stop(host, 0);
+ } else {
+ WRONG_STATE("KW_I2C_IRQ_DATA");
+ if (host->state != state_stop)
+ kw_i2c_do_stop(host, -EIO);
+ }
+ kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
+ }
+
+ if (isr & KW_I2C_IRQ_STOP) {
+ kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
+ if (host->state != state_stop) {
+ WRONG_STATE("KW_I2C_IRQ_STOP");
+ host->result = -EIO;
+ }
+ host->state = state_idle;
+ if (!host->polled)
+ complete(&host->complete);
+ }
+
+ /* Below should only happen in manual mode which we don't use ... */
+ if (isr & KW_I2C_IRQ_START)
+ kw_write_reg(reg_isr, KW_I2C_IRQ_START);
+
+}
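For readability, the normal flow of the handler above can be summarized as follows:

/*
 * Normal progression of the state machine driven by the ISR bits above:
 *
 *   state_addr   --IRQ_ADDR-->   state_read / state_write
 *                                (or state_stop for a zero-length
 *                                transfer or a NAKed address)
 *   state_read / state_write  --one IRQ_DATA per byte-->  state_stop
 *                                after the last byte
 *   state_stop   --IRQ_STOP-->   state_idle, completion is signalled
 *
 * A zero ISR (i.e. a timeout) forces a stop and eventually returns to
 * state_idle with -EIO.
 */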
+
+/* Interrupt handler */
+static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
+{
+ struct pmac_i2c_host_kw *host = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ del_timer(&host->timeout_timer);
+ kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
+ if (host->state != state_idle) {
+ host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+ add_timer(&host->timeout_timer);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void kw_i2c_timeout(unsigned long data)
+{
+ struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * If the timer is pending, that means we raced with the
+ * irq, in which case we just return
+ */
+ if (timer_pending(&host->timeout_timer))
+ goto skip;
+
+ kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
+ if (host->state != state_idle) {
+ host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+ add_timer(&host->timeout_timer);
+ }
+ skip:
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int kw_i2c_open(struct pmac_i2c_bus *bus)
+{
+ struct pmac_i2c_host_kw *host = bus->hostdata;
+ mutex_lock(&host->mutex);
+ return 0;
+}
+
+static void kw_i2c_close(struct pmac_i2c_bus *bus)
+{
+ struct pmac_i2c_host_kw *host = bus->hostdata;
+ mutex_unlock(&host->mutex);
+}
+
+static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
+{
+ struct pmac_i2c_host_kw *host = bus->hostdata;
+ u8 mode_reg = host->speed;
+ int use_irq = host->irq != NO_IRQ && !bus->polled;
+
+ /* Setup mode & subaddress if any */
+ switch(bus->mode) {
+ case pmac_i2c_mode_dumb:
+ return -EINVAL;
+ case pmac_i2c_mode_std:
+ mode_reg |= KW_I2C_MODE_STANDARD;
+ if (subsize != 0)
+ return -EINVAL;
+ break;
+ case pmac_i2c_mode_stdsub:
+ mode_reg |= KW_I2C_MODE_STANDARDSUB;
+ if (subsize != 1)
+ return -EINVAL;
+ break;
+ case pmac_i2c_mode_combined:
+ mode_reg |= KW_I2C_MODE_COMBINED;
+ if (subsize != 1)
+ return -EINVAL;
+ break;
+ }
+
+ /* Setup channel & clear pending irqs */
+ kw_write_reg(reg_isr, kw_read_reg(reg_isr));
+ kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
+ kw_write_reg(reg_status, 0);
+
+ /* Set up address and r/w bit, strip possible stale bus number from
+ * address top bits
+ */
+ kw_write_reg(reg_addr, addrdir & 0xff);
+
+ /* Set up the sub address */
+ if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
+ || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
+ kw_write_reg(reg_subaddr, subaddr);
+
+ /* Prepare for async operations */
+ host->data = data;
+ host->len = len;
+ host->state = state_addr;
+ host->result = 0;
+ host->rw = (addrdir & 1);
+ host->polled = bus->polled;
+
+ /* Enable interrupt if not using polled mode and interrupt is
+ * available
+ */
+ if (use_irq) {
+ /* Clear completion */
+ reinit_completion(&host->complete);
+ /* Ack stale interrupts */
+ kw_write_reg(reg_isr, kw_read_reg(reg_isr));
+ /* Arm timeout */
+ host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+ add_timer(&host->timeout_timer);
+ /* Enable emission */
+ kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
+ }
+
+ /* Start sending address */
+ kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
+
+ /* Wait for completion */
+ if (use_irq)
+ wait_for_completion(&host->complete);
+ else {
+ while(host->state != state_idle) {
+ unsigned long flags;
+
+ u8 isr = kw_i2c_wait_interrupt(host);
+ spin_lock_irqsave(&host->lock, flags);
+ kw_i2c_handle_interrupt(host, isr);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ }
+
+ /* Disable emission */
+ kw_write_reg(reg_ier, 0);
+
+ return host->result;
+}
+
+static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
+{
+ struct pmac_i2c_host_kw *host;
+ const u32 *psteps, *prate, *addrp;
+ u32 steps;
+
+ host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
+ if (host == NULL) {
+ printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
+ np->full_name);
+ return NULL;
+ }
+
+ /* Apple is kind enough to provide a valid AAPL,address property
+	 * on all i2c keywest nodes so far ... we would have to fall back
+ * to macio parsing if that wasn't the case
+ */
+ addrp = of_get_property(np, "AAPL,address", NULL);
+ if (addrp == NULL) {
+ printk(KERN_ERR "low_i2c: Can't find address for %s\n",
+ np->full_name);
+ kfree(host);
+ return NULL;
+ }
+ mutex_init(&host->mutex);
+ init_completion(&host->complete);
+ spin_lock_init(&host->lock);
+ init_timer(&host->timeout_timer);
+ host->timeout_timer.function = kw_i2c_timeout;
+ host->timeout_timer.data = (unsigned long)host;
+
+ psteps = of_get_property(np, "AAPL,address-step", NULL);
+ steps = psteps ? (*psteps) : 0x10;
+ for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
+ steps >>= 1;
+ /* Select interface rate */
+ host->speed = KW_I2C_MODE_25KHZ;
+ prate = of_get_property(np, "AAPL,i2c-rate", NULL);
+ if (prate) switch(*prate) {
+ case 100:
+ host->speed = KW_I2C_MODE_100KHZ;
+ break;
+ case 50:
+ host->speed = KW_I2C_MODE_50KHZ;
+ break;
+ case 25:
+ host->speed = KW_I2C_MODE_25KHZ;
+ break;
+ }
+ host->irq = irq_of_parse_and_map(np, 0);
+ if (host->irq == NO_IRQ)
+ printk(KERN_WARNING
+ "low_i2c: Failed to map interrupt for %s\n",
+ np->full_name);
+
+ host->base = ioremap((*addrp), 0x1000);
+ if (host->base == NULL) {
+ printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
+ np->full_name);
+ kfree(host);
+ return NULL;
+ }
+
+ /* Make sure IRQ is disabled */
+ kw_write_reg(reg_ier, 0);
+
+ /* Request chip interrupt. We set IRQF_NO_SUSPEND because we don't
+ * want that interrupt disabled between the 2 passes of driver
+ * suspend or we'll have issues running the pfuncs
+ */
+ if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND,
+ "keywest i2c", host))
+ host->irq = NO_IRQ;
+
+ printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
+ *addrp, host->irq, np->full_name);
+
+ return host;
+}
+
+
+static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
+ struct device_node *controller,
+ struct device_node *busnode,
+ int channel)
+{
+ struct pmac_i2c_bus *bus;
+
+ bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
+ if (bus == NULL)
+ return;
+
+ bus->controller = of_node_get(controller);
+ bus->busnode = of_node_get(busnode);
+ bus->type = pmac_i2c_bus_keywest;
+ bus->hostdata = host;
+ bus->channel = channel;
+ bus->mode = pmac_i2c_mode_std;
+ bus->open = kw_i2c_open;
+ bus->close = kw_i2c_close;
+ bus->xfer = kw_i2c_xfer;
+ mutex_init(&bus->mutex);
+ if (controller == busnode)
+ bus->flags = pmac_i2c_multibus;
+ list_add(&bus->link, &pmac_i2c_busses);
+
+ printk(KERN_INFO " channel %d bus %s\n", channel,
+ (controller == busnode) ? "<multibus>" : busnode->full_name);
+}
+
+static void __init kw_i2c_probe(void)
+{
+ struct device_node *np, *child, *parent;
+
+ /* Probe keywest-i2c busses */
+ for_each_compatible_node(np, "i2c","keywest-i2c") {
+ struct pmac_i2c_host_kw *host;
+ int multibus;
+
+ /* Found one, init a host structure */
+ host = kw_i2c_host_init(np);
+ if (host == NULL)
+ continue;
+
+ /* Now check if we have a multibus setup (old style) or if we
+ * have proper bus nodes. Note that the "new" way (proper bus
+ * nodes) might cause us to not create some busses that are
+ * kept hidden in the device-tree. In the future, we might
+ * want to work around that by creating busses without a node
+ * but not for now
+ */
+ child = of_get_next_child(np, NULL);
+ multibus = !child || strcmp(child->name, "i2c-bus");
+ of_node_put(child);
+
+ /* For a multibus setup, we get the bus count based on the
+ * parent type
+ */
+ if (multibus) {
+ int chans, i;
+
+ parent = of_get_parent(np);
+ if (parent == NULL)
+ continue;
+ chans = parent->name[0] == 'u' ? 2 : 1;
+ for (i = 0; i < chans; i++)
+ kw_i2c_add(host, np, np, i);
+ } else {
+ for (child = NULL;
+ (child = of_get_next_child(np, child)) != NULL;) {
+ const u32 *reg = of_get_property(child,
+ "reg", NULL);
+ if (reg == NULL)
+ continue;
+ kw_i2c_add(host, np, child, *reg);
+ }
+ }
+ }
+}
+
+
+/*
+ *
+ * PMU implementation
+ *
+ */
+
+#ifdef CONFIG_ADB_PMU
+
+/*
+ * i2c command block to the PMU
+ */
+struct pmu_i2c_hdr {
+ u8 bus;
+ u8 mode;
+ u8 bus2;
+ u8 address;
+ u8 sub_addr;
+ u8 comb_addr;
+ u8 count;
+ u8 data[];
+};
+
+static void pmu_i2c_complete(struct adb_request *req)
+{
+ complete(req->arg);
+}
+
+static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
+{
+ struct adb_request *req = bus->hostdata;
+ struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
+ struct completion comp;
+ int read = addrdir & 1;
+ int retry;
+ int rc = 0;
+
+ /* For now, limit ourselves to 16 bytes transfers */
+ if (len > 16)
+ return -EINVAL;
+
+ init_completion(&comp);
+
+ for (retry = 0; retry < 16; retry++) {
+ memset(req, 0, sizeof(struct adb_request));
+ hdr->bus = bus->channel;
+ hdr->count = len;
+
+ switch(bus->mode) {
+ case pmac_i2c_mode_std:
+ if (subsize != 0)
+ return -EINVAL;
+ hdr->address = addrdir;
+ hdr->mode = PMU_I2C_MODE_SIMPLE;
+ break;
+ case pmac_i2c_mode_stdsub:
+ case pmac_i2c_mode_combined:
+ if (subsize != 1)
+ return -EINVAL;
+ hdr->address = addrdir & 0xfe;
+ hdr->comb_addr = addrdir;
+ hdr->sub_addr = subaddr;
+ if (bus->mode == pmac_i2c_mode_stdsub)
+ hdr->mode = PMU_I2C_MODE_STDSUB;
+ else
+ hdr->mode = PMU_I2C_MODE_COMBINED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reinit_completion(&comp);
+ req->data[0] = PMU_I2C_CMD;
+ req->reply[0] = 0xff;
+ req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
+ req->done = pmu_i2c_complete;
+ req->arg = &comp;
+ if (!read && len) {
+ memcpy(hdr->data, data, len);
+ req->nbytes += len;
+ }
+ rc = pmu_queue_request(req);
+ if (rc)
+ return rc;
+ wait_for_completion(&comp);
+ if (req->reply[0] == PMU_I2C_STATUS_OK)
+ break;
+ msleep(15);
+ }
+ if (req->reply[0] != PMU_I2C_STATUS_OK)
+ return -EIO;
+
+ for (retry = 0; retry < 16; retry++) {
+ memset(req, 0, sizeof(struct adb_request));
+
+ /* I know that looks like a lot, slow as hell, but darwin
+ * does it so let's be on the safe side for now
+ */
+ msleep(15);
+
+ hdr->bus = PMU_I2C_BUS_STATUS;
+
+ reinit_completion(&comp);
+ req->data[0] = PMU_I2C_CMD;
+ req->reply[0] = 0xff;
+ req->nbytes = 2;
+ req->done = pmu_i2c_complete;
+ req->arg = &comp;
+ rc = pmu_queue_request(req);
+ if (rc)
+ return rc;
+ wait_for_completion(&comp);
+
+ if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
+ return 0;
+ if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
+ int rlen = req->reply_len - 1;
+
+ if (rlen != len) {
+ printk(KERN_WARNING "low_i2c: PMU returned %d"
+ " bytes, expected %d !\n", rlen, len);
+ return -EIO;
+ }
+ if (len)
+ memcpy(data, &req->reply[1], len);
+ return 0;
+ }
+ }
+ return -EIO;
+}
+
+static void __init pmu_i2c_probe(void)
+{
+ struct pmac_i2c_bus *bus;
+ struct device_node *busnode;
+ int channel, sz;
+
+ if (!pmu_present())
+ return;
+
+ /* There might or might not be a "pmu-i2c" node, we use that
+ * or via-pmu itself, whatever we find. I haven't seen a machine
+ * with separate bus nodes, so we assume a multibus setup
+ */
+ busnode = of_find_node_by_name(NULL, "pmu-i2c");
+ if (busnode == NULL)
+ busnode = of_find_node_by_name(NULL, "via-pmu");
+ if (busnode == NULL)
+ return;
+
+ printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
+
+ /*
+ * We add bus 1 and 2 only for now, bus 0 is "special"
+ */
+ for (channel = 1; channel <= 2; channel++) {
+ sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
+ bus = kzalloc(sz, GFP_KERNEL);
+ if (bus == NULL)
+ return;
+
+ bus->controller = busnode;
+ bus->busnode = busnode;
+ bus->type = pmac_i2c_bus_pmu;
+ bus->channel = channel;
+ bus->mode = pmac_i2c_mode_std;
+ bus->hostdata = bus + 1;
+ bus->xfer = pmu_i2c_xfer;
+ mutex_init(&bus->mutex);
+ bus->flags = pmac_i2c_multibus;
+ list_add(&bus->link, &pmac_i2c_busses);
+
+ printk(KERN_INFO " channel %d bus <multibus>\n", channel);
+ }
+}
+
+#endif /* CONFIG_ADB_PMU */
+
+
+/*
+ *
+ * SMU implementation
+ *
+ */
+
+#ifdef CONFIG_PMAC_SMU
+
+static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
+{
+ complete(misc);
+}
+
+static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
+{
+ struct smu_i2c_cmd *cmd = bus->hostdata;
+ struct completion comp;
+ int read = addrdir & 1;
+ int rc = 0;
+
+ if ((read && len > SMU_I2C_READ_MAX) ||
+ ((!read) && len > SMU_I2C_WRITE_MAX))
+ return -EINVAL;
+
+ memset(cmd, 0, sizeof(struct smu_i2c_cmd));
+ cmd->info.bus = bus->channel;
+ cmd->info.devaddr = addrdir;
+ cmd->info.datalen = len;
+
+ switch(bus->mode) {
+ case pmac_i2c_mode_std:
+ if (subsize != 0)
+ return -EINVAL;
+ cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
+ break;
+ case pmac_i2c_mode_stdsub:
+ case pmac_i2c_mode_combined:
+ if (subsize > 3 || subsize < 1)
+ return -EINVAL;
+ cmd->info.sublen = subsize;
+ /* that's big-endian only but heh ! */
+ memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
+ subsize);
+ if (bus->mode == pmac_i2c_mode_stdsub)
+ cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
+ else
+ cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!read && len)
+ memcpy(cmd->info.data, data, len);
+
+ init_completion(&comp);
+ cmd->done = smu_i2c_complete;
+ cmd->misc = &comp;
+ rc = smu_queue_i2c(cmd);
+ if (rc < 0)
+ return rc;
+ wait_for_completion(&comp);
+ rc = cmd->status;
+
+ if (read && len)
+ memcpy(data, cmd->info.data, len);
+ return rc < 0 ? rc : 0;
+}
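The sub-address memcpy above relies on the CPU being big-endian (as on all PowerMacs); an equivalent, endian-neutral formulation is sketched below as a hypothetical helper, for illustration only:

/* Equivalent of the memcpy above on a big-endian CPU: take the low
 * 'subsize' bytes of subaddr, most significant byte first.  E.g. for
 * subsize == 2 and subaddr == 0x1234, dst receives 0x12, 0x34. */
static inline void example_pack_subaddr(u8 *dst, u32 subaddr, int subsize)
{
	int i;

	for (i = 0; i < subsize; i++)
		dst[i] = (subaddr >> (8 * (subsize - 1 - i))) & 0xff;
}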
+
+static void __init smu_i2c_probe(void)
+{
+ struct device_node *controller, *busnode;
+ struct pmac_i2c_bus *bus;
+ const u32 *reg;
+ int sz;
+
+ if (!smu_present())
+ return;
+
+ controller = of_find_node_by_name(NULL, "smu-i2c-control");
+ if (controller == NULL)
+ controller = of_find_node_by_name(NULL, "smu");
+ if (controller == NULL)
+ return;
+
+ printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
+
+	/* Look for children; note that they might not be of the right
+ * type as older device trees mix i2c busses and other things
+ * at the same level
+ */
+ for (busnode = NULL;
+ (busnode = of_get_next_child(controller, busnode)) != NULL;) {
+ if (strcmp(busnode->type, "i2c") &&
+ strcmp(busnode->type, "i2c-bus"))
+ continue;
+ reg = of_get_property(busnode, "reg", NULL);
+ if (reg == NULL)
+ continue;
+
+ sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
+ bus = kzalloc(sz, GFP_KERNEL);
+ if (bus == NULL)
+ return;
+
+ bus->controller = controller;
+ bus->busnode = of_node_get(busnode);
+ bus->type = pmac_i2c_bus_smu;
+ bus->channel = *reg;
+ bus->mode = pmac_i2c_mode_std;
+ bus->hostdata = bus + 1;
+ bus->xfer = smu_i2c_xfer;
+ mutex_init(&bus->mutex);
+ bus->flags = 0;
+ list_add(&bus->link, &pmac_i2c_busses);
+
+ printk(KERN_INFO " channel %x bus %s\n",
+ bus->channel, busnode->full_name);
+ }
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+/*
+ *
+ * Core code
+ *
+ */
+
+
+struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
+{
+ struct device_node *p = of_node_get(node);
+ struct device_node *prev = NULL;
+ struct pmac_i2c_bus *bus;
+
+ while(p) {
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ if (p == bus->busnode) {
+ if (prev && bus->flags & pmac_i2c_multibus) {
+ const u32 *reg;
+ reg = of_get_property(prev, "reg",
+ NULL);
+ if (!reg)
+ continue;
+ if (((*reg) >> 8) != bus->channel)
+ continue;
+ }
+ of_node_put(p);
+ of_node_put(prev);
+ return bus;
+ }
+ }
+ of_node_put(prev);
+ prev = p;
+ p = of_get_parent(p);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_find_bus);
+
+u8 pmac_i2c_get_dev_addr(struct device_node *device)
+{
+ const u32 *reg = of_get_property(device, "reg", NULL);
+
+ if (reg == NULL)
+ return 0;
+
+ return (*reg) & 0xff;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr);
+
+struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus)
+{
+ return bus->controller;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_controller);
+
+struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus)
+{
+ return bus->busnode;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node);
+
+int pmac_i2c_get_type(struct pmac_i2c_bus *bus)
+{
+ return bus->type;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_type);
+
+int pmac_i2c_get_flags(struct pmac_i2c_bus *bus)
+{
+ return bus->flags;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_flags);
+
+int pmac_i2c_get_channel(struct pmac_i2c_bus *bus)
+{
+ return bus->channel;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_channel);
+
+
+struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus)
+{
+ return &bus->adapter;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter);
+
+struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter)
+{
+ struct pmac_i2c_bus *bus;
+
+ list_for_each_entry(bus, &pmac_i2c_busses, link)
+ if (&bus->adapter == adapter)
+ return bus;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus);
+
+int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter)
+{
+ struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
+
+ if (bus == NULL)
+ return 0;
+ return (&bus->adapter == adapter);
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter);
+
+int pmac_low_i2c_lock(struct device_node *np)
+{
+ struct pmac_i2c_bus *bus, *found = NULL;
+
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ if (np == bus->controller) {
+ found = bus;
+ break;
+ }
+ }
+ if (!found)
+ return -ENODEV;
+ return pmac_i2c_open(bus, 0);
+}
+EXPORT_SYMBOL_GPL(pmac_low_i2c_lock);
+
+int pmac_low_i2c_unlock(struct device_node *np)
+{
+ struct pmac_i2c_bus *bus, *found = NULL;
+
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ if (np == bus->controller) {
+ found = bus;
+ break;
+ }
+ }
+ if (!found)
+ return -ENODEV;
+ pmac_i2c_close(bus);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock);
+
+
+int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled)
+{
+ int rc;
+
+ mutex_lock(&bus->mutex);
+ bus->polled = polled || pmac_i2c_force_poll;
+ bus->opened = 1;
+ bus->mode = pmac_i2c_mode_std;
+ if (bus->open && (rc = bus->open(bus)) != 0) {
+ bus->opened = 0;
+ mutex_unlock(&bus->mutex);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_open);
+
+void pmac_i2c_close(struct pmac_i2c_bus *bus)
+{
+ WARN_ON(!bus->opened);
+ if (bus->close)
+ bus->close(bus);
+ bus->opened = 0;
+ mutex_unlock(&bus->mutex);
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_close);
+
+int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
+{
+ WARN_ON(!bus->opened);
+
+ /* Report me if you see the error below as there might be a new
+ * "combined4" mode that I need to implement for the SMU bus
+ */
+ if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
+ printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
+ " bus %s !\n", mode, bus->busnode->full_name);
+ return -EINVAL;
+ }
+ bus->mode = mode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_setmode);
+
+int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
+{
+ int rc;
+
+ WARN_ON(!bus->opened);
+
+ DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
+ " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
+ subaddr, len, bus->busnode->full_name);
+
+ rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
+
+#ifdef DEBUG
+ if (rc)
+ DBG("xfer error %d\n", rc);
+#endif
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
+
+/* some quirks for platform function decoding */
+enum {
+ pmac_i2c_quirk_invmask = 0x00000001u,
+ pmac_i2c_quirk_skip = 0x00000002u,
+};
+
+static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
+ int quirks))
+{
+ struct pmac_i2c_bus *bus;
+ struct device_node *np;
+ static struct whitelist_ent {
+ char *name;
+ char *compatible;
+ int quirks;
+ } whitelist[] = {
+		/* XXX Study device-trees & Apple drivers to get the quirks
+ * right !
+ */
+ /* Workaround: It seems that running the clockspreading
+ * properties on the eMac will cause lockups during boot.
+ * The machine seems to work fine without that. So for now,
+		 * let's make sure i2c-hwclock doesn't match those "imic"
+ * clocks and we'll figure out if we really need to do
+ * something special about those later.
+ */
+ { "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
+ { "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
+ { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
+ { "i2c-cpu-voltage", NULL, 0},
+ { "temp-monitor", NULL, 0 },
+ { "supply-monitor", NULL, 0 },
+ { NULL, NULL, 0 },
+ };
+
+	/* Only some devices need to have platform functions instantiated
+ * here. For now, we have a table. Others, like 9554 i2c GPIOs used
+ * on Xserve, if we ever do a driver for them, will use their own
+ * platform function instance
+ */
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ for (np = NULL;
+ (np = of_get_next_child(bus->busnode, np)) != NULL;) {
+ struct whitelist_ent *p;
+ /* If multibus, check if device is on that bus */
+ if (bus->flags & pmac_i2c_multibus)
+ if (bus != pmac_i2c_find_bus(np))
+ continue;
+ for (p = whitelist; p->name != NULL; p++) {
+ if (strcmp(np->name, p->name))
+ continue;
+ if (p->compatible &&
+ !of_device_is_compatible(np, p->compatible))
+ continue;
+ if (p->quirks & pmac_i2c_quirk_skip)
+ break;
+ callback(np, p->quirks);
+ break;
+ }
+ }
+ }
+}
+
+#define MAX_I2C_DATA 64
+
+struct pmac_i2c_pf_inst
+{
+ struct pmac_i2c_bus *bus;
+ u8 addr;
+ u8 buffer[MAX_I2C_DATA];
+ u8 scratch[MAX_I2C_DATA];
+ int bytes;
+ int quirks;
+};
+
+static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
+{
+ struct pmac_i2c_pf_inst *inst;
+ struct pmac_i2c_bus *bus;
+
+ bus = pmac_i2c_find_bus(func->node);
+ if (bus == NULL) {
+ printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
+ func->node->full_name);
+ return NULL;
+ }
+ if (pmac_i2c_open(bus, 0)) {
+ printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
+ func->node->full_name);
+ return NULL;
+ }
+
+ /* XXX might need GFP_ATOMIC when called during the suspend process,
+ * but then, there are already lots of issues with suspending when
+ * near OOM that need to be resolved, the allocator itself should
+ * probably make GFP_NOIO implicit during suspend
+ */
+ inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
+ if (inst == NULL) {
+ pmac_i2c_close(bus);
+ return NULL;
+ }
+ inst->bus = bus;
+ inst->addr = pmac_i2c_get_dev_addr(func->node);
+ inst->quirks = (int)(long)func->driver_data;
+ return inst;
+}
+
+static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ if (inst == NULL)
+ return;
+ pmac_i2c_close(inst->bus);
+ kfree(inst);
+}
+
+static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ inst->bytes = len;
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
+ inst->buffer, len);
+}
+
+static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
+ (u8 *)data, len);
+}
+
+/* This function is used to do the masking & OR'ing for the "rmw" type
+ * callbacks. We should apply the mask and OR in the values in the
+ * buffer before writing back. The problem is that it seems that
+ * various darwin drivers implement the mask/or differently, thus
+ * we need to check the quirks first
+ */
+static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
+ u32 len, const u8 *mask, const u8 *val)
+{
+ int i;
+
+ if (inst->quirks & pmac_i2c_quirk_invmask) {
+ for (i = 0; i < len; i ++)
+ inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
+ } else {
+ for (i = 0; i < len; i ++)
+ inst->scratch[i] = (inst->buffer[i] & ~mask[i])
+ | (val[i] & mask[i]);
+ }
+}
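A worked example of the two variants above, with hypothetical byte values:

/* With buffer[i] == 0x5a, mask[i] == 0x0f and val[i] == 0x03:
 *
 *   default:                 (0x5a & ~0x0f) | (0x03 & 0x0f)  ->  0x53
 *   pmac_i2c_quirk_invmask:  (0x5a &  0x0f) |  0x03          ->  0x0b
 *
 * i.e. the "invmask" quirk treats the mask as the set of bits to keep
 * from the current value rather than the set of bits to replace.
 */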
+
+static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata,
+ const u8 *valuedata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ if (masklen > inst->bytes || valuelen > inst->bytes ||
+ totallen > inst->bytes || valuelen > masklen)
+ return -EINVAL;
+
+ pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
+ inst->scratch, totallen);
+}
+
+static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ inst->bytes = len;
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
+ inst->buffer, len);
+}
+
+static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
+ const u8 *data)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
+ subaddr, (u8 *)data, len);
+}
+
+static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ return pmac_i2c_setmode(inst->bus, mode);
+}
+
+static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen,
+ u32 valuelen, u32 totallen, const u8 *maskdata,
+ const u8 *valuedata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ if (masklen > inst->bytes || valuelen > inst->bytes ||
+ totallen > inst->bytes || valuelen > masklen)
+ return -EINVAL;
+
+ pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
+ subaddr, inst->scratch, totallen);
+}
+
+static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len,
+ const u8 *maskdata,
+ const u8 *valuedata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+ int i, match;
+
+ /* Get return value pointer, it's assumed to be a u32 */
+ if (!args || !args->count || !args->u[0].p)
+ return -EINVAL;
+
+ /* Check buffer */
+ if (len > inst->bytes)
+ return -EINVAL;
+
+ for (i = 0, match = 1; match && i < len; i ++)
+ if ((inst->buffer[i] & maskdata[i]) != valuedata[i])
+ match = 0;
+ *args->u[0].p = match;
+ return 0;
+}
+
+static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration)
+{
+ msleep((duration + 999) / 1000);
+ return 0;
+}
+
+
+static struct pmf_handlers pmac_i2c_pfunc_handlers = {
+ .begin = pmac_i2c_do_begin,
+ .end = pmac_i2c_do_end,
+ .read_i2c = pmac_i2c_do_read,
+ .write_i2c = pmac_i2c_do_write,
+ .rmw_i2c = pmac_i2c_do_rmw,
+ .read_i2c_sub = pmac_i2c_do_read_sub,
+ .write_i2c_sub = pmac_i2c_do_write_sub,
+ .rmw_i2c_sub = pmac_i2c_do_rmw_sub,
+ .set_i2c_mode = pmac_i2c_do_set_mode,
+ .mask_and_compare = pmac_i2c_do_mask_and_comp,
+ .delay = pmac_i2c_do_delay,
+};
+
+static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
+{
+ DBG("dev_create(%s)\n", np->full_name);
+
+ pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
+ (void *)(long)quirks);
+}
+
+static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
+{
+	DBG("dev_init(%s)\n", np->full_name);
+
+ pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+}
+
+static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
+{
+ DBG("dev_suspend(%s)\n", np->full_name);
+ pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
+}
+
+static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
+{
+ DBG("dev_resume(%s)\n", np->full_name);
+ pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
+}
+
+void pmac_pfunc_i2c_suspend(void)
+{
+ pmac_i2c_devscan(pmac_i2c_dev_suspend);
+}
+
+void pmac_pfunc_i2c_resume(void)
+{
+ pmac_i2c_devscan(pmac_i2c_dev_resume);
+}
+
+/*
+ * Initialize us: probe all i2c busses on the machine, instantiate
+ * busses and platform functions as needed.
+ */
+/* This is non-static as it might be called early by smp code */
+int __init pmac_i2c_init(void)
+{
+ static int i2c_inited;
+
+ if (i2c_inited)
+ return 0;
+ i2c_inited = 1;
+
+ /* Probe keywest-i2c busses */
+ kw_i2c_probe();
+
+#ifdef CONFIG_ADB_PMU
+ /* Probe PMU i2c busses */
+ pmu_i2c_probe();
+#endif
+
+#ifdef CONFIG_PMAC_SMU
+ /* Probe SMU i2c busses */
+ smu_i2c_probe();
+#endif
+
+	/* Now add platform functions for some known devices */
+ pmac_i2c_devscan(pmac_i2c_dev_create);
+
+ return 0;
+}
+machine_arch_initcall(powermac, pmac_i2c_init);
+
+/* Since pmac_i2c_init can be called too early for the platform device
+ * registration, we need to do it at a later time. In our case, subsys
+ * happens to fit well, though I agree it's a bit of a hack...
+ */
+static int __init pmac_i2c_create_platform_devices(void)
+{
+ struct pmac_i2c_bus *bus;
+ int i = 0;
+
+ /* In the case where we are initialized from smp_init(), we must
+ * not use the timer (and thus the irq). It's safe from now on
+ * though
+ */
+ pmac_i2c_force_poll = 0;
+
+ /* Create platform devices */
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ bus->platform_dev =
+ platform_device_alloc("i2c-powermac", i++);
+ if (bus->platform_dev == NULL)
+ return -ENOMEM;
+ bus->platform_dev->dev.platform_data = bus;
+ bus->platform_dev->dev.of_node = bus->busnode;
+ platform_device_add(bus->platform_dev);
+ }
+
+ /* Now call platform "init" functions */
+ pmac_i2c_devscan(pmac_i2c_dev_init);
+
+ return 0;
+}
+machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices);
diff --git a/kernel/arch/powerpc/platforms/powermac/nvram.c b/kernel/arch/powerpc/platforms/powermac/nvram.c
new file mode 100644
index 000000000..60b03a170
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/nvram.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Todo: - add support for the OF persistent properties
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/nvram.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/bootmem.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/nvram.h>
+
+#include "pmac.h"
+
+#define DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */
+
+#define CORE99_SIGNATURE 0x5a
+#define CORE99_ADLER_START 0x14
+
+/* On Core99, nvram is either a sharp, a micron or an AMD flash */
+#define SM_FLASH_STATUS_DONE 0x80
+#define SM_FLASH_STATUS_ERR 0x38
+
+#define SM_FLASH_CMD_ERASE_CONFIRM 0xd0
+#define SM_FLASH_CMD_ERASE_SETUP 0x20
+#define SM_FLASH_CMD_RESET 0xff
+#define SM_FLASH_CMD_WRITE_SETUP 0x40
+#define SM_FLASH_CMD_CLEAR_STATUS 0x50
+#define SM_FLASH_CMD_READ_STATUS 0x70
+
+/* CHRP NVRAM header */
+struct chrp_header {
+ u8 signature;
+ u8 cksum;
+ u16 len;
+ char name[12];
+ u8 data[0];
+};
+
+struct core99_header {
+ struct chrp_header hdr;
+ u32 adler;
+ u32 generation;
+ u32 reserved[2];
+};
+
+/*
+ * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
+ */
+static int nvram_naddrs;
+static volatile unsigned char __iomem *nvram_data;
+static int is_core_99;
+static int core99_bank = 0;
+static int nvram_partitions[3];
+// XXX Turn that into a sem
+static DEFINE_RAW_SPINLOCK(nv_lock);
+
+static int (*core99_write_bank)(int bank, u8* datas);
+static int (*core99_erase_bank)(int bank);
+
+static char *nvram_image;
+
+
+static unsigned char core99_nvram_read_byte(int addr)
+{
+ if (nvram_image == NULL)
+ return 0xff;
+ return nvram_image[addr];
+}
+
+static void core99_nvram_write_byte(int addr, unsigned char val)
+{
+ if (nvram_image == NULL)
+ return;
+ nvram_image[addr] = val;
+}
+
+static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
+{
+ int i;
+
+ if (nvram_image == NULL)
+ return -ENODEV;
+ if (*index > NVRAM_SIZE)
+ return 0;
+
+ i = *index;
+ if (i + count > NVRAM_SIZE)
+ count = NVRAM_SIZE - i;
+
+ memcpy(buf, &nvram_image[i], count);
+ *index = i + count;
+ return count;
+}
+
+static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
+{
+ int i;
+
+ if (nvram_image == NULL)
+ return -ENODEV;
+ if (*index > NVRAM_SIZE)
+ return 0;
+
+ i = *index;
+ if (i + count > NVRAM_SIZE)
+ count = NVRAM_SIZE - i;
+
+ memcpy(&nvram_image[i], buf, count);
+ *index = i + count;
+ return count;
+}
+
+static ssize_t core99_nvram_size(void)
+{
+ if (nvram_image == NULL)
+ return -ENODEV;
+ return NVRAM_SIZE;
+}
+
+#ifdef CONFIG_PPC32
+static volatile unsigned char __iomem *nvram_addr;
+static int nvram_mult;
+
+static unsigned char direct_nvram_read_byte(int addr)
+{
+ return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
+}
+
+static void direct_nvram_write_byte(int addr, unsigned char val)
+{
+ out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
+}
+
+
+static unsigned char indirect_nvram_read_byte(int addr)
+{
+ unsigned char val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&nv_lock, flags);
+ out_8(nvram_addr, addr >> 5);
+ val = in_8(&nvram_data[(addr & 0x1f) << 4]);
+ raw_spin_unlock_irqrestore(&nv_lock, flags);
+
+ return val;
+}
+
+static void indirect_nvram_write_byte(int addr, unsigned char val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&nv_lock, flags);
+ out_8(nvram_addr, addr >> 5);
+ out_8(&nvram_data[(addr & 0x1f) << 4], val);
+ raw_spin_unlock_irqrestore(&nv_lock, flags);
+}
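To make the indirect addressing above concrete (address value hypothetical):

/* For addr == 0x1234: the page register is written with 0x1234 >> 5 == 0x91
 * (32 bytes per page) and the byte is then accessed at offset
 * (0x1234 & 0x1f) << 4 == 0x140 of the data window, i.e. consecutive bytes
 * of a page sit 16 bytes apart in the register space. */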
+
+
+#ifdef CONFIG_ADB_PMU
+
+static void pmu_nvram_complete(struct adb_request *req)
+{
+ if (req->arg)
+ complete((struct completion *)req->arg);
+}
+
+static unsigned char pmu_nvram_read_byte(int addr)
+{
+ struct adb_request req;
+ DECLARE_COMPLETION_ONSTACK(req_complete);
+
+ req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
+ if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
+ (addr >> 8) & 0xff, addr & 0xff))
+ return 0xff;
+ if (system_state == SYSTEM_RUNNING)
+ wait_for_completion(&req_complete);
+ while (!req.complete)
+ pmu_poll();
+ return req.reply[0];
+}
+
+static void pmu_nvram_write_byte(int addr, unsigned char val)
+{
+ struct adb_request req;
+ DECLARE_COMPLETION_ONSTACK(req_complete);
+
+ req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
+ if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
+ (addr >> 8) & 0xff, addr & 0xff, val))
+ return;
+ if (system_state == SYSTEM_RUNNING)
+ wait_for_completion(&req_complete);
+ while (!req.complete)
+ pmu_poll();
+}
+
+#endif /* CONFIG_ADB_PMU */
+#endif /* CONFIG_PPC32 */
+
+static u8 chrp_checksum(struct chrp_header* hdr)
+{
+ u8 *ptr;
+ u16 sum = hdr->signature;
+ for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
+ sum += *ptr;
+ while (sum > 0xFF)
+ sum = (sum & 0xFF) + (sum>>8);
+ return sum;
+}
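A worked example of the fold above, using a hypothetical partition header:

/* Hypothetical header: signature 0x5a, len 0x0070 (big-endian bytes
 * 0x00 0x70) and name "common" NUL-padded to 12 bytes.  The byte sum is
 *   0x5a + 0x00 + 0x70 + 'c'+'o'+'m'+'m'+'o'+'n' == 0x353
 * and folding the carry back in gives (0x53 + 0x03) == 0x56, which is
 * what would be stored in hdr->cksum.  Note that the cksum field itself
 * is skipped: the sum covers the signature, then &hdr->len up to data. */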
+
+static u32 core99_calc_adler(u8 *buffer)
+{
+ int cnt;
+ u32 low, high;
+
+ buffer += CORE99_ADLER_START;
+ low = 1;
+ high = 0;
+ for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
+ if ((cnt % 5000) == 0) {
+			low %= 65521UL;
+			high %= 65521UL;
+ }
+ low += buffer[cnt];
+ high += low;
+ }
+ low %= 65521UL;
+ high %= 65521UL;
+
+ return (high << 16) | low;
+}
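The loop above is Adler-32 written out by hand; in equation form:

/* Over the n bytes b[0..n-1] starting at CORE99_ADLER_START:
 *
 *	low  = (1 + b[0] + b[1] + ... + b[n-1])                   mod 65521
 *	high = (n + n*b[0] + (n-1)*b[1] + ... + 1*b[n-1])         mod 65521
 *
 * and the stored value is (high << 16) | low.  The periodic reductions
 * inside the loop only exist to keep the 32-bit accumulators from
 * overflowing. */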
+
+static u32 core99_check(u8* datas)
+{
+ struct core99_header* hdr99 = (struct core99_header*)datas;
+
+ if (hdr99->hdr.signature != CORE99_SIGNATURE) {
+ DBG("Invalid signature\n");
+ return 0;
+ }
+ if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
+ DBG("Invalid checksum\n");
+ return 0;
+ }
+ if (hdr99->adler != core99_calc_adler(datas)) {
+ DBG("Invalid adler\n");
+ return 0;
+ }
+ return hdr99->generation;
+}
+
+static int sm_erase_bank(int bank)
+{
+ int stat;
+ unsigned long timeout;
+
+ u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
+
+ DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);
+
+ out_8(base, SM_FLASH_CMD_ERASE_SETUP);
+ out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);
+ timeout = 0;
+ do {
+ if (++timeout > 1000000) {
+ printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
+ break;
+ }
+ out_8(base, SM_FLASH_CMD_READ_STATUS);
+ stat = in_8(base);
+ } while (!(stat & SM_FLASH_STATUS_DONE));
+
+ out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
+ out_8(base, SM_FLASH_CMD_RESET);
+
+ if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int sm_write_bank(int bank, u8* datas)
+{
+ int i, stat = 0;
+ unsigned long timeout;
+
+ u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
+
+ DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);
+
+ for (i=0; i<NVRAM_SIZE; i++) {
+ out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
+ udelay(1);
+ out_8(base+i, datas[i]);
+ timeout = 0;
+ do {
+ if (++timeout > 1000000) {
+ printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
+ break;
+ }
+ out_8(base, SM_FLASH_CMD_READ_STATUS);
+ stat = in_8(base);
+ } while (!(stat & SM_FLASH_STATUS_DONE));
+ if (!(stat & SM_FLASH_STATUS_DONE))
+ break;
+ }
+ out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
+ out_8(base, SM_FLASH_CMD_RESET);
+ if (memcmp(base, datas, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int amd_erase_bank(int bank)
+{
+ int stat = 0;
+ unsigned long timeout;
+
+ u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
+
+ DBG("nvram: AMD Erasing bank %d...\n", bank);
+
+ /* Unlock 1 */
+ out_8(base+0x555, 0xaa);
+ udelay(1);
+ /* Unlock 2 */
+ out_8(base+0x2aa, 0x55);
+ udelay(1);
+
+ /* Sector-Erase */
+ out_8(base+0x555, 0x80);
+ udelay(1);
+ out_8(base+0x555, 0xaa);
+ udelay(1);
+ out_8(base+0x2aa, 0x55);
+ udelay(1);
+ out_8(base, 0x30);
+ udelay(1);
+
+ timeout = 0;
+ do {
+ if (++timeout > 1000000) {
+ printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
+ break;
+ }
+ stat = in_8(base) ^ in_8(base);
+ } while (stat != 0);
+
+ /* Reset */
+ out_8(base, 0xf0);
+ udelay(1);
+
+ if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: AMD flash erase failed !\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int amd_write_bank(int bank, u8* datas)
+{
+ int i, stat = 0;
+ unsigned long timeout;
+
+ u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
+
+ DBG("nvram: AMD Writing bank %d...\n", bank);
+
+ for (i=0; i<NVRAM_SIZE; i++) {
+ /* Unlock 1 */
+ out_8(base+0x555, 0xaa);
+ udelay(1);
+ /* Unlock 2 */
+ out_8(base+0x2aa, 0x55);
+ udelay(1);
+
+ /* Write single word */
+ out_8(base+0x555, 0xa0);
+ udelay(1);
+ out_8(base+i, datas[i]);
+
+ timeout = 0;
+ do {
+ if (++timeout > 1000000) {
+ printk(KERN_ERR "nvram: AMD flash write timeout !\n");
+ break;
+ }
+ stat = in_8(base) ^ in_8(base);
+ } while (stat != 0);
+ if (stat != 0)
+ break;
+ }
+
+ /* Reset */
+ out_8(base, 0xf0);
+ udelay(1);
+
+ if (memcmp(base, datas, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: AMD flash write failed !\n");
+ return -ENXIO;
+ }
+ return 0;
+}
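The in_8(base) ^ in_8(base) busy loops above use the JEDEC/AMD toggle-bit status (DQ6): while a program or erase operation is in progress, that bit toggles on every read, so two back-to-back reads differ. A hypothetical helper restating it:

/* Sketch only: returns non-zero while the AMD flash is still busy */
static inline int example_amd_flash_busy(u8 __iomem *base)
{
	return (in_8(base) ^ in_8(base)) != 0;
}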
+
+static void __init lookup_partitions(void)
+{
+ u8 buffer[17];
+ int i, offset;
+ struct chrp_header* hdr;
+
+ if (pmac_newworld) {
+ nvram_partitions[pmac_nvram_OF] = -1;
+ nvram_partitions[pmac_nvram_XPRAM] = -1;
+ nvram_partitions[pmac_nvram_NR] = -1;
+ hdr = (struct chrp_header *)buffer;
+
+ offset = 0;
+ buffer[16] = 0;
+ do {
+ for (i=0;i<16;i++)
+ buffer[i] = ppc_md.nvram_read_val(offset+i);
+ if (!strcmp(hdr->name, "common"))
+ nvram_partitions[pmac_nvram_OF] = offset + 0x10;
+ if (!strcmp(hdr->name, "APL,MacOS75")) {
+ nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
+ nvram_partitions[pmac_nvram_NR] = offset + 0x110;
+ }
+ offset += (hdr->len * 0x10);
+ } while(offset < NVRAM_SIZE);
+ } else {
+ nvram_partitions[pmac_nvram_OF] = 0x1800;
+ nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
+ nvram_partitions[pmac_nvram_NR] = 0x1400;
+ }
+ DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
+ DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
+ DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
+}
+
+static void core99_nvram_sync(void)
+{
+ struct core99_header* hdr99;
+ unsigned long flags;
+
+ if (!is_core_99 || !nvram_data || !nvram_image)
+ return;
+
+ raw_spin_lock_irqsave(&nv_lock, flags);
+ if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
+ NVRAM_SIZE))
+ goto bail;
+
+ DBG("Updating nvram...\n");
+
+ hdr99 = (struct core99_header*)nvram_image;
+ hdr99->generation++;
+ hdr99->hdr.signature = CORE99_SIGNATURE;
+ hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
+ hdr99->adler = core99_calc_adler(nvram_image);
+ core99_bank = core99_bank ? 0 : 1;
+ if (core99_erase_bank)
+ if (core99_erase_bank(core99_bank)) {
+ printk("nvram: Error erasing bank %d\n", core99_bank);
+ goto bail;
+ }
+ if (core99_write_bank)
+ if (core99_write_bank(core99_bank, nvram_image))
+ printk("nvram: Error writing bank %d\n", core99_bank);
+ bail:
+ raw_spin_unlock_irqrestore(&nv_lock, flags);
+
+#ifdef DEBUG
+ mdelay(2000);
+#endif
+}
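Worth noting about the sync path above:

/* The two NVRAM_SIZE banks are used in a ping-pong fashion: a sync bumps
 * the generation counter, recomputes the CHRP checksum and the adler,
 * then erases and rewrites the *other* bank.  A power loss in the middle
 * of a write therefore leaves the previously active bank intact, and
 * core99_nvram_setup() below picks whichever valid bank has the highest
 * generation at the next boot. */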
+
+static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
+{
+ int i;
+ u32 gen_bank0, gen_bank1;
+
+ if (nvram_naddrs < 1) {
+ printk(KERN_ERR "nvram: no address\n");
+ return -EINVAL;
+ }
+ nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0);
+ nvram_data = ioremap(addr, NVRAM_SIZE*2);
+ nvram_naddrs = 1; /* Make sure we get the correct case */
+
+ DBG("nvram: Checking bank 0...\n");
+
+ gen_bank0 = core99_check((u8 *)nvram_data);
+ gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
+ core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
+
+ DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
+ DBG("nvram: Active bank is: %d\n", core99_bank);
+
+ for (i=0; i<NVRAM_SIZE; i++)
+ nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
+
+ ppc_md.nvram_read_val = core99_nvram_read_byte;
+ ppc_md.nvram_write_val = core99_nvram_write_byte;
+ ppc_md.nvram_read = core99_nvram_read;
+ ppc_md.nvram_write = core99_nvram_write;
+ ppc_md.nvram_size = core99_nvram_size;
+ ppc_md.nvram_sync = core99_nvram_sync;
+ ppc_md.machine_shutdown = core99_nvram_sync;
+ /*
+ * Maybe we could be smarter here though making an exclusive list
+ * of known flash chips is a bit nasty as older OF didn't provide us
+ * with a useful "compatible" entry. A solution would be to really
+ * identify the chip using flash id commands and base ourselves on
+ * a list of known chips IDs
+ */
+ if (of_device_is_compatible(dp, "amd-0137")) {
+ core99_erase_bank = amd_erase_bank;
+ core99_write_bank = amd_write_bank;
+ } else {
+ core99_erase_bank = sm_erase_bank;
+ core99_write_bank = sm_write_bank;
+ }
+ return 0;
+}
+
+int __init pmac_nvram_init(void)
+{
+ struct device_node *dp;
+ struct resource r1, r2;
+ unsigned int s1 = 0, s2 = 0;
+ int err = 0;
+
+ nvram_naddrs = 0;
+
+ dp = of_find_node_by_name(NULL, "nvram");
+ if (dp == NULL) {
+ printk(KERN_ERR "Can't find NVRAM device\n");
+ return -ENODEV;
+ }
+
+ /* Try to obtain an address */
+ if (of_address_to_resource(dp, 0, &r1) == 0) {
+ nvram_naddrs = 1;
+ s1 = resource_size(&r1);
+ if (of_address_to_resource(dp, 1, &r2) == 0) {
+ nvram_naddrs = 2;
+ s2 = resource_size(&r2);
+ }
+ }
+
+ is_core_99 = of_device_is_compatible(dp, "nvram,flash");
+ if (is_core_99) {
+ err = core99_nvram_setup(dp, r1.start);
+ goto bail;
+ }
+
+#ifdef CONFIG_PPC32
+ if (machine_is(chrp) && nvram_naddrs == 1) {
+ nvram_data = ioremap(r1.start, s1);
+ nvram_mult = 1;
+ ppc_md.nvram_read_val = direct_nvram_read_byte;
+ ppc_md.nvram_write_val = direct_nvram_write_byte;
+ } else if (nvram_naddrs == 1) {
+ nvram_data = ioremap(r1.start, s1);
+ nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
+ ppc_md.nvram_read_val = direct_nvram_read_byte;
+ ppc_md.nvram_write_val = direct_nvram_write_byte;
+ } else if (nvram_naddrs == 2) {
+ nvram_addr = ioremap(r1.start, s1);
+ nvram_data = ioremap(r2.start, s2);
+ ppc_md.nvram_read_val = indirect_nvram_read_byte;
+ ppc_md.nvram_write_val = indirect_nvram_write_byte;
+ } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
+#ifdef CONFIG_ADB_PMU
+ nvram_naddrs = -1;
+ ppc_md.nvram_read_val = pmu_nvram_read_byte;
+ ppc_md.nvram_write_val = pmu_nvram_write_byte;
+#endif /* CONFIG_ADB_PMU */
+ } else {
+ printk(KERN_ERR "Incompatible type of NVRAM\n");
+ err = -ENXIO;
+ }
+#endif /* CONFIG_PPC32 */
+bail:
+ of_node_put(dp);
+ if (err == 0)
+ lookup_partitions();
+ return err;
+}
+
+int pmac_get_partition(int partition)
+{
+ return nvram_partitions[partition];
+}
+
+u8 pmac_xpram_read(int xpaddr)
+{
+ int offset = pmac_get_partition(pmac_nvram_XPRAM);
+
+ if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
+ return 0xff;
+
+ return ppc_md.nvram_read_val(xpaddr + offset);
+}
+
+void pmac_xpram_write(int xpaddr, u8 data)
+{
+ int offset = pmac_get_partition(pmac_nvram_XPRAM);
+
+ if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
+ return;
+
+ ppc_md.nvram_write_val(xpaddr + offset, data);
+}
+
+EXPORT_SYMBOL(pmac_get_partition);
+EXPORT_SYMBOL(pmac_xpram_read);
+EXPORT_SYMBOL(pmac_xpram_write);
diff --git a/kernel/arch/powerpc/platforms/powermac/pci.c b/kernel/arch/powerpc/platforms/powermac/pci.c
new file mode 100644
index 000000000..59ab16fa6
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/pci.c
@@ -0,0 +1,1255 @@
+/*
+ * Support for PCI bridges found on Power Macintoshes.
+ *
+ * Copyright (C) 2003-2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/of_pci.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/grackle.h>
+#include <asm/ppc-pci.h>
+
+#include "pmac.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* XXX Could be per-controller, but I don't think we risk anything by
+ * assuming we won't have both UniNorth and Bandit */
+static int has_uninorth;
+#ifdef CONFIG_PPC64
+static struct pci_controller *u3_agp;
+#else
+static int has_second_ohare;
+#endif /* CONFIG_PPC64 */
+
+extern int pcibios_assign_bus_offset;
+
+struct device_node *k2_skiplist[2];
+
+/*
+ * Magic constants for enabling cache coherency in the bandit/PSX bridge.
+ */
+#define BANDIT_DEVID_2 8
+#define BANDIT_REVID 3
+
+#define BANDIT_DEVNUM 11
+#define BANDIT_MAGIC 0x50
+#define BANDIT_COHERENT 0x40
+
+static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
+{
+ for (; node != NULL; node = node->sibling) {
+ const int * bus_range;
+ const unsigned int *class_code;
+ int len;
+
+ /* For PCI<->PCI bridges or CardBus bridges, we go down */
+ class_code = of_get_property(node, "class-code", NULL);
+ if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
+ (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
+ continue;
+ bus_range = of_get_property(node, "bus-range", &len);
+ if (bus_range != NULL && len > 2 * sizeof(int)) {
+ if (bus_range[1] > higher)
+ higher = bus_range[1];
+ }
+ higher = fixup_one_level_bus_range(node->child, higher);
+ }
+ return higher;
+}
+
+/* This routine fixes the "bus-range" property of all bridges in the
+ * system since they tend to have their "last" member wrong on macs
+ *
+ * Note that the bus numbers manipulated here are OF bus numbers, they
+ * are not Linux bus numbers.
+ */
+static void __init fixup_bus_range(struct device_node *bridge)
+{
+ int *bus_range, len;
+ struct property *prop;
+
+ /* Lookup the "bus-range" property for the hose */
+ prop = of_find_property(bridge, "bus-range", &len);
+ if (prop == NULL || prop->length < 2 * sizeof(int))
+ return;
+
+ bus_range = prop->value;
+ bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
+}
+
+/*
+ * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
+ *
+ * The "Bandit" version is present in all early PCI PowerMacs,
+ * and up to the first ones using Grackle. Some machines may
+ * have 2 bandit controllers (2 PCI busses).
+ *
+ * "Chaos" is used in some "Bandit"-type machines as a bridge
+ * for the separate display bus. It is accessed the same
+ * way as bandit, but cannot be probed for devices. It therefore
+ * has its own config access functions.
+ *
+ * The "UniNorth" version is present in all Core99 machines
+ * (iBook, G4, new iMacs, and all the recent Apple machines).
+ * It contains 3 controllers in one ASIC.
+ *
+ * The U3 is the bridge used on G5 machines. It contains an
+ * AGP bus, which is handled by the old UniNorth access routines,
+ * and a HyperTransport bus, which uses its own set of access
+ * functions.
+ */
+
+#define MACRISC_CFA0(devfn, off) \
+ ((1 << (unsigned int)PCI_SLOT(devfn)) \
+ | (((unsigned int)PCI_FUNC(devfn)) << 8) \
+ | (((unsigned int)(off)) & 0xFCUL))
+
+#define MACRISC_CFA1(bus, devfn, off) \
+ ((((unsigned int)(bus)) << 16) \
+ |(((unsigned int)(devfn)) << 8) \
+ |(((unsigned int)(off)) & 0xFCUL) \
+ |1UL)
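+
+/*
+ * Worked example of the config address encoding (slot, function, bus and
+ * offset values are made up): a type-0 cycle to the device in slot 13,
+ * function 0, at config offset 0x10 on the root bus encodes as
+ *
+ *   MACRISC_CFA0(PCI_DEVFN(13, 0), 0x10)
+ *       = (1 << 13) | (0 << 8) | 0x10 = 0x00002010
+ *
+ * while a type-1 cycle to bus 2, devfn PCI_DEVFN(4, 1), offset 0x3c is
+ *
+ *   MACRISC_CFA1(2, PCI_DEVFN(4, 1), 0x3c)
+ *       = (2 << 16) | (0x21 << 8) | 0x3c | 1 = 0x0002213d
+ *
+ * The encoded value is written to cfg_addr and the access then goes
+ * through cfg_data, as done in macrisc_cfg_map_bus() below.
+ */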
+
+static void __iomem *macrisc_cfg_map_bus(struct pci_bus *bus,
+ unsigned int dev_fn,
+ int offset)
+{
+ unsigned int caddr;
+ struct pci_controller *hose;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return NULL;
+
+ if (bus->number == hose->first_busno) {
+ if (dev_fn < (11 << 3))
+ return NULL;
+ caddr = MACRISC_CFA0(dev_fn, offset);
+ } else
+ caddr = MACRISC_CFA1(bus->number, dev_fn, offset);
+
+ /* Uninorth will return garbage if we don't read back the value ! */
+ do {
+ out_le32(hose->cfg_addr, caddr);
+ } while (in_le32(hose->cfg_addr) != caddr);
+
+ offset &= has_uninorth ? 0x07 : 0x03;
+ return hose->cfg_data + offset;
+}
+
+static struct pci_ops macrisc_pci_ops =
+{
+ .map_bus = macrisc_cfg_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+#ifdef CONFIG_PPC32
+/*
+ * Verify that a specific (bus, dev_fn) exists on chaos
+ */
+static void __iomem *chaos_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ struct device_node *np;
+ const u32 *vendor, *device;
+
+ if (offset >= 0x100)
+ return NULL;
+ np = of_pci_find_child_device(bus->dev.of_node, devfn);
+ if (np == NULL)
+ return NULL;
+
+ vendor = of_get_property(np, "vendor-id", NULL);
+ device = of_get_property(np, "device-id", NULL);
+ if (vendor == NULL || device == NULL)
+ return NULL;
+
+ if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
+ && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
+ return NULL;
+
+ return macrisc_cfg_map_bus(bus, devfn, offset);
+}
+
+static struct pci_ops chaos_pci_ops =
+{
+ .map_bus = chaos_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void __init setup_chaos(struct pci_controller *hose,
+ struct resource *addr)
+{
+ /* assume a `chaos' bridge */
+ hose->ops = &chaos_pci_ops;
+ hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
+}
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+/*
+ * These versions of U3 HyperTransport config space access ops do not
+ * implement self-view of the HT host yet
+ */
+
+/*
+ * This function deals with some "special cases" devices.
+ *
+ * 0 -> No special case
+ * 1 -> Skip the device but act as if the access was successful
+ * (return 0xff's on reads, eventually, cache config space
+ * accesses in a later version)
+ * -1 -> Hide the device (unsuccessful access)
+ */
+static int u3_ht_skip_device(struct pci_controller *hose,
+ struct pci_bus *bus, unsigned int devfn)
+{
+ struct device_node *busdn, *dn;
+ int i;
+
+ /* We only allow config cycles to devices that are in the OF device-tree
+ * as we are apparently having some weird things going on with some
+ * revs of K2 on recent G5s, except for the host bridge itself, which
+ * is missing from the tree but we know we can probe.
+ */
+ if (bus->self)
+ busdn = pci_device_to_OF_node(bus->self);
+ else if (devfn == 0)
+ return 0;
+ else
+ busdn = hose->dn;
+ for (dn = busdn->child; dn; dn = dn->sibling)
+ if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn)
+ break;
+ if (dn == NULL)
+ return -1;
+
+ /*
+ * When a device in K2 is powered down, we die on config
+ * cycle accesses. Fix that here.
+ */
+ for (i=0; i<2; i++)
+ if (k2_skiplist[i] == dn)
+ return 1;
+
+ return 0;
+}
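+
+/*
+ * Illustrative effect of the skiplist (hypothetical scenario): when
+ * k2_skiplist[] holds the node of a K2 cell that has been powered down,
+ * a config read to that devfn returns all-ones data (0xff, 0xffff or
+ * 0xffffffff depending on the access size) together with
+ * PCIBIOS_SUCCESSFUL instead of touching the hardware, and a config
+ * write is silently dropped; see u3_ht_read_config() and
+ * u3_ht_write_config() below.
+ */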
+
+#define U3_HT_CFA0(devfn, off) \
+ ((((unsigned int)devfn) << 8) | (off))
+#define U3_HT_CFA1(bus, devfn, off) \
+ (U3_HT_CFA0(devfn, off) \
+ + (((unsigned int)bus) << 16) \
+ + 0x01000000UL)
+
+static void __iomem *u3_ht_cfg_access(struct pci_controller *hose, u8 bus,
+ u8 devfn, u8 offset, int *swap)
+{
+ *swap = 1;
+ if (bus == hose->first_busno) {
+ if (devfn != 0)
+ return hose->cfg_data + U3_HT_CFA0(devfn, offset);
+ *swap = 0;
+ return ((void __iomem *)hose->cfg_addr) + (offset << 2);
+ } else
+ return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
+}
+
+static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct pci_controller *hose;
+ void __iomem *addr;
+ int swap;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (u3_ht_skip_device(hose, bus, devfn)) {
+ case 0:
+ break;
+ case 1:
+ switch (len) {
+ case 1:
+ *val = 0xff; break;
+ case 2:
+ *val = 0xffff; break;
+ default:
+ *val = 0xfffffffful; break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+ default:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ *val = in_8(addr);
+ break;
+ case 2:
+ *val = swap ? in_le16(addr) : in_be16(addr);
+ break;
+ default:
+ *val = swap ? in_le32(addr) : in_be32(addr);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
+{
+ struct pci_controller *hose;
+ void __iomem *addr;
+ int swap;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (u3_ht_skip_device(hose, bus, devfn)) {
+ case 0:
+ break;
+ case 1:
+ return PCIBIOS_SUCCESSFUL;
+ default:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ out_8(addr, val);
+ break;
+ case 2:
+ swap ? out_le16(addr, val) : out_be16(addr, val);
+ break;
+ default:
+ swap ? out_le32(addr, val) : out_be32(addr, val);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops u3_ht_pci_ops =
+{
+ .read = u3_ht_read_config,
+ .write = u3_ht_write_config,
+};
+
+#define U4_PCIE_CFA0(devfn, off) \
+ ((1 << ((unsigned int)PCI_SLOT(devfn))) \
+ | (((unsigned int)PCI_FUNC(devfn)) << 8) \
+ | ((((unsigned int)(off)) >> 8) << 28) \
+ | (((unsigned int)(off)) & 0xfcU))
+
+#define U4_PCIE_CFA1(bus, devfn, off) \
+ ((((unsigned int)(bus)) << 16) \
+ |(((unsigned int)(devfn)) << 8) \
+ | ((((unsigned int)(off)) >> 8) << 28) \
+ |(((unsigned int)(off)) & 0xfcU) \
+ |1UL)
+
+static void __iomem *u4_pcie_cfg_map_bus(struct pci_bus *bus,
+ unsigned int dev_fn,
+ int offset)
+{
+ struct pci_controller *hose;
+ unsigned int caddr;
+
+ if (offset >= 0x1000)
+ return NULL;
+
+ hose = pci_bus_to_host(bus);
+ if (!hose)
+ return NULL;
+
+ if (bus->number == hose->first_busno) {
+ caddr = U4_PCIE_CFA0(dev_fn, offset);
+ } else
+ caddr = U4_PCIE_CFA1(bus->number, dev_fn, offset);
+
+ /* Uninorth will return garbage if we don't read back the value ! */
+ do {
+ out_le32(hose->cfg_addr, caddr);
+ } while (in_le32(hose->cfg_addr) != caddr);
+
+ offset &= 0x03;
+ return hose->cfg_data + offset;
+}
+
+static struct pci_ops u4_pcie_pci_ops =
+{
+ .map_bus = u4_pcie_cfg_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
+{
+ /* Apple's device-tree "hides" the root complex virtual P2P bridge
+ * on U4. However, Linux sees it, causing the PCI <-> OF matching
+ * code to fail to properly match devices below it. This works around
+ * it by setting the node of the bridge to point to the PHB node,
+ * which is not entirely correct but fixes the matching code and
+ * doesn't break anything else. It's also the simplest possible fix.
+ */
+ if (dev->dev.of_node == NULL)
+ dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
+
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC32
+/*
+ * For a bandit bridge, turn on cache coherency if necessary.
+ * N.B. we could clean this up using the hose ops directly.
+ */
+static void __init init_bandit(struct pci_controller *bp)
+{
+ unsigned int vendev, magic;
+ int rev;
+
+ /* read the word at offset 0 in config space for device 11 */
+ out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
+ udelay(2);
+ vendev = in_le32(bp->cfg_data);
+ if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
+ PCI_VENDOR_ID_APPLE) {
+ /* read the revision id */
+ out_le32(bp->cfg_addr,
+ (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
+ udelay(2);
+ rev = in_8(bp->cfg_data);
+ if (rev != BANDIT_REVID)
+ printk(KERN_WARNING
+ "Unknown revision %d for bandit\n", rev);
+ } else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
+ printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
+ return;
+ }
+
+ /* read the word at offset 0x50 */
+ out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
+ udelay(2);
+ magic = in_le32(bp->cfg_data);
+ if ((magic & BANDIT_COHERENT) != 0)
+ return;
+ magic |= BANDIT_COHERENT;
+ udelay(2);
+ out_le32(bp->cfg_data, magic);
+ printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
+}
+
+/*
+ * Tweak the PCI-PCI bridge chip on the blue & white G3s.
+ */
+static void __init init_p2pbridge(void)
+{
+ struct device_node *p2pbridge;
+ struct pci_controller* hose;
+ u8 bus, devfn;
+ u16 val;
+
+ /* XXX it would be better here to identify the specific
+ PCI-PCI bridge chip we have. */
+ p2pbridge = of_find_node_by_name(NULL, "pci-bridge");
+ if (p2pbridge == NULL
+ || p2pbridge->parent == NULL
+ || strcmp(p2pbridge->parent->name, "pci") != 0)
+ goto done;
+ if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
+ DBG("Can't find PCI infos for PCI<->PCI bridge\n");
+ goto done;
+ }
+ /* Warning: At this point, we have not yet renumbered all busses.
+ * So we must use OF walking to find our hose.
+ */
+ hose = pci_find_hose_for_OF_device(p2pbridge);
+ if (!hose) {
+ DBG("Can't find hose for PCI<->PCI bridge\n");
+ goto done;
+ }
+ if (early_read_config_word(hose, bus, devfn,
+ PCI_BRIDGE_CONTROL, &val) < 0) {
+ printk(KERN_ERR "init_p2pbridge: couldn't read bridge"
+ " control\n");
+ goto done;
+ }
+ val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
+ early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
+done:
+ of_node_put(p2pbridge);
+}
+
+static void __init init_second_ohare(void)
+{
+ struct device_node *np = of_find_node_by_name(NULL, "pci106b,7");
+ unsigned char bus, devfn;
+ unsigned short cmd;
+
+ if (np == NULL)
+ return;
+
+ /* This must run before we initialize the PICs since the second
+ * ohare hosts a PIC that will be accessed there.
+ */
+ if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
+ struct pci_controller* hose =
+ pci_find_hose_for_OF_device(np);
+ if (!hose) {
+ printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+ of_node_put(np);
+ return;
+ }
+ early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ cmd &= ~PCI_COMMAND_IO;
+ early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+ }
+ has_second_ohare = 1;
+ of_node_put(np);
+}
+
+/*
+ * Some Apple desktop machines have a NEC PD720100A USB2 controller
+ * on the motherboard. Open Firmware, on these, will disable the
+ * EHCI part of it so it behaves like a pair of OHCI's. This fixup
+ * code re-enables it ;)
+ */
+static void __init fixup_nec_usb2(void)
+{
+ struct device_node *nec;
+
+ for_each_node_by_name(nec, "usb") {
+ struct pci_controller *hose;
+ u32 data;
+ const u32 *prop;
+ u8 bus, devfn;
+
+ prop = of_get_property(nec, "vendor-id", NULL);
+ if (prop == NULL)
+ continue;
+ if (0x1033 != *prop)
+ continue;
+ prop = of_get_property(nec, "device-id", NULL);
+ if (prop == NULL)
+ continue;
+ if (0x0035 != *prop)
+ continue;
+ prop = of_get_property(nec, "reg", NULL);
+ if (prop == NULL)
+ continue;
+ devfn = (prop[0] >> 8) & 0xff;
+ bus = (prop[0] >> 16) & 0xff;
+ if (PCI_FUNC(devfn) != 0)
+ continue;
+ hose = pci_find_hose_for_OF_device(nec);
+ if (!hose)
+ continue;
+ early_read_config_dword(hose, bus, devfn, 0xe4, &data);
+ if (data & 1UL) {
+ printk("Found NEC PD720100A USB2 chip with disabled"
+ " EHCI, fixing up...\n");
+ data &= ~1UL;
+ early_write_config_dword(hose, bus, devfn, 0xe4, data);
+ }
+ }
+}
+
+static void __init setup_bandit(struct pci_controller *hose,
+ struct resource *addr)
+{
+ hose->ops = &macrisc_pci_ops;
+ hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
+ init_bandit(hose);
+}
+
+static int __init setup_uninorth(struct pci_controller *hose,
+ struct resource *addr)
+{
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+ has_uninorth = 1;
+ hose->ops = &macrisc_pci_ops;
+ hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
+ /* We "know" that the bridge at f2000000 has the PCI slots. */
+ return addr->start == 0xf2000000;
+}
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+static void __init setup_u3_agp(struct pci_controller* hose)
+{
+ /* On G5, we move AGP up to high bus number so we don't need
+ * to reassign bus numbers for HT. If we ever have P2P bridges
+ * on AGP, we'll have to move pci_assign_all_busses to the
+ * pci_controller structure so we enable it for AGP and not for
+ * HT children.
+ * We hard code the address because of the different size of
+ * the reg address cell; we shall fix that by killing struct
+ * reg_property and using some accessor functions instead.
+ */
+ hose->first_busno = 0xf0;
+ hose->last_busno = 0xff;
+ has_uninorth = 1;
+ hose->ops = &macrisc_pci_ops;
+ hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
+ u3_agp = hose;
+}
+
+static void __init setup_u4_pcie(struct pci_controller* hose)
+{
+ /* We currently only implement the "non-atomic" config space, to
+ * be optimised later.
+ */
+ hose->ops = &u4_pcie_pci_ops;
+ hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
+
+ /* The bus contains a bridge from root -> device, we need to
+ * make it visible on bus 0 so that we pick the right type
+ * of config cycles. If we didn't, we would have to force all
+ * config cycles to be type 1. So we override the "bus-range"
+ * property here
+ */
+ hose->first_busno = 0x00;
+ hose->last_busno = 0xff;
+}
+
+static void __init parse_region_decode(struct pci_controller *hose,
+ u32 decode)
+{
+ unsigned long base, end, next = -1;
+ int i, cur = -1;
+
+ /* Iterate through all bits. We ignore the last bit as this region is
+ * reserved for the ROM among other niceties
+ */
+ for (i = 0; i < 31; i++) {
+ if ((decode & (0x80000000 >> i)) == 0)
+ continue;
+ if (i < 16) {
+ base = 0xf0000000 | (((u32)i) << 24);
+ end = base + 0x00ffffff;
+ } else {
+ base = ((u32)i-16) << 28;
+ end = base + 0x0fffffff;
+ }
+ if (base != next) {
+ if (++cur >= 3) {
+ printk(KERN_WARNING "PCI: Too many ranges !\n");
+ break;
+ }
+ hose->mem_resources[cur].flags = IORESOURCE_MEM;
+ hose->mem_resources[cur].name = hose->dn->full_name;
+ hose->mem_resources[cur].start = base;
+ hose->mem_resources[cur].end = end;
+ hose->mem_offset[cur] = 0;
+ DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end);
+ } else {
+ DBG(" : -0x%08lx\n", end);
+ hose->mem_resources[cur].end = end;
+ }
+ next = end + 1;
+ }
+}
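+
+/*
+ * Illustrative decoding (hypothetical register contents): bit i of the
+ * decode word enables one region; bits 0-15 select 16MB windows at
+ * 0xf0000000 + (i << 24) and bits 16-30 select 256MB windows at
+ * (i - 16) << 28. With bits i=20 and i=21 set, the two 256MB windows at
+ * 0x40000000 and 0x50000000 are contiguous and coalesce into a single
+ * resource covering 0x40000000..0x5fffffff, while bit i=10 alone would
+ * add 0xfa000000..0xfaffffff.
+ */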
+
+static void __init setup_u3_ht(struct pci_controller* hose)
+{
+ struct device_node *np = hose->dn;
+ struct resource cfg_res, self_res;
+ u32 decode;
+
+ hose->ops = &u3_ht_pci_ops;
+
+ /* Get base addresses from OF tree
+ */
+ if (of_address_to_resource(np, 0, &cfg_res) ||
+ of_address_to_resource(np, 1, &self_res)) {
+ printk(KERN_ERR "PCI: Failed to get U3/U4 HT resources !\n");
+ return;
+ }
+
+ /* Map external cfg space access into cfg_data and self registers
+ * into cfg_addr
+ */
+ hose->cfg_data = ioremap(cfg_res.start, 0x02000000);
+ hose->cfg_addr = ioremap(self_res.start, resource_size(&self_res));
+
+ /*
+ * /ht node doesn't expose a "ranges" property, we read the register
+ * that controls the decoding logic and use that for memory regions.
+ * The IO region is hard coded since it is fixed in HW as well.
+ */
+ hose->io_base_phys = 0xf4000000;
+ hose->pci_io_size = 0x00400000;
+ hose->io_resource.name = np->full_name;
+ hose->io_resource.start = 0;
+ hose->io_resource.end = 0x003fffff;
+ hose->io_resource.flags = IORESOURCE_IO;
+ hose->first_busno = 0;
+ hose->last_busno = 0xef;
+
+ /* Note: fix offset when cfg_addr becomes a void * */
+ decode = in_be32(hose->cfg_addr + 0x80);
+
+ DBG("PCI: Apple HT bridge decode register: 0x%08x\n", decode);
+
+ /* NOTE: The decode register setup is a bit weird... region
+ * 0xf8000000 for example is marked as enabled in there while it's
+ & actually the memory controller registers.
+ * That means that we are incorrectly attributing it to HT.
+ *
+ * In a similar vein, region 0xf4000000 is actually the HT IO space but
+ * also marked as enabled in here and 0xf9000000 is used by some other
+ * internal bits of the northbridge.
+ *
+ * Unfortunately, we can't just mask out those bits as we would end
+ * up with more regions than we can cope with (Linux can only cope
+ * with 3 memory regions for a PHB at this stage).
+ *
+ * So for now, we just do a little hack. We happen to -know- that
+ * Apple firmware doesn't assign things below 0xfa000000 for that
+ * bridge anyway so we mask out all bits we don't want.
+ */
+ decode &= 0x003fffff;
+
+ /* Now parse the resulting bits and build resources */
+ parse_region_decode(hose, decode);
+}
+#endif /* CONFIG_PPC64 */
+
+/*
+ * We assume that if we have a G3 powermac, we have one bridge called
+ * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
+ * if we have one or more bandit or chaos bridges, we don't have a MPC106.
+ */
+static int __init pmac_add_bridge(struct device_node *dev)
+{
+ int len;
+ struct pci_controller *hose;
+ struct resource rsrc;
+ char *disp_name;
+ const int *bus_range;
+ int primary = 1, has_address = 0;
+
+ DBG("Adding PCI host bridge %s\n", dev->full_name);
+
+ /* Fetch host bridge registers address */
+ has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+
+ /* Get bus range if any */
+ bus_range = of_get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ " bus 0\n", dev->full_name);
+ }
+
+ hose = pcibios_alloc_controller(dev);
+ if (!hose)
+ return -ENOMEM;
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = pmac_pci_controller_ops;
+
+ disp_name = NULL;
+
+ /* 64 bits only bridges */
+#ifdef CONFIG_PPC64
+ if (of_device_is_compatible(dev, "u3-agp")) {
+ setup_u3_agp(hose);
+ disp_name = "U3-AGP";
+ primary = 0;
+ } else if (of_device_is_compatible(dev, "u3-ht")) {
+ setup_u3_ht(hose);
+ disp_name = "U3-HT";
+ primary = 1;
+ } else if (of_device_is_compatible(dev, "u4-pcie")) {
+ setup_u4_pcie(hose);
+ disp_name = "U4-PCIE";
+ primary = 0;
+ }
+ printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:"
+ " %d->%d\n", disp_name, hose->first_busno, hose->last_busno);
+#endif /* CONFIG_PPC64 */
+
+ /* 32 bits only bridges */
+#ifdef CONFIG_PPC32
+ if (of_device_is_compatible(dev, "uni-north")) {
+ primary = setup_uninorth(hose, &rsrc);
+ disp_name = "UniNorth";
+ } else if (strcmp(dev->name, "pci") == 0) {
+ /* XXX assume this is a mpc106 (grackle) */
+ setup_grackle(hose);
+ disp_name = "Grackle (MPC106)";
+ } else if (strcmp(dev->name, "bandit") == 0) {
+ setup_bandit(hose, &rsrc);
+ disp_name = "Bandit";
+ } else if (strcmp(dev->name, "chaos") == 0) {
+ setup_chaos(hose, &rsrc);
+ disp_name = "Chaos";
+ primary = 0;
+ }
+ printk(KERN_INFO "Found %s PCI host bridge at 0x%016llx. "
+ "Firmware bus number: %d->%d\n",
+ disp_name, (unsigned long long)rsrc.start, hose->first_busno,
+ hose->last_busno);
+#endif /* CONFIG_PPC32 */
+
+ DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+ hose, hose->cfg_addr, hose->cfg_data);
+
+ /* Interpret the "ranges" property */
+ /* This also maps the I/O region and sets isa_io/mem_base */
+ pci_process_bridge_OF_ranges(hose, dev, primary);
+
+ /* Fixup "bus-range" OF property */
+ fixup_bus_range(dev);
+
+ return 0;
+}
+
+void pmac_pci_irq_fixup(struct pci_dev *dev)
+{
+#ifdef CONFIG_PPC32
+ /* Fixup interrupt for the modem/ethernet combo controller.
+ * on machines with a second ohare chip.
+ * The number in the device tree (27) is bogus (correct for
+ * the ethernet-only board but not the combo ethernet/modem
+ * board). The real interrupt is 28 on the second controller
+ * -> 28+32 = 60.
+ */
+ if (has_second_ohare &&
+ dev->vendor == PCI_VENDOR_ID_DEC &&
+ dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
+ dev->irq = irq_create_mapping(NULL, 60);
+ irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ }
+#endif /* CONFIG_PPC32 */
+}
+
+void __init pmac_pci_init(void)
+{
+ struct device_node *np, *root;
+ struct device_node *ht = NULL;
+
+ pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
+
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
+ printk(KERN_CRIT "pmac_pci_init: can't find root "
+ "of device tree\n");
+ return;
+ }
+ for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
+ if (np->name == NULL)
+ continue;
+ if (strcmp(np->name, "bandit") == 0
+ || strcmp(np->name, "chaos") == 0
+ || strcmp(np->name, "pci") == 0) {
+ if (pmac_add_bridge(np) == 0)
+ of_node_get(np);
+ }
+ if (strcmp(np->name, "ht") == 0) {
+ of_node_get(np);
+ ht = np;
+ }
+ }
+ of_node_put(root);
+
+#ifdef CONFIG_PPC64
+ /* Probe HT last as it relies on the agp resources to be already
+ * setup
+ */
+ if (ht && pmac_add_bridge(ht) != 0)
+ of_node_put(ht);
+
+ /* Setup the linkage between OF nodes and PHBs */
+ pci_devs_phb_init();
+
+ /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+ * assume there is no P2P bridge on the AGP bus, which should be a
+ * safe assumption for now. We should do something better in the
+ * future though
+ */
+ if (u3_agp) {
+ struct device_node *np = u3_agp->dn;
+ PCI_DN(np)->busno = 0xf0;
+ for (np = np->child; np; np = np->sibling)
+ PCI_DN(np)->busno = 0xf0;
+ }
+ /* pmac_check_ht_link(); */
+
+#else /* CONFIG_PPC64 */
+ init_p2pbridge();
+ init_second_ohare();
+ fixup_nec_usb2();
+
+ /* We are still having some issues with the Xserve G4; adding an
+ * offset between bus numbers and domains when we reassign all
+ * busses should help for now.
+ */
+ if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
+ pcibios_assign_bus_offset = 0x10;
+#endif
+}
+
+#ifdef CONFIG_PPC32
+static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
+{
+ struct device_node* node;
+ int updatecfg = 0;
+ int uninorth_child;
+
+ node = pci_device_to_OF_node(dev);
+
+ /* We don't want to enable USB controllers absent from the OF tree
+ * (iBook second controller)
+ */
+ if (dev->vendor == PCI_VENDOR_ID_APPLE
+ && dev->class == PCI_CLASS_SERIAL_USB_OHCI
+ && !node) {
+ printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
+ pci_name(dev));
+ return false;
+ }
+
+ if (!node)
+ return true;
+
+ uninorth_child = node->parent &&
+ of_device_is_compatible(node->parent, "uni-north");
+
+ /* Firewire & GMAC were disabled after PCI probe, the driver is
+ * claiming them, we must re-enable them now.
+ */
+ if (uninorth_child && !strcmp(node->name, "firewire") &&
+ (of_device_is_compatible(node, "pci106b,18") ||
+ of_device_is_compatible(node, "pci106b,30") ||
+ of_device_is_compatible(node, "pci11c1,5811"))) {
+ pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
+ pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
+ updatecfg = 1;
+ }
+ if (uninorth_child && !strcmp(node->name, "ethernet") &&
+ of_device_is_compatible(node, "gmac")) {
+ pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
+ updatecfg = 1;
+ }
+
+ /*
+ * Fixup various header fields on 32 bits. We don't do that on
+ * 64 bits as some of these have strange values behind the HT
+ * bridge and we must not, for example, enable MWI or set the
+ * cache line size on them.
+ */
+ if (updatecfg) {
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
+ | PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+ L1_CACHE_BYTES >> 2);
+ }
+
+ return true;
+}
+
+void pmac_pci_fixup_ohci(struct pci_dev *dev)
+{
+ struct device_node *node = pci_device_to_OF_node(dev);
+
+ /* We don't want to assign resources to USB controllers
+ * absent from the OF tree (iBook second controller)
+ */
+ if (dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node)
+ dev->resource[0].flags = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_ANY_ID, pmac_pci_fixup_ohci);
+
+/* We power down some devices after they have been probed. They'll
+ * be powered back on later on
+ */
+void __init pmac_pcibios_after_init(void)
+{
+ struct device_node* nd;
+
+ for_each_node_by_name(nd, "firewire") {
+ if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") ||
+ of_device_is_compatible(nd, "pci106b,30") ||
+ of_device_is_compatible(nd, "pci11c1,5811"))
+ && of_device_is_compatible(nd->parent, "uni-north")) {
+ pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
+ pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
+ }
+ }
+ for_each_node_by_name(nd, "ethernet") {
+ if (nd->parent && of_device_is_compatible(nd, "gmac")
+ && of_device_is_compatible(nd->parent, "uni-north"))
+ pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
+ }
+}
+
+void pmac_pci_fixup_cardbus(struct pci_dev* dev)
+{
+ if (!machine_is(powermac))
+ return;
+ /*
+ * Fix the interrupt routing on the various cardbus bridges
+ * used on powerbooks
+ */
+ if (dev->vendor != PCI_VENDOR_ID_TI)
+ return;
+ if (dev->device == PCI_DEVICE_ID_TI_1130 ||
+ dev->device == PCI_DEVICE_ID_TI_1131) {
+ u8 val;
+ /* Enable PCI interrupt */
+ if (pci_read_config_byte(dev, 0x91, &val) == 0)
+ pci_write_config_byte(dev, 0x91, val | 0x30);
+ /* Disable ISA interrupt mode */
+ if (pci_read_config_byte(dev, 0x92, &val) == 0)
+ pci_write_config_byte(dev, 0x92, val & ~0x06);
+ }
+ if (dev->device == PCI_DEVICE_ID_TI_1210 ||
+ dev->device == PCI_DEVICE_ID_TI_1211 ||
+ dev->device == PCI_DEVICE_ID_TI_1410 ||
+ dev->device == PCI_DEVICE_ID_TI_1510) {
+ u8 val;
+ /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
+ signal out the MFUNC0 pin */
+ if (pci_read_config_byte(dev, 0x8c, &val) == 0)
+ pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
+ /* Disable ISA interrupt mode */
+ if (pci_read_config_byte(dev, 0x92, &val) == 0)
+ pci_write_config_byte(dev, 0x92, val & ~0x06);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
+
+void pmac_pci_fixup_pciata(struct pci_dev* dev)
+{
+ u8 progif = 0;
+
+ /*
+ * On PowerMacs, we try to switch any PCI ATA controller to
+ * fully native mode
+ */
+ if (!machine_is(powermac))
+ return;
+
+ /* Some controllers don't have the class IDE */
+ if (dev->vendor == PCI_VENDOR_ID_PROMISE)
+ switch(dev->device) {
+ case PCI_DEVICE_ID_PROMISE_20246:
+ case PCI_DEVICE_ID_PROMISE_20262:
+ case PCI_DEVICE_ID_PROMISE_20263:
+ case PCI_DEVICE_ID_PROMISE_20265:
+ case PCI_DEVICE_ID_PROMISE_20267:
+ case PCI_DEVICE_ID_PROMISE_20268:
+ case PCI_DEVICE_ID_PROMISE_20269:
+ case PCI_DEVICE_ID_PROMISE_20270:
+ case PCI_DEVICE_ID_PROMISE_20271:
+ case PCI_DEVICE_ID_PROMISE_20275:
+ case PCI_DEVICE_ID_PROMISE_20276:
+ case PCI_DEVICE_ID_PROMISE_20277:
+ goto good;
+ }
+ /* Others, check PCI class */
+ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+ return;
+ good:
+ pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+ if ((progif & 5) != 5) {
+ printk(KERN_INFO "PCI: %s Forcing PCI IDE into native mode\n",
+ pci_name(dev));
+ (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
+ if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+ (progif & 5) != 5)
+ printk(KERN_ERR "Rewrite of PROGIF failed !\n");
+ else {
+ /* Clear IO BARs, they will be reassigned */
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_2, 0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_3, 0);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Disable second function on K2-SATA, it's broken
+ * and disable IO BARs on first one
+ */
+static void fixup_k2_sata(struct pci_dev* dev)
+{
+ int i;
+ u16 cmd;
+
+ if (PCI_FUNC(dev->devfn) > 0) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ for (i = 0; i < 6; i++) {
+ dev->resource[i].start = dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
+ 0);
+ }
+ } else {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_IO;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ for (i = 0; i < 5; i++) {
+ dev->resource[i].start = dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
+ 0);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
+
+/*
+ * On U4 (aka CPC945) the PCIe root complex "P2P" bridge resource ranges aren't
+ * configured by the firmware. The bridge itself seems to ignore them but it
+ * causes problems with Linux which then re-assigns devices below the bridge,
+ * thus changing addresses of those devices from what was in the device-tree,
+ * which sucks when those are video cards using offb
+ *
+ * We could just mark it transparent but I prefer fixing up the resources to
+ * properly show what's going on here, as I have some doubts about having them
+ * badly configured potentially being an issue for DMA.
+ *
+ * We leave PIO alone, it seems to be fine
+ *
+ * Oh and there's another funny bug. The OF properties advertise the region
+ * 0xf1000000..0xf1ffffff as being forwarded as memory space. But that's
+ * actually not true, this region is the memory mapped config space. So we
+ * also need to filter it out or we'll map things in the wrong place.
+ */
+static void fixup_u4_pcie(struct pci_dev* dev)
+{
+ struct pci_controller *host = pci_bus_to_host(dev->bus);
+ struct resource *region = NULL;
+ u32 reg;
+ int i;
+
+ /* Only do that on PowerMac */
+ if (!machine_is(powermac))
+ return;
+
+ /* Find the largest MMIO region */
+ for (i = 0; i < 3; i++) {
+ struct resource *r = &host->mem_resources[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ /* Skip the 0xf0xxxxxx..f2xxxxxx regions, we know they
+ * are reserved by HW for other things
+ */
+ if (r->start >= 0xf0000000 && r->start < 0xf3000000)
+ continue;
+ if (!region || resource_size(r) > resource_size(region))
+ region = r;
+ }
+ /* Nothing found, bail */
+ if (region == NULL)
+ return;
+
+ /* Print things out */
+ printk(KERN_INFO "PCI: Fixup U4 PCIe bridge range: %pR\n", region);
+
+ /* Fixup bridge config space. We know it's a Mac, resources aren't
+ * offset so let's just blast them as-is. We also know that they
+ * fit in 32 bits
+ */
+ reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000);
+ pci_write_config_dword(dev, PCI_MEMORY_BASE, reg);
+ pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0);
+ pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
+ pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
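+
+/*
+ * Worked example for the register value computed above (the window is
+ * hypothetical): if the largest usable MMIO resource of the host is
+ * 0x80000000..0x9fffffff, the dword written to PCI_MEMORY_BASE is
+ *
+ *   ((0x80000000 >> 16) & 0xfff0) | (0x9fffffff & 0xfff00000) = 0x9ff08000
+ *
+ * i.e. memory base 0x80000000 in the low 16 bits and memory limit
+ * 0x9ff00000 (window top 0x9fffffff) in the high 16 bits of the bridge's
+ * memory window registers.
+ */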
+
+#ifdef CONFIG_PPC64
+static int pmac_pci_probe_mode(struct pci_bus *bus)
+{
+ struct device_node *node = pci_bus_to_OF_node(bus);
+
+ /* We need to use normal PCI probing for the AGP bus,
+ * since the device for the AGP bridge isn't in the tree.
+ * Same for the PCIe host on U4 and the HT host bridge.
+ */
+ if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
+ of_device_is_compatible(node, "u4-pcie") ||
+ of_device_is_compatible(node, "u3-ht")))
+ return PCI_PROBE_NORMAL;
+ return PCI_PROBE_DEVTREE;
+}
+#endif /* CONFIG_PPC64 */
+
+struct pci_controller_ops pmac_pci_controller_ops = {
+#ifdef CONFIG_PPC64
+ .probe_mode = pmac_pci_probe_mode,
+#endif
+#ifdef CONFIG_PPC32
+ .enable_device_hook = pmac_pci_enable_device_hook,
+#endif
+};
+
diff --git a/kernel/arch/powerpc/platforms/powermac/pfunc_base.c b/kernel/arch/powerpc/platforms/powermac/pfunc_base.c
new file mode 100644
index 000000000..e49d07f3d
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -0,0 +1,410 @@
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/of_irq.h>
+
+#include <asm/pmac_feature.h>
+#include <asm/pmac_pfunc.h>
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static irqreturn_t macio_gpio_irq(int irq, void *data)
+{
+ pmf_do_irq(data);
+
+ return IRQ_HANDLED;
+}
+
+static int macio_do_gpio_irq_enable(struct pmf_function *func)
+{
+ unsigned int irq = irq_of_parse_and_map(func->node, 0);
+ if (irq == NO_IRQ)
+ return -EINVAL;
+ return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
+}
+
+static int macio_do_gpio_irq_disable(struct pmf_function *func)
+{
+ unsigned int irq = irq_of_parse_and_map(func->node, 0);
+ if (irq == NO_IRQ)
+ return -EINVAL;
+ free_irq(irq, func);
+ return 0;
+}
+
+static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
+{
+ u8 __iomem *addr = (u8 __iomem *)func->driver_data;
+ unsigned long flags;
+ u8 tmp;
+
+ /* Check polarity */
+ if (args && args->count && !args->u[0].v)
+ value = ~value;
+
+ /* Toggle the GPIO */
+ raw_spin_lock_irqsave(&feature_lock, flags);
+ tmp = readb(addr);
+ tmp = (tmp & ~mask) | (value & mask);
+ DBG("Do write 0x%02x to GPIO %s (%p)\n",
+ tmp, func->node->full_name, addr);
+ writeb(tmp, addr);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
+
+ return 0;
+}
+
+static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor)
+{
+ u8 __iomem *addr = (u8 __iomem *)func->driver_data;
+ u32 value;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ value = readb(addr);
+ *args->u[0].p = ((value & mask) >> rshift) ^ xor;
+
+ return 0;
+}
+
+static int macio_do_delay(PMF_STD_ARGS, u32 duration)
+{
+ /* assume we can sleep ! */
+ msleep((duration + 999) / 1000);
+ return 0;
+}
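+
+/*
+ * The delay argument is in microseconds; the handler above rounds it up
+ * to whole milliseconds, so a requested 2500us delay (an illustrative
+ * value) becomes msleep(3).
+ */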
+
+static struct pmf_handlers macio_gpio_handlers = {
+ .irq_enable = macio_do_gpio_irq_enable,
+ .irq_disable = macio_do_gpio_irq_disable,
+ .write_gpio = macio_do_gpio_write,
+ .read_gpio = macio_do_gpio_read,
+ .delay = macio_do_delay,
+};
+
+static void macio_gpio_init_one(struct macio_chip *macio)
+{
+ struct device_node *gparent, *gp;
+
+ /*
+ * Find the "gpio" parent node
+ */
+
+ for (gparent = NULL;
+ (gparent = of_get_next_child(macio->of_node, gparent)) != NULL;)
+ if (strcmp(gparent->name, "gpio") == 0)
+ break;
+ if (gparent == NULL)
+ return;
+
+ DBG("Installing GPIO functions for macio %s\n",
+ macio->of_node->full_name);
+
+ /*
+ * Ok, got one, we don't need anything special to track them down, so
+ * we just create them all
+ */
+ for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) {
+ const u32 *reg = of_get_property(gp, "reg", NULL);
+ unsigned long offset;
+ if (reg == NULL)
+ continue;
+ offset = *reg;
+ /* Deal with old style device-trees. We can safely hard code the
+ * offset for now, even if it's a bit gross ...
+ */
+ if (offset < 0x50)
+ offset += 0x50;
+ offset += (unsigned long)macio->base;
+ pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
+ }
+
+ DBG("Calling initial GPIO functions for macio %s\n",
+ macio->of_node->full_name);
+
+ /* And now we run all the init ones */
+ for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
+ pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+
+ /* Note: We do not at this point implement the "at sleep" or "at wake"
+ * functions. I have yet to find any for GPIOs anyway.
+ */
+}
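+
+/*
+ * Example of the offset handling above (the "reg" values are made up):
+ * an old style device-tree GPIO with reg = <0x9> ends up at
+ * macio->base + 0x59, while a newer one with reg = <0x59> already
+ * includes the 0x50 base and is used as-is.
+ */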
+
+static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&feature_lock, flags);
+ MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *args->u[0].p = MACIO_IN32(offset);
+ return 0;
+}
+
+static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&feature_lock, flags);
+ MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
+ return 0;
+}
+
+static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
+ u32 shift, u32 xor)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
+ return 0;
+}
+
+static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
+ u32 shift, u32 xor)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
+ return 0;
+}
+
+static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
+ u32 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+ u32 tmp, val;
+
+ /* Check args */
+ if (args == NULL || args->count == 0)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&feature_lock, flags);
+ tmp = MACIO_IN32(offset);
+ val = args->u[0].v << shift;
+ tmp = (tmp & ~mask) | (val & mask);
+ MACIO_OUT32(offset, tmp);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
+ u32 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+ u32 tmp, val;
+
+ /* Check args */
+ if (args == NULL || args->count == 0)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&feature_lock, flags);
+ tmp = MACIO_IN8(offset);
+ val = args->u[0].v << shift;
+ tmp = (tmp & ~mask) | (val & mask);
+ MACIO_OUT8(offset, tmp);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static struct pmf_handlers macio_mmio_handlers = {
+ .write_reg32 = macio_do_write_reg32,
+ .read_reg32 = macio_do_read_reg32,
+ .write_reg8 = macio_do_write_reg8,
+ .read_reg8 = macio_do_read_reg8,
+ .read_reg32_msrx = macio_do_read_reg32_msrx,
+ .read_reg8_msrx = macio_do_read_reg8_msrx,
+ .write_reg32_slm = macio_do_write_reg32_slm,
+ .write_reg8_slm = macio_do_write_reg8_slm,
+ .delay = macio_do_delay,
+};
+
+static void macio_mmio_init_one(struct macio_chip *macio)
+{
+ DBG("Installing MMIO functions for macio %s\n",
+ macio->of_node->full_name);
+
+ pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
+}
+
+static struct device_node *unin_hwclock;
+
+static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&feature_lock, flags);
+ /* This is fairly bogus in Darwin, but implemented this way it
+ * should work for our needs:
+ */
+ UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+
+static struct pmf_handlers unin_mmio_handlers = {
+ .write_reg32 = unin_do_write_reg32,
+ .delay = macio_do_delay,
+};
+
+static void uninorth_install_pfunc(void)
+{
+ struct device_node *np;
+
+ DBG("Installing functions for UniN %s\n",
+ uninorth_node->full_name);
+
+ /*
+ * Install handlers for the bridge itself
+ */
+ pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL);
+ pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+
+
+ /*
+ * Install handlers for the hwclock child if any
+ */
+ for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
+ if (strcmp(np->name, "hw-clock") == 0) {
+ unin_hwclock = np;
+ break;
+ }
+ if (unin_hwclock) {
+ DBG("Installing functions for UniN clock %s\n",
+ unin_hwclock->full_name);
+ pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
+ pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
+ NULL);
+ }
+}
+
+/* We export this as the SMP code might init us early */
+int __init pmac_pfunc_base_install(void)
+{
+ static int pfbase_inited;
+ int i;
+
+ if (pfbase_inited)
+ return 0;
+ pfbase_inited = 1;
+
+ if (!machine_is(powermac))
+ return 0;
+
+ DBG("Installing base platform functions...\n");
+
+ /*
+ * Locate mac-io chips and install handlers
+ */
+ for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
+ if (macio_chips[i].of_node) {
+ macio_mmio_init_one(&macio_chips[i]);
+ macio_gpio_init_one(&macio_chips[i]);
+ }
+ }
+
+ /*
+ * Install handlers for northbridge and direct mapped hwclock
+ * if any. We do not implement the config space access callback
+ * which is only ever used for functions that we do not call in
+ * the current driver (enabling/disabling cells in U2, mostly used
+ * to restore the PCI settings, we do that differently)
+ */
+ if (uninorth_node && uninorth_base)
+ uninorth_install_pfunc();
+
+ DBG("All base functions installed\n");
+
+ return 0;
+}
+machine_arch_initcall(powermac, pmac_pfunc_base_install);
+
+#ifdef CONFIG_PM
+
+/* Those can be called by pmac_feature. Ultimately, I should use a sysdev
+ * or a device, but for now, that's good enough until I sort out some
+ * ordering issues. Also, we do not bother with GPIOs, as so far I have
+ * yet to see a case where a GPIO function has the on-suspend or on-resume bit.
+ */
+void pmac_pfunc_base_suspend(void)
+{
+ int i;
+
+ for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
+ if (macio_chips[i].of_node)
+ pmf_do_functions(macio_chips[i].of_node, NULL, 0,
+ PMF_FLAGS_ON_SLEEP, NULL);
+ }
+ if (uninorth_node)
+ pmf_do_functions(uninorth_node, NULL, 0,
+ PMF_FLAGS_ON_SLEEP, NULL);
+ if (unin_hwclock)
+ pmf_do_functions(unin_hwclock, NULL, 0,
+ PMF_FLAGS_ON_SLEEP, NULL);
+}
+
+void pmac_pfunc_base_resume(void)
+{
+ int i;
+
+ if (unin_hwclock)
+ pmf_do_functions(unin_hwclock, NULL, 0,
+ PMF_FLAGS_ON_WAKE, NULL);
+ if (uninorth_node)
+ pmf_do_functions(uninorth_node, NULL, 0,
+ PMF_FLAGS_ON_WAKE, NULL);
+ for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
+ if (macio_chips[i].of_node)
+ pmf_do_functions(macio_chips[i].of_node, NULL, 0,
+ PMF_FLAGS_ON_WAKE, NULL);
+ }
+}
+
+#endif /* CONFIG_PM */
diff --git a/kernel/arch/powerpc/platforms/powermac/pfunc_core.c b/kernel/arch/powerpc/platforms/powermac/pfunc_core.c
new file mode 100644
index 000000000..430750817
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -0,0 +1,1021 @@
+/*
+ *
+ * FIXME: Properly make this race free with refcounting etc...
+ *
+ * FIXME: LOCKING !!!
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include <asm/prom.h>
+#include <asm/pmac_pfunc.h>
+
+/* Debug */
+#define LOG_PARSE(fmt...)
+#define LOG_ERROR(fmt...) printk(fmt)
+#define LOG_BLOB(t,b,c)
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/* Command numbers */
+#define PMF_CMD_LIST 0
+#define PMF_CMD_WRITE_GPIO 1
+#define PMF_CMD_READ_GPIO 2
+#define PMF_CMD_WRITE_REG32 3
+#define PMF_CMD_READ_REG32 4
+#define PMF_CMD_WRITE_REG16 5
+#define PMF_CMD_READ_REG16 6
+#define PMF_CMD_WRITE_REG8 7
+#define PMF_CMD_READ_REG8 8
+#define PMF_CMD_DELAY 9
+#define PMF_CMD_WAIT_REG32 10
+#define PMF_CMD_WAIT_REG16 11
+#define PMF_CMD_WAIT_REG8 12
+#define PMF_CMD_READ_I2C 13
+#define PMF_CMD_WRITE_I2C 14
+#define PMF_CMD_RMW_I2C 15
+#define PMF_CMD_GEN_I2C 16
+#define PMF_CMD_SHIFT_BYTES_RIGHT 17
+#define PMF_CMD_SHIFT_BYTES_LEFT 18
+#define PMF_CMD_READ_CFG 19
+#define PMF_CMD_WRITE_CFG 20
+#define PMF_CMD_RMW_CFG 21
+#define PMF_CMD_READ_I2C_SUBADDR 22
+#define PMF_CMD_WRITE_I2C_SUBADDR 23
+#define PMF_CMD_SET_I2C_MODE 24
+#define PMF_CMD_RMW_I2C_SUBADDR 25
+#define PMF_CMD_READ_REG32_MASK_SHR_XOR 26
+#define PMF_CMD_READ_REG16_MASK_SHR_XOR 27
+#define PMF_CMD_READ_REG8_MASK_SHR_XOR 28
+#define PMF_CMD_WRITE_REG32_SHL_MASK 29
+#define PMF_CMD_WRITE_REG16_SHL_MASK 30
+#define PMF_CMD_WRITE_REG8_SHL_MASK 31
+#define PMF_CMD_MASK_AND_COMPARE 32
+#define PMF_CMD_COUNT 33
+
+/* This structure holds the state of the parser while walking through
+ * a function definition
+ */
+struct pmf_cmd {
+ const void *cmdptr;
+ const void *cmdend;
+ struct pmf_function *func;
+ void *instdata;
+ struct pmf_args *args;
+ int error;
+};
+
+#if 0
+/* Debug output */
+static void print_blob(const char *title, const void *blob, int bytes)
+{
+ printk("%s", title);
+ while(bytes--) {
+ printk("%02x ", *((u8 *)blob));
+ blob += 1;
+ }
+ printk("\n");
+}
+#endif
+
+/*
+ * Parser helpers
+ */
+
+static u32 pmf_next32(struct pmf_cmd *cmd)
+{
+ u32 value;
+ if ((cmd->cmdend - cmd->cmdptr) < 4) {
+ cmd->error = 1;
+ return 0;
+ }
+ value = *((u32 *)cmd->cmdptr);
+ cmd->cmdptr += 4;
+ return value;
+}
+
+static const void* pmf_next_blob(struct pmf_cmd *cmd, int count)
+{
+ const void *value;
+ if ((cmd->cmdend - cmd->cmdptr) < count) {
+ cmd->error = 1;
+ return NULL;
+ }
+ value = cmd->cmdptr;
+ cmd->cmdptr += count;
+ return value;
+}
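+
+/*
+ * The platform-function blobs parsed here are sequences of 32-bit words
+ * (as stored in the device-tree properties): a command number from the
+ * list below followed by that command's arguments, with variable-sized
+ * data carried inline as blobs. As an illustration (hypothetical blob),
+ * a single "write_gpio" step setting bits 0-3 to 0x5 would be the three
+ * words { PMF_CMD_WRITE_GPIO, 0x05, 0x0f }, consumed one word at a time
+ * by pmf_next32() in pmf_parser_write_gpio() below.
+ */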
+
+/*
+ * Individual command parsers
+ */
+
+#define PMF_PARSE_CALL(name, cmd, handlers, p...) \
+ do { \
+ if (cmd->error) \
+ return -ENXIO; \
+ if (handlers == NULL) \
+ return 0; \
+ if (handlers->name) \
+ return handlers->name(cmd->func, cmd->instdata, \
+ cmd->args, p); \
+ return -1; \
+ } while(0) \
+
+
+static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 value = (u8)pmf_next32(cmd);
+ u8 mask = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask);
+
+ PMF_PARSE_CALL(write_gpio, cmd, h, value, mask);
+}
+
+static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 mask = (u8)pmf_next32(cmd);
+ int rshift = (int)pmf_next32(cmd);
+ u8 xor = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n",
+ mask, rshift, xor);
+
+ PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor);
+}
+
+static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 value = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset);
+
+ PMF_PARSE_CALL(read_reg32, cmd, h, offset);
+}
+
+
+static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u16 value = (u16)pmf_next32(cmd);
+ u16 mask = (u16)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset);
+
+ PMF_PARSE_CALL(read_reg16, cmd, h, offset);
+}
+
+
+static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u8 value = (u8)pmf_next32(cmd);
+ u8 mask = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset);
+
+ PMF_PARSE_CALL(read_reg8, cmd, h, offset);
+}
+
+static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 duration = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: delay(duration: %d us)\n", duration);
+
+ PMF_PARSE_CALL(delay, cmd, h, duration);
+}
+
+static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 value = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x,mask: %08x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u16 value = (u16)pmf_next32(cmd);
+ u16 mask = (u16)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x,mask: %04x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u8 value = (u8)pmf_next32(cmd);
+ u8 mask = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x,mask: %02x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 bytes = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_i2c(bytes: %ud)\n", bytes);
+
+ PMF_PARSE_CALL(read_i2c, cmd, h, bytes);
+}
+
+static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 bytes = pmf_next32(cmd);
+ const void *blob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: write_i2c(bytes: %ud) ...\n", bytes);
+ LOG_BLOB("pmf: data: \n", blob, bytes);
+
+ PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob);
+}
+
+
+static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 maskbytes = pmf_next32(cmd);
+ u32 valuesbytes = pmf_next32(cmd);
+ u32 totalbytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, maskbytes);
+ const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
+
+ LOG_PARSE("pmf: rmw_i2c(maskbytes: %ud, valuebytes: %ud, "
+ "totalbytes: %d) ...\n",
+ maskbytes, valuesbytes, totalbytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
+
+ PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes,
+ maskblob, valuesblob);
+}
+
+static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
+
+ PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes);
+}
+
+
+static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+ const void *blob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
+ LOG_BLOB("pmf: data: \n", blob, bytes);
+
+ PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob);
+}
+
+static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 maskbytes = pmf_next32(cmd);
+ u32 valuesbytes = pmf_next32(cmd);
+ u32 totalbytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, maskbytes);
+ const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
+
+ LOG_PARSE("pmf: rmw_cfg(maskbytes: %d, valuesbytes: %d,"
+ " totalbytes: %d) ...\n",
+ maskbytes, valuesbytes, totalbytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
+
+ PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes,
+ totalbytes, maskblob, valuesblob);
+}
+
+
+static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 subaddr = (u8)pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %d)\n",
+ subaddr, bytes);
+
+ PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes);
+}
+
+static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 subaddr = (u8)pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+ const void *blob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %d) ...\n",
+ subaddr, bytes);
+ LOG_BLOB("pmf: data: \n", blob, bytes);
+
+ PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob);
+}
+
+static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 mode = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode);
+
+ PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode);
+}
+
+
+static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 subaddr = (u8)pmf_next32(cmd);
+ u32 maskbytes = pmf_next32(cmd);
+ u32 valuesbytes = pmf_next32(cmd);
+ u32 totalbytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, maskbytes);
+ const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
+
+ LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %d, valuesbytes: %d"
+ ", totalbytes: %d) ...\n",
+ subaddr, maskbytes, valuesbytes, totalbytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
+
+ PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes,
+ totalbytes, maskblob, valuesblob);
+}
+
+static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 xor = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x,"
+ " xor: %x)\n", offset, mask, shift, xor);
+
+ PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor);
+}
+
+static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 xor = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x,"
+ " xor: %x)\n", offset, mask, shift, xor);
+
+ PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor);
+}
+static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 xor = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x,"
+ " xor: %x)\n", offset, mask, shift, xor);
+
+ PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor);
+}
+
+static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x)\n",
+ offset, shift, mask);
+
+ PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask);
+}
+
+static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x)\n",
+ offset, shift, mask);
+
+ PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask);
+}
+
+static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x)\n",
+ offset, shift, mask);
+
+ PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask);
+}
+
+static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 bytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, bytes);
+ const void *valuesblob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: mask_and_compare(length: %d) ...\n", bytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, bytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, bytes);
+
+ PMF_PARSE_CALL(mask_and_compare, cmd, h,
+ bytes, maskblob, valuesblob);
+}
+
+
+typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h);
+
+static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] =
+{
+ NULL,
+ pmf_parser_write_gpio,
+ pmf_parser_read_gpio,
+ pmf_parser_write_reg32,
+ pmf_parser_read_reg32,
+ pmf_parser_write_reg16,
+ pmf_parser_read_reg16,
+ pmf_parser_write_reg8,
+ pmf_parser_read_reg8,
+ pmf_parser_delay,
+ pmf_parser_wait_reg32,
+ pmf_parser_wait_reg16,
+ pmf_parser_wait_reg8,
+ pmf_parser_read_i2c,
+ pmf_parser_write_i2c,
+ pmf_parser_rmw_i2c,
+ NULL, /* Bogus command */
+ NULL, /* Shift bytes right: NYI */
+ NULL, /* Shift bytes left: NYI */
+ pmf_parser_read_cfg,
+ pmf_parser_write_cfg,
+ pmf_parser_rmw_cfg,
+ pmf_parser_read_i2c_sub,
+ pmf_parser_write_i2c_sub,
+ pmf_parser_set_i2c_mode,
+ pmf_parser_rmw_i2c_sub,
+ pmf_parser_read_reg32_msrx,
+ pmf_parser_read_reg16_msrx,
+ pmf_parser_read_reg8_msrx,
+ pmf_parser_write_reg32_slm,
+ pmf_parser_write_reg16_slm,
+ pmf_parser_write_reg8_slm,
+ pmf_parser_mask_and_compare,
+};
+
+struct pmf_device {
+ struct list_head link;
+ struct device_node *node;
+ struct pmf_handlers *handlers;
+ struct list_head functions;
+ struct kref ref;
+};
+
+static LIST_HEAD(pmf_devices);
+static DEFINE_SPINLOCK(pmf_lock);
+static DEFINE_MUTEX(pmf_irq_mutex);
+
+static void pmf_release_device(struct kref *kref)
+{
+ struct pmf_device *dev = container_of(kref, struct pmf_device, ref);
+ kfree(dev);
+}
+
+static inline void pmf_put_device(struct pmf_device *dev)
+{
+ kref_put(&dev->ref, pmf_release_device);
+}
+
+static inline struct pmf_device *pmf_get_device(struct pmf_device *dev)
+{
+ kref_get(&dev->ref);
+ return dev;
+}
+
+static inline struct pmf_device *pmf_find_device(struct device_node *np)
+{
+ struct pmf_device *dev;
+
+ list_for_each_entry(dev, &pmf_devices, link) {
+ if (dev->node == np)
+ return pmf_get_device(dev);
+ }
+ return NULL;
+}
+
+static int pmf_parse_one(struct pmf_function *func,
+ struct pmf_handlers *handlers,
+ void *instdata, struct pmf_args *args)
+{
+ struct pmf_cmd cmd;
+ u32 ccode;
+ int count, rc;
+
+ cmd.cmdptr = func->data;
+ cmd.cmdend = func->data + func->length;
+ cmd.func = func;
+ cmd.instdata = instdata;
+ cmd.args = args;
+ cmd.error = 0;
+
+ LOG_PARSE("pmf: func %s, %d bytes, %s...\n",
+ func->name, func->length,
+ handlers ? "executing" : "parsing");
+
+ /* One subcommand to parse for now */
+ count = 1;
+
+ while(count-- && cmd.cmdptr < cmd.cmdend) {
+ /* Get opcode */
+ ccode = pmf_next32(&cmd);
+ /* Check if we are hitting a command list, fetch new count */
+ if (ccode == 0) {
+ count = pmf_next32(&cmd) - 1;
+ ccode = pmf_next32(&cmd);
+ }
+ if (cmd.error) {
+ LOG_ERROR("pmf: parse error, not enough data\n");
+ return -ENXIO;
+ }
+ if (ccode >= PMF_CMD_COUNT) {
+ LOG_ERROR("pmf: command code %d unknown !\n", ccode);
+ return -ENXIO;
+ }
+ if (pmf_parsers[ccode] == NULL) {
+ LOG_ERROR("pmf: no parser for command %d !\n", ccode);
+ return -ENXIO;
+ }
+ rc = pmf_parsers[ccode](&cmd, handlers);
+ if (rc != 0) {
+ LOG_ERROR("pmf: parser for command %d returned"
+ " error %d\n", ccode, rc);
+ return rc;
+ }
+ }
+
+ /* We are doing an initial parse pass, we need to adjust the size */
+ if (handlers == NULL)
+ func->length = cmd.cmdptr - func->data;
+
+ return 0;
+}
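+
+/*
+ * Illustrative note (inferred from the parser above, not from any spec):
+ * the command stream handed to pmf_parse_one() is a flat array of 32-bit
+ * words as returned by pmf_next32(). Each subcommand is an opcode followed
+ * by its arguments; for instance, with index 9 being "delay" in the
+ * pmf_parsers[] table above, the two words
+ *
+ * 0x00000009 0x000003e8
+ *
+ * would execute as delay(1000us). A leading zero word instead introduces a
+ * command list: 0, <count>, then <count> subcommands, as handled in the
+ * loop above.
+ */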
+
+static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata,
+ const char *name, u32 *data,
+ unsigned int length)
+{
+ int count = 0;
+ struct pmf_function *func = NULL;
+
+ DBG("pmf: Adding functions for platform-do-%s\n", name);
+
+ while (length >= 12) {
+ /* Allocate a structure */
+ func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL);
+ if (func == NULL)
+ goto bail;
+ kref_init(&func->ref);
+ INIT_LIST_HEAD(&func->irq_clients);
+ func->node = dev->node;
+ func->driver_data = driverdata;
+ func->name = name;
+ func->phandle = data[0];
+ func->flags = data[1];
+ data += 2;
+ length -= 8;
+ func->data = data;
+ func->length = length;
+ func->dev = dev;
+ DBG("pmf: idx %d: flags=%08x, phandle=%08x "
+ " %d bytes remaining, parsing...\n",
+ count+1, func->flags, func->phandle, length);
+ if (pmf_parse_one(func, NULL, NULL, NULL)) {
+ kfree(func);
+ goto bail;
+ }
+ length -= func->length;
+ data = (u32 *)(((u8 *)data) + func->length);
+ list_add(&func->link, &dev->functions);
+ pmf_get_device(dev);
+ count++;
+ }
+ bail:
+ DBG("pmf: Added %d functions\n", count);
+
+ return count;
+}
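+
+/*
+ * For reference (inferred from the loop above, not from any documentation):
+ * a "platform-do-<name>" property packs one or more entries back to back,
+ * each laid out as
+ *
+ * <phandle : u32> <flags : u32> <command words ...>
+ *
+ * The initial parse pass (pmf_parse_one() with NULL handlers) is what
+ * determines where one entry's command words end and the next entry starts.
+ */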
+
+static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
+{
+ struct property *pp;
+#define PP_PREFIX "platform-do-"
+ const int plen = strlen(PP_PREFIX);
+ int count = 0;
+
+ for (pp = dev->node->properties; pp != 0; pp = pp->next) {
+ const char *name;
+ if (strncmp(pp->name, PP_PREFIX, plen) != 0)
+ continue;
+ name = pp->name + plen;
+ if (strlen(name) && pp->length >= 12)
+ count += pmf_add_function_prop(dev, driverdata, name,
+ pp->value, pp->length);
+ }
+ return count;
+}
+
+
+int pmf_register_driver(struct device_node *np,
+ struct pmf_handlers *handlers,
+ void *driverdata)
+{
+ struct pmf_device *dev;
+ unsigned long flags;
+ int rc = 0;
+
+ if (handlers == NULL)
+ return -EINVAL;
+
+ DBG("pmf: registering driver for node %s\n", np->full_name);
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ dev = pmf_find_device(np);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ if (dev != NULL) {
+ DBG("pmf: already there !\n");
+ pmf_put_device(dev);
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL);
+ if (dev == NULL) {
+ DBG("pmf: no memory !\n");
+ return -ENOMEM;
+ }
+ kref_init(&dev->ref);
+ dev->node = of_node_get(np);
+ dev->handlers = handlers;
+ INIT_LIST_HEAD(&dev->functions);
+
+ rc = pmf_add_functions(dev, driverdata);
+ if (rc == 0) {
+ DBG("pmf: no functions, disposing.. \n");
+ of_node_put(np);
+ kfree(dev);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_add(&dev->link, &pmf_devices);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmf_register_driver);
+
+struct pmf_function *pmf_get_function(struct pmf_function *func)
+{
+ if (!try_module_get(func->dev->handlers->owner))
+ return NULL;
+ kref_get(&func->ref);
+ return func;
+}
+EXPORT_SYMBOL_GPL(pmf_get_function);
+
+static void pmf_release_function(struct kref *kref)
+{
+ struct pmf_function *func =
+ container_of(kref, struct pmf_function, ref);
+ pmf_put_device(func->dev);
+ kfree(func);
+}
+
+static inline void __pmf_put_function(struct pmf_function *func)
+{
+ kref_put(&func->ref, pmf_release_function);
+}
+
+void pmf_put_function(struct pmf_function *func)
+{
+ if (func == NULL)
+ return;
+ module_put(func->dev->handlers->owner);
+ __pmf_put_function(func);
+}
+EXPORT_SYMBOL_GPL(pmf_put_function);
+
+void pmf_unregister_driver(struct device_node *np)
+{
+ struct pmf_device *dev;
+ unsigned long flags;
+
+ DBG("pmf: unregistering driver for node %s\n", np->full_name);
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ dev = pmf_find_device(np);
+ if (dev == NULL) {
+ DBG("pmf: no such driver !\n");
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return;
+ }
+ list_del(&dev->link);
+
+ while(!list_empty(&dev->functions)) {
+ struct pmf_function *func =
+ list_entry(dev->functions.next, typeof(*func), link);
+ list_del(&func->link);
+ __pmf_put_function(func);
+ }
+
+ pmf_put_device(dev);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pmf_unregister_driver);
+
+struct pmf_function *__pmf_find_function(struct device_node *target,
+ const char *name, u32 flags)
+{
+ struct device_node *actor = of_node_get(target);
+ struct pmf_device *dev;
+ struct pmf_function *func, *result = NULL;
+ char fname[64];
+ const u32 *prop;
+ u32 ph;
+
+ /*
+ * Look for a "platform-*" function reference. If we can't find
+ * one, then we fallback to a direct call attempt
+ */
+ snprintf(fname, 63, "platform-%s", name);
+ prop = of_get_property(target, fname, NULL);
+ if (prop == NULL)
+ goto find_it;
+ ph = *prop;
+ if (ph == 0)
+ goto find_it;
+
+ /*
+ * Ok, now try to find the actor. If we can't find it, we fail,
+ * there is no point in falling back there
+ */
+ of_node_put(actor);
+ actor = of_find_node_by_phandle(ph);
+ if (actor == NULL)
+ return NULL;
+ find_it:
+ dev = pmf_find_device(actor);
+ if (dev == NULL) {
+ result = NULL;
+ goto out;
+ }
+
+ list_for_each_entry(func, &dev->functions, link) {
+ if (name && strcmp(name, func->name))
+ continue;
+ if (func->phandle && target->phandle != func->phandle)
+ continue;
+ if ((func->flags & flags) == 0)
+ continue;
+ result = func;
+ break;
+ }
+ pmf_put_device(dev);
+out:
+ of_node_put(actor);
+ return result;
+}
+
+
+int pmf_register_irq_client(struct device_node *target,
+ const char *name,
+ struct pmf_irq_client *client)
+{
+ struct pmf_function *func;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
+ if (func)
+ func = pmf_get_function(func);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ if (func == NULL)
+ return -ENODEV;
+
+ /* guard against manipulations of list */
+ mutex_lock(&pmf_irq_mutex);
+ if (list_empty(&func->irq_clients))
+ func->dev->handlers->irq_enable(func);
+
+ /* guard against pmf_do_irq while changing list */
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_add(&client->link, &func->irq_clients);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ client->func = func;
+ mutex_unlock(&pmf_irq_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmf_register_irq_client);
+
+void pmf_unregister_irq_client(struct pmf_irq_client *client)
+{
+ struct pmf_function *func = client->func;
+ unsigned long flags;
+
+ BUG_ON(func == NULL);
+
+ /* guard against manipulations of list */
+ mutex_lock(&pmf_irq_mutex);
+ client->func = NULL;
+
+ /* guard against pmf_do_irq while changing list */
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_del(&client->link);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ if (list_empty(&func->irq_clients))
+ func->dev->handlers->irq_disable(func);
+ mutex_unlock(&pmf_irq_mutex);
+ pmf_put_function(func);
+}
+EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
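+
+/*
+ * Minimal usage sketch for the IRQ client interface. This is illustrative
+ * only: the "extint-gpio4" name and my_data are made up, and the layout of
+ * struct pmf_irq_client comes from the platform function header, not from
+ * this file. The fields used below match what pmf_do_irq() dereferences.
+ *
+ * static void my_pmf_handler(void *data) { ... }
+ *
+ * static struct pmf_irq_client my_client = {
+ * .owner = THIS_MODULE,
+ * .handler = my_pmf_handler,
+ * .data = &my_data,
+ * };
+ *
+ * int rc = pmf_register_irq_client(np, "extint-gpio4", &my_client);
+ * ...
+ * pmf_unregister_irq_client(&my_client);
+ */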
+
+
+void pmf_do_irq(struct pmf_function *func)
+{
+ unsigned long flags;
+ struct pmf_irq_client *client;
+
+ /* For now, using a spinlock over the whole function. Can be made
+ * to drop the lock using 2 lists if necessary
+ */
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_for_each_entry(client, &func->irq_clients, link) {
+ if (!try_module_get(client->owner))
+ continue;
+ client->handler(client->data);
+ module_put(client->owner);
+ }
+ spin_unlock_irqrestore(&pmf_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pmf_do_irq);
+
+
+int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
+{
+ struct pmf_device *dev = func->dev;
+ void *instdata = NULL;
+ int rc = 0;
+
+ DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name);
+
+ if (dev->handlers->begin)
+ instdata = dev->handlers->begin(func, args);
+ rc = pmf_parse_one(func, dev->handlers, instdata, args);
+ if (dev->handlers->end)
+ dev->handlers->end(func, instdata);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmf_call_one);
+
+int pmf_do_functions(struct device_node *np, const char *name,
+ u32 phandle, u32 fflags, struct pmf_args *args)
+{
+ struct pmf_device *dev;
+ struct pmf_function *func, *tmp;
+ unsigned long flags;
+ int rc = -ENODEV;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+
+ dev = pmf_find_device(np);
+ if (dev == NULL) {
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return -ENODEV;
+ }
+ list_for_each_entry_safe(func, tmp, &dev->functions, link) {
+ if (name && strcmp(name, func->name))
+ continue;
+ if (phandle && func->phandle && phandle != func->phandle)
+ continue;
+ if ((func->flags & fflags) == 0)
+ continue;
+ if (pmf_get_function(func) == NULL)
+ continue;
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ rc = pmf_call_one(func, args);
+ pmf_put_function(func);
+ spin_lock_irqsave(&pmf_lock, flags);
+ }
+ pmf_put_device(dev);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmf_do_functions);
+
+
+struct pmf_function *pmf_find_function(struct device_node *target,
+ const char *name)
+{
+ struct pmf_function *func;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND);
+ if (func)
+ func = pmf_get_function(func);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return func;
+}
+EXPORT_SYMBOL_GPL(pmf_find_function);
+
+int pmf_call_function(struct device_node *target, const char *name,
+ struct pmf_args *args)
+{
+ struct pmf_function *func = pmf_find_function(target, name);
+ int rc;
+
+ if (func == NULL)
+ return -ENODEV;
+
+ rc = pmf_call_one(func, args);
+ pmf_put_function(func);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmf_call_function);
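+
+/*
+ * Putting the on-demand side together, a hypothetical client (all names
+ * below are made up for illustration) would do roughly:
+ *
+ * struct pmf_args args; (layout comes from the platform function header)
+ *
+ * ... fill in args as required by the target function, or pass NULL
+ * when it takes no arguments ...
+ *
+ * rc = pmf_call_function(np, "slewing-done", &args);
+ *
+ * When the same function is invoked repeatedly, the lookup can be cached
+ * with pmf_find_function()/pmf_call_one() and released with
+ * pmf_put_function().
+ */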
+
diff --git a/kernel/arch/powerpc/platforms/powermac/pic.c b/kernel/arch/powerpc/platforms/powermac/pic.c
new file mode 100644
index 000000000..59cfc9d63
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/pic.c
@@ -0,0 +1,659 @@
+/*
+ * Support for the interrupt controllers found on Power Macintosh,
+ * currently Apple's "Grand Central" interrupt controller in all
+ * its incarnations. OpenPIC support used on newer machines is
+ * in a separate file
+ *
+ * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * IBM, Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/syscore_ops.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/time.h>
+#include <asm/pmac_feature.h>
+#include <asm/mpic.h>
+#include <asm/xmon.h>
+
+#include "pmac.h"
+
+#ifdef CONFIG_PPC32
+struct pmac_irq_hw {
+ unsigned int event;
+ unsigned int enable;
+ unsigned int ack;
+ unsigned int level;
+};
+
+/* Workaround flags for 32bit powermac machines */
+unsigned int of_irq_workarounds;
+struct device_node *of_irq_dflt_pic;
+
+/* Default addresses */
+static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
+
+static int max_irqs;
+static int max_real_irqs;
+
+static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
+
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
+static int pmac_irq_cascade = -1;
+static struct irq_domain *pmac_pic_host;
+
+static void __pmac_retrigger(unsigned int irq_nr)
+{
+ if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
+ __set_bit(irq_nr, ppc_lost_interrupts);
+ irq_nr = pmac_irq_cascade;
+ mb();
+ }
+ if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
+ atomic_inc(&ppc_n_lost_interrupts);
+ set_dec(1);
+ }
+}
+
+static void pmac_mask_and_ack_irq(struct irq_data *d)
+{
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long bit = 1UL << (src & 0x1f);
+ int i = src >> 5;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ __clear_bit(src, ppc_cached_irq_mask);
+ if (__test_and_clear_bit(src, ppc_lost_interrupts))
+ atomic_dec(&ppc_n_lost_interrupts);
+ out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
+ out_le32(&pmac_irq_hw[i]->ack, bit);
+ do {
+ /* make sure ack gets to controller before we enable
+ interrupts */
+ mb();
+ } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
+ != (ppc_cached_irq_mask[i] & bit));
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+static void pmac_ack_irq(struct irq_data *d)
+{
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long bit = 1UL << (src & 0x1f);
+ int i = src >> 5;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ if (__test_and_clear_bit(src, ppc_lost_interrupts))
+ atomic_dec(&ppc_n_lost_interrupts);
+ out_le32(&pmac_irq_hw[i]->ack, bit);
+ (void)in_le32(&pmac_irq_hw[i]->ack);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
+{
+ unsigned long bit = 1UL << (irq_nr & 0x1f);
+ int i = irq_nr >> 5;
+
+ if ((unsigned)irq_nr >= max_irqs)
+ return;
+
+ /* enable unmasked interrupts */
+ out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
+
+ do {
+ /* make sure mask gets to controller before we
+ return to user */
+ mb();
+ } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
+ != (ppc_cached_irq_mask[i] & bit));
+
+ /*
+ * Unfortunately, setting the bit in the enable register
+ * when the device interrupt is already on *doesn't* set
+ * the bit in the flag register or request another interrupt.
+ */
+ if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
+ __pmac_retrigger(irq_nr);
+}
+
+/* When an irq gets requested for the first client, if it's an
+ * edge interrupt, we clear any previous one on the controller
+ */
+static unsigned int pmac_startup_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long bit = 1UL << (src & 0x1f);
+ int i = src >> 5;
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ if (!irqd_is_level_type(d))
+ out_le32(&pmac_irq_hw[i]->ack, bit);
+ __set_bit(src, ppc_cached_irq_mask);
+ __pmac_set_irq_mask(src, 0);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+
+ return 0;
+}
+
+static void pmac_mask_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ unsigned int src = irqd_to_hwirq(d);
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ __clear_bit(src, ppc_cached_irq_mask);
+ __pmac_set_irq_mask(src, 1);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+static void pmac_unmask_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ unsigned int src = irqd_to_hwirq(d);
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ __set_bit(src, ppc_cached_irq_mask);
+ __pmac_set_irq_mask(src, 0);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+static int pmac_retrigger(struct irq_data *d)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ __pmac_retrigger(irqd_to_hwirq(d));
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ return 1;
+}
+
+static struct irq_chip pmac_pic = {
+ .name = "PMAC-PIC",
+ .irq_startup = pmac_startup_irq,
+ .irq_mask = pmac_mask_irq,
+ .irq_ack = pmac_ack_irq,
+ .irq_mask_ack = pmac_mask_and_ack_irq,
+ .irq_unmask = pmac_unmask_irq,
+ .irq_retrigger = pmac_retrigger,
+};
+
+static irqreturn_t gatwick_action(int cpl, void *dev_id)
+{
+ unsigned long flags;
+ int irq, bits;
+ int rc = IRQ_NONE;
+
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
+ int i = irq >> 5;
+ bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
+ bits |= in_le32(&pmac_irq_hw[i]->level);
+ bits &= ppc_cached_irq_mask[i];
+ if (bits == 0)
+ continue;
+ irq += __ilog2(bits);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ generic_handle_irq(irq);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ rc = IRQ_HANDLED;
+ }
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ return rc;
+}
+
+static unsigned int pmac_pic_get_irq(void)
+{
+ int irq;
+ unsigned long bits = 0;
+ unsigned long flags;
+
+#ifdef CONFIG_PPC_PMAC32_PSURGE
+ /* IPI's are a hack on the powersurge -- Cort */
+ if (smp_processor_id() != 0) {
+ return psurge_secondary_virq;
+ }
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
+ for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
+ int i = irq >> 5;
+ bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
+ bits |= in_le32(&pmac_irq_hw[i]->level);
+ bits &= ppc_cached_irq_mask[i];
+ if (bits == 0)
+ continue;
+ irq += __ilog2(bits);
+ break;
+ }
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ if (unlikely(irq < 0))
+ return NO_IRQ;
+ return irq_linear_revmap(pmac_pic_host, irq);
+}
+
+#ifdef CONFIG_XMON
+static struct irqaction xmon_action = {
+ .handler = xmon_irq,
+ .flags = 0,
+ .name = "NMI - XMON"
+};
+#endif
+
+static struct irqaction gatwick_cascade_action = {
+ .handler = gatwick_action,
+ .name = "cascade",
+};
+
+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
+{
+ /* We match all, we don't always have a node anyway */
+ return 1;
+}
+
+static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ if (hw >= max_irqs)
+ return -EINVAL;
+
+ /* Mark level interrupts, set delayed disable for edge ones and set
+ * handlers
+ */
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops pmac_pic_host_ops = {
+ .match = pmac_pic_host_match,
+ .map = pmac_pic_host_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void __init pmac_pic_probe_oldstyle(void)
+{
+ int i;
+ struct device_node *master = NULL;
+ struct device_node *slave = NULL;
+ u8 __iomem *addr;
+ struct resource r;
+
+ /* Set our get_irq function */
+ ppc_md.get_irq = pmac_pic_get_irq;
+
+ /*
+ * Find the interrupt controller type & node
+ */
+
+ if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
+ max_irqs = max_real_irqs = 32;
+ } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
+ max_irqs = max_real_irqs = 32;
+ /* We might have a second cascaded ohare */
+ slave = of_find_node_by_name(NULL, "pci106b,7");
+ if (slave)
+ max_irqs = 64;
+ } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
+ max_irqs = max_real_irqs = 64;
+
+ /* We might have a second cascaded heathrow */
+
+ /* Compensate for of_node_put() in of_find_node_by_name() */
+ of_node_get(master);
+ slave = of_find_node_by_name(master, "mac-io");
+
+ /* Check ordering of master & slave */
+ if (of_device_is_compatible(master, "gatwick")) {
+ struct device_node *tmp;
+ BUG_ON(slave == NULL);
+ tmp = master;
+ master = slave;
+ slave = tmp;
+ }
+
+ /* We found a slave */
+ if (slave)
+ max_irqs = 128;
+ }
+ BUG_ON(master == NULL);
+
+ /*
+ * Allocate an irq host
+ */
+ pmac_pic_host = irq_domain_add_linear(master, max_irqs,
+ &pmac_pic_host_ops, NULL);
+ BUG_ON(pmac_pic_host == NULL);
+ irq_set_default_host(pmac_pic_host);
+
+ /* Get addresses of first controller if we have a node for it */
+ BUG_ON(of_address_to_resource(master, 0, &r));
+
+ /* Map interrupts of primary controller */
+ addr = (u8 __iomem *) ioremap(r.start, 0x40);
+ i = 0;
+ pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x20);
+ if (max_real_irqs > 32)
+ pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x10);
+ printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
+ master->full_name, max_real_irqs);
+ of_node_put(master);
+
+
+ /* Map interrupts of cascaded controller */
+ if (slave && !of_address_to_resource(slave, 0, &r)) {
+ addr = (u8 __iomem *)ioremap(r.start, 0x40);
+ pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x20);
+ if (max_irqs > 64)
+ pmac_irq_hw[i++] =
+ (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x10);
+ pmac_irq_cascade = irq_of_parse_and_map(slave, 0);
+
+ printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
+ " cascade: %d\n", slave->full_name,
+ max_irqs - max_real_irqs, pmac_irq_cascade);
+ }
+ of_node_put(slave);
+
+ /* Disable all interrupts in all controllers */
+ for (i = 0; i * 32 < max_irqs; ++i)
+ out_le32(&pmac_irq_hw[i]->enable, 0);
+
+ /* Hookup cascade irq */
+ if (slave && pmac_irq_cascade != NO_IRQ)
+ setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
+
+ printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
+#ifdef CONFIG_XMON
+ setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
+#endif
+}
+
+int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
+{
+ const u32 *ints = NULL;
+ int intlen;
+
+ /*
+ * Old machines just have a list of interrupt numbers
+ * and no interrupt-controller nodes. We also have dodgy
+ * cases where the AAPL,interrupts property is completely
+ * missing behind pci-pci bridges and we have to get it
+ * from the parent (the bridge itself, as apple just wired
+ * everything together on these)
+ */
+ while (device) {
+ ints = of_get_property(device, "AAPL,interrupts", &intlen);
+ if (ints != NULL)
+ break;
+ device = device->parent;
+ if (device && strcmp(device->type, "pci") != 0)
+ break;
+ }
+ if (ints == NULL)
+ return -EINVAL;
+ intlen /= sizeof(u32);
+
+ if (index >= intlen)
+ return -EINVAL;
+
+ out_irq->np = NULL;
+ out_irq->args[0] = ints[index];
+ out_irq->args_count = 1;
+
+ return 0;
+}
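+
+/*
+ * Illustrative example (hypothetical values, consistent with the parsing
+ * above): an OldWorld device node might carry
+ *
+ * AAPL,interrupts = <0x1a 0x1c>;
+ *
+ * with no interrupt-parent linkage, so index 0 resolves to hardware irq
+ * 0x1a and index 1 to 0x1c, taken from the device itself or, failing that,
+ * from the pci-pci bridge above it.
+ */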
+#endif /* CONFIG_PPC32 */
+
+static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
+{
+#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
+ struct device_node* pswitch;
+ int nmi_irq;
+
+ pswitch = of_find_node_by_name(NULL, "programmer-switch");
+ if (pswitch) {
+ nmi_irq = irq_of_parse_and_map(pswitch, 0);
+ if (nmi_irq != NO_IRQ) {
+ mpic_irq_set_priority(nmi_irq, 9);
+ setup_irq(nmi_irq, &xmon_action);
+ }
+ of_node_put(pswitch);
+ }
+#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
+}
+
+static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
+ int master)
+{
+ const char *name = master ? " MPIC 1 " : " MPIC 2 ";
+ struct mpic *mpic;
+ unsigned int flags = master ? 0 : MPIC_SECONDARY;
+
+ pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
+
+ if (of_get_property(np, "big-endian", NULL))
+ flags |= MPIC_BIG_ENDIAN;
+
+ /* Primary Big Endian means HT interrupts. This is quite dodgy
+ * but works until I find a better way
+ */
+ if (master && (flags & MPIC_BIG_ENDIAN))
+ flags |= MPIC_U3_HT_IRQS;
+
+ mpic = mpic_alloc(np, 0, flags, 0, 0, name);
+ if (mpic == NULL)
+ return NULL;
+
+ mpic_init(mpic);
+
+ return mpic;
+ }
+
+static int __init pmac_pic_probe_mpic(void)
+{
+ struct mpic *mpic1, *mpic2;
+ struct device_node *np, *master = NULL, *slave = NULL;
+
+ /* We can have up to 2 MPICs cascaded */
+ for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
+ != NULL;) {
+ if (master == NULL &&
+ of_get_property(np, "interrupts", NULL) == NULL)
+ master = of_node_get(np);
+ else if (slave == NULL)
+ slave = of_node_get(np);
+ if (master && slave)
+ break;
+ }
+
+ /* Check for bogus setups */
+ if (master == NULL && slave != NULL) {
+ master = slave;
+ slave = NULL;
+ }
+
+ /* Not found, default to good old pmac pic */
+ if (master == NULL)
+ return -ENODEV;
+
+ /* Set master handler */
+ ppc_md.get_irq = mpic_get_irq;
+
+ /* Setup master */
+ mpic1 = pmac_setup_one_mpic(master, 1);
+ BUG_ON(mpic1 == NULL);
+
+ /* Install NMI if any */
+ pmac_pic_setup_mpic_nmi(mpic1);
+
+ of_node_put(master);
+
+ /* Set up a cascaded controller, if present */
+ if (slave) {
+ mpic2 = pmac_setup_one_mpic(slave, 0);
+ if (mpic2 == NULL)
+ printk(KERN_ERR "Failed to setup slave MPIC\n");
+ of_node_put(slave);
+ }
+
+ return 0;
+}
+
+
+void __init pmac_pic_init(void)
+{
+ /* We configure the OF parsing based on our oldworld vs. newworld
+ * platform type and whether we were booted by BootX.
+ */
+#ifdef CONFIG_PPC32
+ if (!pmac_newworld)
+ of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
+ if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
+ of_irq_workarounds |= OF_IMAP_NO_PHANDLE;
+
+ /* If we don't have phandles on a newworld, then try to locate a
+ * default interrupt controller (happens when booting with BootX).
+ * We do a first match here, hopefully, that only ever happens on
+ * machines with one controller.
+ */
+ if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
+ struct device_node *np;
+
+ for_each_node_with_property(np, "interrupt-controller") {
+ /* Skip /chosen/interrupt-controller */
+ if (strcmp(np->name, "chosen") == 0)
+ continue;
+ /* It seems like at least one person wants
+ * to use BootX on a machine with an AppleKiwi
+ * controller which happens to pretend to be an
+ * interrupt controller too. */
+ if (strcmp(np->name, "AppleKiwi") == 0)
+ continue;
+ /* I think we found one ! */
+ of_irq_dflt_pic = np;
+ break;
+ }
+ }
+#endif /* CONFIG_PPC32 */
+
+ /* We first try to detect Apple's new Core99 chipset, since mac-io
+ * is quite different on those machines and contains an IBM MPIC2.
+ */
+ if (pmac_pic_probe_mpic() == 0)
+ return;
+
+#ifdef CONFIG_PPC32
+ pmac_pic_probe_oldstyle();
+#endif
+}
+
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
+/*
+ * These procedures are used in implementing sleep on the powerbooks.
+ * sleep_save_intrs() saves the states of all interrupt enables
+ * and disables all interrupts except for the nominated one.
+ * sleep_restore_intrs() restores the states of all interrupt enables.
+ */
+unsigned long sleep_save_mask[2];
+
+/* This used to be passed by the PMU driver but that link got
+ * broken with the new driver model. We use this tweak for now...
+ * We really want to do things differently though...
+ */
+static int pmacpic_find_viaint(void)
+{
+ int viaint = -1;
+
+#ifdef CONFIG_ADB_PMU
+ struct device_node *np;
+
+ if (pmu_get_model() != PMU_OHARE_BASED)
+ goto not_found;
+ np = of_find_node_by_name(NULL, "via-pmu");
+ if (np == NULL)
+ goto not_found;
+ viaint = irq_of_parse_and_map(np, 0);
+
+not_found:
+#endif /* CONFIG_ADB_PMU */
+ return viaint;
+}
+
+static int pmacpic_suspend(void)
+{
+ int viaint = pmacpic_find_viaint();
+
+ sleep_save_mask[0] = ppc_cached_irq_mask[0];
+ sleep_save_mask[1] = ppc_cached_irq_mask[1];
+ ppc_cached_irq_mask[0] = 0;
+ ppc_cached_irq_mask[1] = 0;
+ if (viaint > 0)
+ set_bit(viaint, ppc_cached_irq_mask);
+ out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
+ if (max_real_irqs > 32)
+ out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
+ (void)in_le32(&pmac_irq_hw[0]->event);
+ /* make sure mask gets to controller before we return to caller */
+ mb();
+ (void)in_le32(&pmac_irq_hw[0]->enable);
+
+ return 0;
+}
+
+static void pmacpic_resume(void)
+{
+ int i;
+
+ out_le32(&pmac_irq_hw[0]->enable, 0);
+ if (max_real_irqs > 32)
+ out_le32(&pmac_irq_hw[1]->enable, 0);
+ mb();
+ for (i = 0; i < max_real_irqs; ++i)
+ if (test_bit(i, sleep_save_mask))
+ pmac_unmask_irq(irq_get_irq_data(i));
+}
+
+static struct syscore_ops pmacpic_syscore_ops = {
+ .suspend = pmacpic_suspend,
+ .resume = pmacpic_resume,
+};
+
+static int __init init_pmacpic_syscore(void)
+{
+ if (pmac_irq_hw[0])
+ register_syscore_ops(&pmacpic_syscore_ops);
+ return 0;
+}
+
+machine_subsys_initcall(powermac, init_pmacpic_syscore);
+
+#endif /* CONFIG_PM && CONFIG_PPC32 */
diff --git a/kernel/arch/powerpc/platforms/powermac/pmac.h b/kernel/arch/powerpc/platforms/powermac/pmac.h
new file mode 100644
index 000000000..e7f8163d6
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/pmac.h
@@ -0,0 +1,43 @@
+#ifndef __PMAC_H__
+#define __PMAC_H__
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+
+/*
+ * Declaration for the various functions exported by the
+ * pmac_* files. Mostly for use by pmac_setup
+ */
+
+struct rtc_time;
+
+extern int pmac_newworld;
+
+extern long pmac_time_init(void);
+extern unsigned long pmac_get_boot_time(void);
+extern void pmac_get_rtc_time(struct rtc_time *);
+extern int pmac_set_rtc_time(struct rtc_time *);
+extern void pmac_read_rtc_time(void);
+extern void pmac_calibrate_decr(void);
+extern void pmac_pci_irq_fixup(struct pci_dev *);
+extern void pmac_pci_init(void);
+
+extern void pmac_nvram_update(void);
+extern unsigned char pmac_nvram_read_byte(int addr);
+extern void pmac_nvram_write_byte(int addr, unsigned char val);
+extern void pmac_pcibios_after_init(void);
+extern int of_show_percpuinfo(struct seq_file *m, int i);
+
+extern void pmac_setup_pci_dma(void);
+extern void pmac_check_ht_link(void);
+
+extern void pmac_setup_smp(void);
+extern int psurge_secondary_virq;
+extern void low_cpu_die(void) __attribute__((noreturn));
+
+extern int pmac_nvram_init(void);
+extern void pmac_pic_init(void);
+
+extern struct pci_controller_ops pmac_pci_controller_ops;
+
+#endif /* __PMAC_H__ */
diff --git a/kernel/arch/powerpc/platforms/powermac/setup.c b/kernel/arch/powerpc/platforms/powermac/setup.c
new file mode 100644
index 000000000..8dd78f4e1
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/setup.c
@@ -0,0 +1,666 @@
+/*
+ * Powermac setup and early boot code plus other random bits.
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
+ *
+ * Derived from "arch/alpha/kernel/setup.c"
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/*
+ * bootup setup stuff..
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/user.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/initrd.h>
+#include <linux/vt_kern.h>
+#include <linux/console.h>
+#include <linux/pci.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/bitops.h>
+#include <linux/suspend.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/memblock.h>
+
+#include <asm/reg.h>
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+#include <asm/ohare.h>
+#include <asm/mediabay.h>
+#include <asm/machdep.h>
+#include <asm/dma.h>
+#include <asm/cputable.h>
+#include <asm/btext.h>
+#include <asm/pmac_feature.h>
+#include <asm/time.h>
+#include <asm/mmu_context.h>
+#include <asm/iommu.h>
+#include <asm/smu.h>
+#include <asm/pmc.h>
+#include <asm/udbg.h>
+
+#include "pmac.h"
+
+#undef SHOW_GATWICK_IRQS
+
+int ppc_override_l2cr = 0;
+int ppc_override_l2cr_value;
+int has_l2cache = 0;
+
+int pmac_newworld;
+
+static int current_root_goodness = -1;
+
+extern struct machdep_calls pmac_md;
+
+#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
+
+#ifdef CONFIG_PPC64
+int sccdbg;
+#endif
+
+sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
+EXPORT_SYMBOL(sys_ctrler);
+
+#ifdef CONFIG_PMAC_SMU
+unsigned long smu_cmdbuf_abs;
+EXPORT_SYMBOL(smu_cmdbuf_abs);
+#endif
+
+static void pmac_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *np;
+ const char *pp;
+ int plen;
+ int mbmodel;
+ unsigned int mbflags;
+ char* mbname;
+
+ mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
+ PMAC_MB_INFO_MODEL, 0);
+ mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
+ PMAC_MB_INFO_FLAGS, 0);
+ if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
+ (long) &mbname) != 0)
+ mbname = "Unknown";
+
+ /* find motherboard type */
+ seq_printf(m, "machine\t\t: ");
+ np = of_find_node_by_path("/");
+ if (np != NULL) {
+ pp = of_get_property(np, "model", NULL);
+ if (pp != NULL)
+ seq_printf(m, "%s\n", pp);
+ else
+ seq_printf(m, "PowerMac\n");
+ pp = of_get_property(np, "compatible", &plen);
+ if (pp != NULL) {
+ seq_printf(m, "motherboard\t:");
+ while (plen > 0) {
+ int l = strlen(pp) + 1;
+ seq_printf(m, " %s", pp);
+ plen -= l;
+ pp += l;
+ }
+ seq_printf(m, "\n");
+ }
+ of_node_put(np);
+ } else
+ seq_printf(m, "PowerMac\n");
+
+ /* print parsed model */
+ seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
+ seq_printf(m, "pmac flags\t: %08x\n", mbflags);
+
+ /* find l2 cache info */
+ np = of_find_node_by_name(NULL, "l2-cache");
+ if (np == NULL)
+ np = of_find_node_by_type(NULL, "cache");
+ if (np != NULL) {
+ const unsigned int *ic =
+ of_get_property(np, "i-cache-size", NULL);
+ const unsigned int *dc =
+ of_get_property(np, "d-cache-size", NULL);
+ seq_printf(m, "L2 cache\t:");
+ has_l2cache = 1;
+ if (of_get_property(np, "cache-unified", NULL) != 0 && dc) {
+ seq_printf(m, " %dK unified", *dc / 1024);
+ } else {
+ if (ic)
+ seq_printf(m, " %dK instruction", *ic / 1024);
+ if (dc)
+ seq_printf(m, "%s %dK data",
+ (ic? " +": ""), *dc / 1024);
+ }
+ pp = of_get_property(np, "ram-type", NULL);
+ if (pp)
+ seq_printf(m, " %s", pp);
+ seq_printf(m, "\n");
+ of_node_put(np);
+ }
+
+ /* Indicate newworld/oldworld */
+ seq_printf(m, "pmac-generation\t: %s\n",
+ pmac_newworld ? "NewWorld" : "OldWorld");
+}
+
+#ifndef CONFIG_ADB_CUDA
+int find_via_cuda(void)
+{
+ struct device_node *dn = of_find_node_by_name(NULL, "via-cuda");
+
+ if (!dn)
+ return 0;
+ of_node_put(dn);
+ printk("WARNING ! Your machine is CUDA-based but your kernel\n");
+ printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
+ return 0;
+}
+#endif
+
+#ifndef CONFIG_ADB_PMU
+int find_via_pmu(void)
+{
+ struct device_node *dn = of_find_node_by_name(NULL, "via-pmu");
+
+ if (!dn)
+ return 0;
+ of_node_put(dn);
+ printk("WARNING ! Your machine is PMU-based but your kernel\n");
+ printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
+ return 0;
+}
+#endif
+
+#ifndef CONFIG_PMAC_SMU
+int smu_init(void)
+{
+ /* should check and warn if SMU is present */
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PPC32
+static volatile u32 *sysctrl_regs;
+
+static void __init ohare_init(void)
+{
+ struct device_node *dn;
+
+ /* this area has the CPU identification register
+ and some registers used by smp boards */
+ sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
+
+ /*
+ * Turn on the L2 cache.
+ * We assume that we have a PSX memory controller iff
+ * we have an ohare I/O controller.
+ */
+ dn = of_find_node_by_name(NULL, "ohare");
+ if (dn) {
+ of_node_put(dn);
+ if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
+ if (sysctrl_regs[4] & 0x10)
+ sysctrl_regs[4] |= 0x04000020;
+ else
+ sysctrl_regs[4] |= 0x04000000;
+ if(has_l2cache)
+ printk(KERN_INFO "Level 2 cache enabled\n");
+ }
+ }
+}
+
+static void __init l2cr_init(void)
+{
+ /* Checks "l2cr-value" property in the registry */
+ if (cpu_has_feature(CPU_FTR_L2CR)) {
+ struct device_node *np = of_find_node_by_name(NULL, "cpus");
+ if (np == 0)
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np != 0) {
+ const unsigned int *l2cr =
+ of_get_property(np, "l2cr-value", NULL);
+ if (l2cr != 0) {
+ ppc_override_l2cr = 1;
+ ppc_override_l2cr_value = *l2cr;
+ _set_L2CR(0);
+ _set_L2CR(ppc_override_l2cr_value);
+ }
+ of_node_put(np);
+ }
+ }
+
+ if (ppc_override_l2cr)
+ printk(KERN_INFO "L2CR overridden (0x%x), "
+ "backside cache is %s\n",
+ ppc_override_l2cr_value,
+ (ppc_override_l2cr_value & 0x80000000)
+ ? "enabled" : "disabled");
+}
+#endif
+
+static void __init pmac_setup_arch(void)
+{
+ struct device_node *cpu, *ic;
+ const int *fp;
+ unsigned long pvr;
+
+ pvr = PVR_VER(mfspr(SPRN_PVR));
+
+ /* Set loops_per_jiffy to a half-way reasonable value,
+ for use until calibrate_delay gets called. */
+ loops_per_jiffy = 50000000 / HZ;
+ cpu = of_find_node_by_type(NULL, "cpu");
+ if (cpu != NULL) {
+ fp = of_get_property(cpu, "clock-frequency", NULL);
+ if (fp != NULL) {
+ if (pvr >= 0x30 && pvr < 0x80)
+ /* PPC970 etc. */
+ loops_per_jiffy = *fp / (3 * HZ);
+ else if (pvr == 4 || pvr >= 8)
+ /* 604, G3, G4 etc. */
+ loops_per_jiffy = *fp / HZ;
+ else
+ /* 601, 603, etc. */
+ loops_per_jiffy = *fp / (2 * HZ);
+ }
+ of_node_put(cpu);
+ }
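+
+ /*
+ * Illustrative numbers only (hypothetical machines): a 500 MHz G3
+ * (pvr 8) ends up with loops_per_jiffy = 500000000 / HZ, while a
+ * 1.6 GHz PPC970-class CPU falls in the 0x30..0x7f PVR range above
+ * and gets 1600000000 / (3 * HZ). calibrate_delay() later replaces
+ * these estimates.
+ */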
+
+ /* See if newworld or oldworld */
+ ic = of_find_node_with_property(NULL, "interrupt-controller");
+ if (ic) {
+ pmac_newworld = 1;
+ of_node_put(ic);
+ }
+
+ /* Lookup PCI hosts */
+ pmac_pci_init();
+
+#ifdef CONFIG_PPC32
+ ohare_init();
+ l2cr_init();
+#endif /* CONFIG_PPC32 */
+
+ find_via_cuda();
+ find_via_pmu();
+ smu_init();
+
+#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \
+ defined(CONFIG_PPC64)
+ pmac_nvram_init();
+#endif
+
+#ifdef CONFIG_PPC32
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+ ROOT_DEV = DEFAULT_ROOT_DEVICE;
+#endif
+
+#ifdef CONFIG_ADB
+ if (strstr(boot_command_line, "adb_sync")) {
+ extern int __adb_probe_sync;
+ __adb_probe_sync = 1;
+ }
+#endif /* CONFIG_ADB */
+}
+
+#ifdef CONFIG_SCSI
+void note_scsi_host(struct device_node *node, void *host)
+{
+}
+EXPORT_SYMBOL(note_scsi_host);
+#endif
+
+static int initializing = 1;
+
+static int pmac_late_init(void)
+{
+ initializing = 0;
+ return 0;
+}
+machine_late_initcall(powermac, pmac_late_init);
+
+/*
+ * This is __init_refok because we check for "initializing" before
+ * touching any of the __init sensitive things and "initializing"
+ * will be false after __init time. This can't be __init because it
+ * can be called whenever a disk is first accessed.
+ */
+void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
+{
+ char *p;
+
+ if (!initializing)
+ return;
+ if ((goodness <= current_root_goodness) &&
+ ROOT_DEV != DEFAULT_ROOT_DEVICE)
+ return;
+ p = strstr(boot_command_line, "root=");
+ if (p != NULL && (p == boot_command_line || p[-1] == ' '))
+ return;
+
+ ROOT_DEV = dev + part;
+ current_root_goodness = goodness;
+}
+
+#ifdef CONFIG_ADB_CUDA
+static void cuda_restart(void)
+{
+ struct adb_request req;
+
+ cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
+ for (;;)
+ cuda_poll();
+}
+
+static void cuda_shutdown(void)
+{
+ struct adb_request req;
+
+ cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
+ for (;;)
+ cuda_poll();
+}
+
+#else
+#define cuda_restart()
+#define cuda_shutdown()
+#endif
+
+#ifndef CONFIG_ADB_PMU
+#define pmu_restart()
+#define pmu_shutdown()
+#endif
+
+#ifndef CONFIG_PMAC_SMU
+#define smu_restart()
+#define smu_shutdown()
+#endif
+
+static void pmac_restart(char *cmd)
+{
+ switch (sys_ctrler) {
+ case SYS_CTRLER_CUDA:
+ cuda_restart();
+ break;
+ case SYS_CTRLER_PMU:
+ pmu_restart();
+ break;
+ case SYS_CTRLER_SMU:
+ smu_restart();
+ break;
+ default: ;
+ }
+}
+
+static void pmac_power_off(void)
+{
+ switch (sys_ctrler) {
+ case SYS_CTRLER_CUDA:
+ cuda_shutdown();
+ break;
+ case SYS_CTRLER_PMU:
+ pmu_shutdown();
+ break;
+ case SYS_CTRLER_SMU:
+ smu_shutdown();
+ break;
+ default: ;
+ }
+}
+
+static void
+pmac_halt(void)
+{
+ pmac_power_off();
+}
+
+/*
+ * Early initialization.
+ */
+static void __init pmac_init_early(void)
+{
+ /* Enable early btext debug if requested */
+ if (strstr(boot_command_line, "btextdbg")) {
+ udbg_adb_init_early();
+ register_early_udbg_console();
+ }
+
+ /* Probe motherboard chipset */
+ pmac_feature_init();
+
+ /* Initialize debug stuff */
+ udbg_scc_init(!!strstr(boot_command_line, "sccdbg"));
+ udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
+
+#ifdef CONFIG_PPC64
+ iommu_init_early_dart(&pmac_pci_controller_ops);
+#endif
+
+ /* SMP Init has to be done early as we need to patch up
+ * cpu_possible_mask before interrupt stacks are allocated
+ * or kaboom...
+ */
+#ifdef CONFIG_SMP
+ pmac_setup_smp();
+#endif
+}
+
+static int __init pmac_declare_of_platform_devices(void)
+{
+ struct device_node *np;
+
+ if (machine_is(chrp))
+ return -1;
+
+ np = of_find_node_by_name(NULL, "valkyrie");
+ if (np) {
+ of_platform_device_create(np, "valkyrie", NULL);
+ of_node_put(np);
+ }
+ np = of_find_node_by_name(NULL, "platinum");
+ if (np) {
+ of_platform_device_create(np, "platinum", NULL);
+ of_node_put(np);
+ }
+ np = of_find_node_by_type(NULL, "smu");
+ if (np) {
+ of_platform_device_create(np, "smu", NULL);
+ of_node_put(np);
+ }
+ np = of_find_node_by_type(NULL, "fcu");
+ if (np == NULL) {
+ /* Some machines have strangely broken device-tree */
+ np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
+ }
+ if (np) {
+ of_platform_device_create(np, "temperature", NULL);
+ of_node_put(np);
+ }
+
+ return 0;
+}
+machine_device_initcall(powermac, pmac_declare_of_platform_devices);
+
+#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
+/*
+ * This is called very early, as part of console_init() (typically just after
+ * time_init()). This function is responsible for trying to find a good
+ * default console on serial ports. It tries to match the open firmware
+ * default output with one of the available serial console drivers.
+ */
+static int __init check_pmac_serial_console(void)
+{
+ struct device_node *prom_stdout = NULL;
+ int offset = 0;
+ const char *name;
+#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
+ char *devname = "ttyS";
+#else
+ char *devname = "ttyPZ";
+#endif
+
+ pr_debug(" -> check_pmac_serial_console()\n");
+
+ /* The user has requested a console so this is already set up. */
+ if (strstr(boot_command_line, "console=")) {
+ pr_debug(" console was specified !\n");
+ return -EBUSY;
+ }
+
+ if (!of_chosen) {
+ pr_debug(" of_chosen is NULL !\n");
+ return -ENODEV;
+ }
+
+ /* We are getting a weird phandle from OF ... */
+ /* ... So use the full path instead */
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name == NULL) {
+ pr_debug(" no linux,stdout-path !\n");
+ return -ENODEV;
+ }
+ prom_stdout = of_find_node_by_path(name);
+ if (!prom_stdout) {
+ pr_debug(" can't find stdout package %s !\n", name);
+ return -ENODEV;
+ }
+ pr_debug("stdout is %s\n", prom_stdout->full_name);
+
+ name = of_get_property(prom_stdout, "name", NULL);
+ if (!name) {
+ pr_debug(" stdout package has no name !\n");
+ goto not_found;
+ }
+
+ if (strcmp(name, "ch-a") == 0)
+ offset = 0;
+ else if (strcmp(name, "ch-b") == 0)
+ offset = 1;
+ else
+ goto not_found;
+ of_node_put(prom_stdout);
+
+ pr_debug("Found serial console at %s%d\n", devname, offset);
+
+ return add_preferred_console(devname, offset, NULL);
+
+ not_found:
+ pr_debug("No preferred console found !\n");
+ of_node_put(prom_stdout);
+ return -ENODEV;
+}
+console_initcall(check_pmac_serial_console);
+
+#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init pmac_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "Power Macintosh") &&
+ !of_flat_dt_is_compatible(root, "MacRISC"))
+ return 0;
+
+#ifdef CONFIG_PPC64
+ /*
+ * On U3, the DART (iommu) must be allocated now since it
+ * has an impact on htab_initialize (due to the large page it
+ * occupies having to be broken up so the DART itself is not
+ * part of the cacheable linear mapping)
+ */
+ alloc_dart_table();
+
+ hpte_init_native();
+#endif
+
+#ifdef CONFIG_PPC32
+ /* isa_io_base gets set in pmac_pci_init */
+ ISA_DMA_THRESHOLD = ~0L;
+ DMA_MODE_READ = 1;
+ DMA_MODE_WRITE = 2;
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PMAC_SMU
+ /*
+ * SMU based G5s need some memory below 2Gb, at least the current
+ * driver needs that. We have to allocate it now. We allocate 4k
+ * (1 small page) for now.
+ */
+ smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
+#endif /* CONFIG_PMAC_SMU */
+
+ pm_power_off = pmac_power_off;
+
+ return 1;
+}
+
+define_machine(powermac) {
+ .name = "PowerMac",
+ .probe = pmac_probe,
+ .setup_arch = pmac_setup_arch,
+ .init_early = pmac_init_early,
+ .show_cpuinfo = pmac_show_cpuinfo,
+ .init_IRQ = pmac_pic_init,
+ .get_irq = NULL, /* changed later */
+ .pci_irq_fixup = pmac_pci_irq_fixup,
+ .restart = pmac_restart,
+ .halt = pmac_halt,
+ .time_init = pmac_time_init,
+ .get_boot_time = pmac_get_boot_time,
+ .set_rtc_time = pmac_set_rtc_time,
+ .get_rtc_time = pmac_get_rtc_time,
+ .calibrate_decr = pmac_calibrate_decr,
+ .feature_call = pmac_do_feature_call,
+ .progress = udbg_progress,
+#ifdef CONFIG_PPC64
+ .power_save = power4_idle,
+ .enable_pmcs = power4_enable_pmcs,
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC32
+ .pcibios_after_init = pmac_pcibios_after_init,
+ .phys_mem_access_prot = pci_phys_mem_access_prot,
+#endif
+};
diff --git a/kernel/arch/powerpc/platforms/powermac/sleep.S b/kernel/arch/powerpc/platforms/powermac/sleep.S
new file mode 100644
index 000000000..1c2802fab
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/sleep.S
@@ -0,0 +1,397 @@
+/*
+ * This file contains sleep low-level functions for PowerBook G3.
+ * Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * and Paul Mackerras (paulus@samba.org).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/mmu.h>
+
+#define MAGIC 0x4c617273 /* 'Lars' */
+
+/*
+ * Structure for storing CPU registers on the stack.
+ */
+#define SL_SP 0
+#define SL_PC 4
+#define SL_MSR 8
+#define SL_SDR1 0xc
+#define SL_SPRG0 0x10 /* 4 sprg's */
+#define SL_DBAT0 0x20
+#define SL_IBAT0 0x28
+#define SL_DBAT1 0x30
+#define SL_IBAT1 0x38
+#define SL_DBAT2 0x40
+#define SL_IBAT2 0x48
+#define SL_DBAT3 0x50
+#define SL_IBAT3 0x58
+#define SL_TB 0x60
+#define SL_R2 0x68
+#define SL_CR 0x6c
+#define SL_R12 0x70 /* r12 to r31 */
+#define SL_SIZE (SL_R12 + 80)
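+/* 20 saved GPRs (r12..r31) at 4 bytes each account for the 80 above */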
+
+ .section .text
+ .align 5
+
+#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) || \
+ (defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32))
+
+/* This gets called by via-pmu.c late during the sleep process.
+ * The PMU has already been sent the sleep command and will shut us down
+ * soon. We need to save everything that is needed and set up the wakeup
+ * vector that will be called by the ROM on wakeup
+ */
+_GLOBAL(low_sleep_handler)
+#ifndef CONFIG_6xx
+ blr
+#else
+ mflr r0
+ stw r0,4(r1)
+ stwu r1,-SL_SIZE(r1)
+ mfcr r0
+ stw r0,SL_CR(r1)
+ stw r2,SL_R2(r1)
+ stmw r12,SL_R12(r1)
+
+ /* Save MSR & SDR1 */
+ mfmsr r4
+ stw r4,SL_MSR(r1)
+ mfsdr1 r4
+ stw r4,SL_SDR1(r1)
+
+ /* Get a stable timebase and save it */
+1: mftbu r4
+ stw r4,SL_TB(r1)
+ mftb r5
+ stw r5,SL_TB+4(r1)
+ mftbu r3
+ cmpw r3,r4
+ bne 1b
+
+ /* Save SPRGs */
+ mfsprg r4,0
+ stw r4,SL_SPRG0(r1)
+ mfsprg r4,1
+ stw r4,SL_SPRG0+4(r1)
+ mfsprg r4,2
+ stw r4,SL_SPRG0+8(r1)
+ mfsprg r4,3
+ stw r4,SL_SPRG0+12(r1)
+
+ /* Save BATs */
+ mfdbatu r4,0
+ stw r4,SL_DBAT0(r1)
+ mfdbatl r4,0
+ stw r4,SL_DBAT0+4(r1)
+ mfdbatu r4,1
+ stw r4,SL_DBAT1(r1)
+ mfdbatl r4,1
+ stw r4,SL_DBAT1+4(r1)
+ mfdbatu r4,2
+ stw r4,SL_DBAT2(r1)
+ mfdbatl r4,2
+ stw r4,SL_DBAT2+4(r1)
+ mfdbatu r4,3
+ stw r4,SL_DBAT3(r1)
+ mfdbatl r4,3
+ stw r4,SL_DBAT3+4(r1)
+ mfibatu r4,0
+ stw r4,SL_IBAT0(r1)
+ mfibatl r4,0
+ stw r4,SL_IBAT0+4(r1)
+ mfibatu r4,1
+ stw r4,SL_IBAT1(r1)
+ mfibatl r4,1
+ stw r4,SL_IBAT1+4(r1)
+ mfibatu r4,2
+ stw r4,SL_IBAT2(r1)
+ mfibatl r4,2
+ stw r4,SL_IBAT2+4(r1)
+ mfibatu r4,3
+ stw r4,SL_IBAT3(r1)
+ mfibatl r4,3
+ stw r4,SL_IBAT3+4(r1)
+
+ /* Backup various CPU config stuffs */
+ bl __save_cpu_setup
+
+ /* The ROM can wake us up via 2 different vectors:
+ * - On wallstreet & lombard, we must write a magic
+ * value 'Lars' at address 4 and a pointer to a
+ * memory location containing the PC to resume from
+ * at address 0.
+ * - On Core99, we must store the wakeup vector at
+ * address 0x80 and, optionally, its parameters
+ * at address 0x84. I've had some trouble with those
+ * parameters, however, and no longer use them.
+ */
+ lis r5,grackle_wake_up@ha
+ addi r5,r5,grackle_wake_up@l
+ tophys(r5,r5)
+ stw r5,SL_PC(r1)
+ lis r4,KERNELBASE@h
+ tophys(r5,r1)
+ addi r5,r5,SL_PC
+ lis r6,MAGIC@ha
+ addi r6,r6,MAGIC@l
+ stw r5,0(r4)
+ stw r6,4(r4)
+ /* Setup stuffs at 0x80-0x84 for Core99 */
+ lis r3,core99_wake_up@ha
+ addi r3,r3,core99_wake_up@l
+ tophys(r3,r3)
+ stw r3,0x80(r4)
+ stw r5,0x84(r4)
+ /* Store a pointer to our backup storage into
+ * a kernel global
+ */
+ lis r3,sleep_storage@ha
+ addi r3,r3,sleep_storage@l
+ stw r5,0(r3)
+
+ .globl low_cpu_die
+low_cpu_die:
+ /* Flush & disable all caches */
+ bl flush_disable_caches
+
+ /* Turn off data relocation. */
+ mfmsr r3 /* Get current MSR */
+ rlwinm r3,r3,0,28,26 /* Turn off DR bit */
+ sync
+ mtmsr r3
+ isync
+
+BEGIN_FTR_SECTION
+ /* Flush any pending L2 data prefetches to work around HW bug */
+ sync
+ lis r3,0xfff0
+ lwz r0,0(r3) /* perform cache-inhibited load to ROM */
+ sync /* (caches are disabled at this point) */
+END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
+
+/*
+ * Set the HID0 and MSR for sleep.
+ */
+ mfspr r2,SPRN_HID0
+ rlwinm r2,r2,0,10,7 /* clear doze, nap */
+ oris r2,r2,HID0_SLEEP@h
+ sync
+ isync
+ mtspr SPRN_HID0,r2
+ sync
+
+/* This loop puts us back to sleep in case we have a spurious
+ * wakeup so that the host bridge properly stays asleep. The
+ * CPU will be turned off, either after a known time (about 1
+ * second) on wallstreet & lombard, or as soon as the CPU enters
+ * SLEEP mode on core99
+ */
+ mfmsr r2
+ oris r2,r2,MSR_POW@h
+1: sync
+ mtmsr r2
+ isync
+ b 1b
+
+/*
+ * Here is the resume code.
+ */
+
+
+/*
+ * Core99 machines resume here
+ * r4 has the physical address of SL_PC(sp) (unused)
+ */
+_GLOBAL(core99_wake_up)
+ /* Make sure HID0 no longer contains any sleep bit and that data cache
+ * is disabled
+ */
+ mfspr r3,SPRN_HID0
+ rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
+ rlwinm r3,r3,0,18,15 /* clear DCE, ICE */
+ mtspr SPRN_HID0,r3
+ sync
+ isync
+
+ /* sanitize MSR */
+ mfmsr r3
+ ori r3,r3,MSR_EE|MSR_IP
+ xori r3,r3,MSR_EE|MSR_IP
+ sync
+ isync
+ mtmsr r3
+ sync
+ isync
+
+ /* Recover sleep storage */
+ lis r3,sleep_storage@ha
+ addi r3,r3,sleep_storage@l
+ tophys(r3,r3)
+ lwz r1,0(r3)
+
+ /* Pass thru to older resume code ... */
+/*
+ * Here is the resume code for older machines.
+ * r1 has the physical address of SL_PC(sp).
+ */
+
+grackle_wake_up:
+
+ /* Restore the kernel's segment registers before
+ * we do any r1 memory access as we are not sure they
+ * are in a sane state above the first 256Mb region
+ */
+ li r0,16 /* load up segment register values */
+ mtctr r0 /* for context 0 */
+ lis r3,0x2000 /* Ku = 1, VSID = 0 */
+ li r4,0
+3: mtsrin r3,r4
+ addi r3,r3,0x111 /* increment VSID */
+ addis r4,r4,0x1000 /* address of next segment */
+ bdnz 3b
+ sync
+ isync
+
+ subi r1,r1,SL_PC
+
+ /* Restore various CPU configuration registers */
+ bl __restore_cpu_setup
+
+ /* Make sure all FPRs have been initialized */
+ bl reloc_offset
+ bl __init_fpu_registers
+
+ /* Invalidate & enable L1 cache, we don't care about
+ * whatever the ROM may have tried to write to memory
+ */
+ bl __inval_enable_L1
+
+ /* Restore the BATs, and SDR1. Then we can turn on the MMU. */
+ lwz r4,SL_SDR1(r1)
+ mtsdr1 r4
+ lwz r4,SL_SPRG0(r1)
+ mtsprg 0,r4
+ lwz r4,SL_SPRG0+4(r1)
+ mtsprg 1,r4
+ lwz r4,SL_SPRG0+8(r1)
+ mtsprg 2,r4
+ lwz r4,SL_SPRG0+12(r1)
+ mtsprg 3,r4
+
+ lwz r4,SL_DBAT0(r1)
+ mtdbatu 0,r4
+ lwz r4,SL_DBAT0+4(r1)
+ mtdbatl 0,r4
+ lwz r4,SL_DBAT1(r1)
+ mtdbatu 1,r4
+ lwz r4,SL_DBAT1+4(r1)
+ mtdbatl 1,r4
+ lwz r4,SL_DBAT2(r1)
+ mtdbatu 2,r4
+ lwz r4,SL_DBAT2+4(r1)
+ mtdbatl 2,r4
+ lwz r4,SL_DBAT3(r1)
+ mtdbatu 3,r4
+ lwz r4,SL_DBAT3+4(r1)
+ mtdbatl 3,r4
+ lwz r4,SL_IBAT0(r1)
+ mtibatu 0,r4
+ lwz r4,SL_IBAT0+4(r1)
+ mtibatl 0,r4
+ lwz r4,SL_IBAT1(r1)
+ mtibatu 1,r4
+ lwz r4,SL_IBAT1+4(r1)
+ mtibatl 1,r4
+ lwz r4,SL_IBAT2(r1)
+ mtibatu 2,r4
+ lwz r4,SL_IBAT2+4(r1)
+ mtibatl 2,r4
+ lwz r4,SL_IBAT3(r1)
+ mtibatu 3,r4
+ lwz r4,SL_IBAT3+4(r1)
+ mtibatl 3,r4
+
+BEGIN_MMU_FTR_SECTION
+ li r4,0
+ mtspr SPRN_DBAT4U,r4
+ mtspr SPRN_DBAT4L,r4
+ mtspr SPRN_DBAT5U,r4
+ mtspr SPRN_DBAT5L,r4
+ mtspr SPRN_DBAT6U,r4
+ mtspr SPRN_DBAT6L,r4
+ mtspr SPRN_DBAT7U,r4
+ mtspr SPRN_DBAT7L,r4
+ mtspr SPRN_IBAT4U,r4
+ mtspr SPRN_IBAT4L,r4
+ mtspr SPRN_IBAT5U,r4
+ mtspr SPRN_IBAT5L,r4
+ mtspr SPRN_IBAT6U,r4
+ mtspr SPRN_IBAT6L,r4
+ mtspr SPRN_IBAT7U,r4
+ mtspr SPRN_IBAT7L,r4
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
+ /* Flush all TLBs */
+ lis r4,0x1000
+1: addic. r4,r4,-0x1000
+ tlbie r4
+ blt 1b
+ sync
+
+ /* restore the MSR and turn on the MMU */
+ lwz r3,SL_MSR(r1)
+ bl turn_on_mmu
+
+ /* get back the stack pointer */
+ tovirt(r1,r1)
+
+ /* Restore TB */
+ li r3,0
+ mttbl r3
+ lwz r3,SL_TB(r1)
+ lwz r4,SL_TB+4(r1)
+ mttbu r3
+ mttbl r4
+
+ /* Restore the callee-saved registers and return */
+ lwz r0,SL_CR(r1)
+ mtcr r0
+ lwz r2,SL_R2(r1)
+ lmw r12,SL_R12(r1)
+ addi r1,r1,SL_SIZE
+ lwz r0,4(r1)
+ mtlr r0
+ blr
+
+turn_on_mmu:
+ mflr r4
+ tovirt(r4,r4)
+ mtsrr0 r4
+ mtsrr1 r3
+ sync
+ isync
+ rfi
+
+#endif /* CONFIG_PM || CONFIG_CPU_FREQ_PMAC || (CONFIG_HOTPLUG_CPU && CONFIG_PPC32) */
+
+ .section .data
+ .balign L1_CACHE_BYTES
+sleep_storage:
+ .long 0
+ .balign L1_CACHE_BYTES, 0
+
+#endif /* CONFIG_6xx */
+ .section .text
diff --git a/kernel/arch/powerpc/platforms/powermac/smp.c b/kernel/arch/powerpc/platforms/powermac/smp.c
new file mode 100644
index 000000000..28a147ca3
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/smp.c
@@ -0,0 +1,1033 @@
+/*
+ * SMP support for power macintosh.
+ *
+ * We support both the old "powersurge" SMP architecture
+ * and the current Core99 (G4 PowerMac) machines.
+ *
+ * Note that we don't support the very first rev. of
+ * Apple/DayStar 2 CPUs board, the one with the funky
+ * watchdog. Hopefully, none of these should be there except
+ * maybe internally to Apple. I should probably still add some
+ * code to detect this card though and disable SMP. --BenH.
+ *
+ * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
+ * and Ben Herrenschmidt <benh@kernel.crashing.org>.
+ *
+ * Support for DayStar quad CPU cards
+ * Copyright (C) XLR8, Inc. 1994-2000
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/cpu.h>
+#include <linux/compiler.h>
+
+#include <asm/ptrace.h>
+#include <linux/atomic.h>
+#include <asm/code-patching.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/time.h>
+#include <asm/mpic.h>
+#include <asm/cacheflush.h>
+#include <asm/keylargo.h>
+#include <asm/pmac_low_i2c.h>
+#include <asm/pmac_pfunc.h>
+
+#include "pmac.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+extern void __secondary_start_pmac_0(void);
+extern int pmac_pfunc_base_install(void);
+
+static void (*pmac_tb_freeze)(int freeze);
+static u64 timebase;
+static int tb_req;
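+
+/* Timebase hand-off protocol used below (a sketch derived from
+ * smp_core99_give_timebase/take_timebase and the psurge equivalents):
+ *   taker:  tb_req = 1; spin until timebase != 0; set_tb(timebase);
+ *           timebase = 0;
+ *   giver:  spin until tb_req != 0; tb_req = 0; freeze the TB;
+ *           timebase = get_tb(); spin until timebase == 0; unfreeze.
+ * The mb() calls order the flag/value updates between the two CPUs.
+ */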
+
+#ifdef CONFIG_PPC_PMAC32_PSURGE
+
+/*
+ * Powersurge (old powermac SMP) support.
+ */
+
+/* Addresses for powersurge registers */
+#define HAMMERHEAD_BASE 0xf8000000
+#define HHEAD_CONFIG 0x90
+#define HHEAD_SEC_INTR 0xc0
+
+/* register for interrupting the primary processor on the powersurge */
+/* N.B. this is actually the ethernet ROM! */
+#define PSURGE_PRI_INTR 0xf3019000
+
+/* register for storing the start address for the secondary processor */
+/* N.B. this is the PCI config space address register for the 1st bridge */
+#define PSURGE_START 0xf2800000
+
+/* Daystar/XLR8 4-CPU card */
+#define PSURGE_QUAD_REG_ADDR 0xf8800000
+
+#define PSURGE_QUAD_IRQ_SET 0
+#define PSURGE_QUAD_IRQ_CLR 1
+#define PSURGE_QUAD_IRQ_PRIMARY 2
+#define PSURGE_QUAD_CKSTOP_CTL 3
+#define PSURGE_QUAD_PRIMARY_ARB 4
+#define PSURGE_QUAD_BOARD_ID 6
+#define PSURGE_QUAD_WHICH_CPU 7
+#define PSURGE_QUAD_CKSTOP_RDBK 8
+#define PSURGE_QUAD_RESET_CTL 11
+
+#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
+#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
+#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
+#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
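+
+/* The quad card registers are 4 bits wide and spaced 16 bytes apart,
+ * with the data byte at offset +4 (see the macros above). For example,
+ * PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits) reads back the low
+ * nibble of the reset control register, ORs in procbits and writes the
+ * result out again.
+ */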
+
+/* virtual addresses for the above */
+static volatile u8 __iomem *hhead_base;
+static volatile u8 __iomem *quad_base;
+static volatile u32 __iomem *psurge_pri_intr;
+static volatile u8 __iomem *psurge_sec_intr;
+static volatile u32 __iomem *psurge_start;
+
+/* values for psurge_type */
+#define PSURGE_NONE -1
+#define PSURGE_DUAL 0
+#define PSURGE_QUAD_OKEE 1
+#define PSURGE_QUAD_COTTON 2
+#define PSURGE_QUAD_ICEGRASS 3
+
+/* what sort of powersurge board we have */
+static int psurge_type = PSURGE_NONE;
+
+/* irq for secondary cpus to report */
+static struct irq_domain *psurge_host;
+int psurge_secondary_virq;
+
+/*
+ * Set and clear IPIs for powersurge.
+ */
+static inline void psurge_set_ipi(int cpu)
+{
+ if (psurge_type == PSURGE_NONE)
+ return;
+ if (cpu == 0)
+ in_be32(psurge_pri_intr);
+ else if (psurge_type == PSURGE_DUAL)
+ out_8(psurge_sec_intr, 0);
+ else
+ PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
+}
+
+static inline void psurge_clr_ipi(int cpu)
+{
+ if (cpu > 0) {
+ switch(psurge_type) {
+ case PSURGE_DUAL:
+ out_8(psurge_sec_intr, ~0);
+ /* fall through */
+ case PSURGE_NONE:
+ break;
+ default:
+ PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
+ }
+ }
+}
+
+/*
+ * On powersurge (old SMP powermac architecture) we don't have
+ * separate IPIs for separate messages like openpic does. Instead
+ * use the generic demux helpers
+ * -- paulus.
+ */
+static irqreturn_t psurge_ipi_intr(int irq, void *d)
+{
+ psurge_clr_ipi(smp_processor_id());
+ smp_ipi_demux();
+
+ return IRQ_HANDLED;
+}
+
+static void smp_psurge_cause_ipi(int cpu, unsigned long data)
+{
+ psurge_set_ipi(cpu);
+}
+
+static int psurge_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops psurge_host_ops = {
+ .map = psurge_host_map,
+};
+
+static int psurge_secondary_ipi_init(void)
+{
+ int rc = -ENOMEM;
+
+ psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);
+
+ if (psurge_host)
+ psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
+
+ if (psurge_secondary_virq)
+ rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+ IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);
+
+ if (rc)
+ pr_err("Failed to setup secondary cpu IPI\n");
+
+ return rc;
+}
+
+/*
+ * Determine whether a quad card is present. We read the board ID
+ * register, force the data bus to change to something else, and read
+ * it again. If the value is stable, the register probably exists (ugh!).
+ * The loop below flushes varying patterns out onto the data bus so that
+ * a phantom register, which would just echo bus noise, is unlikely to
+ * read back the same value every time.
+ */
+static int __init psurge_quad_probe(void)
+{
+ int type;
+ unsigned int i;
+
+ type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
+ if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
+ || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+ return PSURGE_DUAL;
+
+ /* looks OK, try a slightly more rigorous test */
+ /* bogus is not necessarily cacheline-aligned,
+ though I don't suppose that really matters. -- paulus */
+ for (i = 0; i < 100; i++) {
+ volatile u32 bogus[8];
+ bogus[(0+i)%8] = 0x00000000;
+ bogus[(1+i)%8] = 0x55555555;
+ bogus[(2+i)%8] = 0xFFFFFFFF;
+ bogus[(3+i)%8] = 0xAAAAAAAA;
+ bogus[(4+i)%8] = 0x33333333;
+ bogus[(5+i)%8] = 0xCCCCCCCC;
+ bogus[(6+i)%8] = 0xCCCCCCCC;
+ bogus[(7+i)%8] = 0x33333333;
+ wmb();
+ asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
+ mb();
+ if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+ return PSURGE_DUAL;
+ }
+ return type;
+}
+
+static void __init psurge_quad_init(void)
+{
+ int procbits;
+
+ if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
+ procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
+ if (psurge_type == PSURGE_QUAD_ICEGRASS)
+ PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+ else
+ PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
+ mdelay(33);
+ out_8(psurge_sec_intr, ~0);
+ PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
+ PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+ if (psurge_type != PSURGE_QUAD_ICEGRASS)
+ PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
+ PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
+ mdelay(33);
+ PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
+ mdelay(33);
+ PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
+ mdelay(33);
+}
+
+static void __init smp_psurge_probe(void)
+{
+ int i, ncpus;
+ struct device_node *dn;
+
+ /* We don't do SMP on the PPC601 -- paulus */
+ if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+ return;
+
+ /*
+ * The powersurge cpu board can be used in the generation
+ * of powermacs that have a socket for an upgradeable cpu card,
+ * including the 7500, 8500, 9500, 9600.
+ * The device tree doesn't tell you if you have 2 cpus because
+ * OF doesn't know anything about the 2nd processor.
+ * Instead we look for magic bits in magic registers,
+ * in the hammerhead memory controller in the case of the
+ * dual-cpu powersurge board. -- paulus.
+ */
+ dn = of_find_node_by_name(NULL, "hammerhead");
+ if (dn == NULL)
+ return;
+ of_node_put(dn);
+
+ hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
+ quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
+ psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
+
+ psurge_type = psurge_quad_probe();
+ if (psurge_type != PSURGE_DUAL) {
+ psurge_quad_init();
+ /* All released cards using this HW design have 4 CPUs */
+ ncpus = 4;
+ /* Not sure how timebase sync works on those, so use the SW fallback */
+ smp_ops->give_timebase = smp_generic_give_timebase;
+ smp_ops->take_timebase = smp_generic_take_timebase;
+ } else {
+ iounmap(quad_base);
+ if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
+ /* not a dual-cpu card */
+ iounmap(hhead_base);
+ psurge_type = PSURGE_NONE;
+ return;
+ }
+ ncpus = 2;
+ }
+
+ if (psurge_secondary_ipi_init())
+ return;
+
+ psurge_start = ioremap(PSURGE_START, 4);
+ psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
+
+ /* This is necessary because OF doesn't know about the
+ * secondary cpu(s), and thus there aren't nodes in the
+ * device tree for them, and smp_setup_cpu_maps hasn't
+ * set their bits in cpu_present_mask.
+ */
+ if (ncpus > NR_CPUS)
+ ncpus = NR_CPUS;
+ for (i = 1; i < ncpus ; ++i)
+ set_cpu_present(i, true);
+
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
+}
+
+static int __init smp_psurge_kick_cpu(int nr)
+{
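+ /* The nr * 8 below assumes __secondary_start_pmac_0 is followed by
+ * 8-byte per-CPU entry stubs; smp_core99_kick_cpu uses the same
+ * stride when patching the reset vector.
+ */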
+ unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
+ unsigned long a, flags;
+ int i, j;
+
+ /* Defining this here is evil ... but I prefer hiding that
+ * crap to avoid giving people ideas that they can do the
+ * same.
+ */
+ extern volatile unsigned int cpu_callin_map[NR_CPUS];
+
+ /* may need to flush here if secondary bats aren't setup */
+ for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
+ asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
+ asm volatile("sync");
+
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+
+ /* This is going to freeze the timebase, so we disable interrupts */
+ local_irq_save(flags);
+
+ out_be32(psurge_start, start);
+ mb();
+
+ psurge_set_ipi(nr);
+
+ /*
+ * We can't use udelay here because the timebase is now frozen.
+ */
+ for (i = 0; i < 2000; ++i)
+ asm volatile("nop" : : : "memory");
+ psurge_clr_ipi(nr);
+
+ /*
+ * Also, because the timebase is frozen, we must not return to the
+ * caller which will try to do udelay's etc... Instead, we wait -here-
+ * for the CPU to callin.
+ */
+ for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
+ for (j = 1; j < 10000; j++)
+ asm volatile("nop" : : : "memory");
+ asm volatile("sync" : : : "memory");
+ }
+ if (!cpu_callin_map[nr])
+ goto stuck;
+
+ /* And we do the TB sync here too for standard dual CPU cards */
+ if (psurge_type == PSURGE_DUAL) {
+ while(!tb_req)
+ barrier();
+ tb_req = 0;
+ mb();
+ timebase = get_tb();
+ mb();
+ while (timebase)
+ barrier();
+ mb();
+ }
+ stuck:
+ /* now interrupt the secondary, restarting both TBs */
+ if (psurge_type == PSURGE_DUAL)
+ psurge_set_ipi(1);
+
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
+
+ return 0;
+}
+
+static struct irqaction psurge_irqaction = {
+ .handler = psurge_ipi_intr,
+ .flags = IRQF_PERCPU | IRQF_NO_THREAD,
+ .name = "primary IPI",
+};
+
+static void __init smp_psurge_setup_cpu(int cpu_nr)
+{
+ if (cpu_nr != 0 || !psurge_start)
+ return;
+
+ /* reset the entry point so if we get another intr we won't
+ * try to start up again */
+ out_be32(psurge_start, 0x100);
+ if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
+ printk(KERN_ERR "Couldn't get primary IPI interrupt");
+}
+
+void __init smp_psurge_take_timebase(void)
+{
+ if (psurge_type != PSURGE_DUAL)
+ return;
+
+ tb_req = 1;
+ mb();
+ while (!timebase)
+ barrier();
+ mb();
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ mb();
+ set_dec(tb_ticks_per_jiffy/2);
+}
+
+void __init smp_psurge_give_timebase(void)
+{
+ /* Nothing to do here */
+}
+
+/* PowerSurge-style Macs */
+struct smp_ops_t psurge_smp_ops = {
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
+ .cause_ipi = smp_psurge_cause_ipi,
+ .probe = smp_psurge_probe,
+ .kick_cpu = smp_psurge_kick_cpu,
+ .setup_cpu = smp_psurge_setup_cpu,
+ .give_timebase = smp_psurge_give_timebase,
+ .take_timebase = smp_psurge_take_timebase,
+};
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
+
+/*
+ * Core 99 and later support
+ */
+
+
+static void smp_core99_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ while(!tb_req)
+ barrier();
+ tb_req = 0;
+ (*pmac_tb_freeze)(1);
+ mb();
+ timebase = get_tb();
+ mb();
+ while (timebase)
+ barrier();
+ mb();
+ (*pmac_tb_freeze)(0);
+ mb();
+
+ local_irq_restore(flags);
+}
+
+
+static void smp_core99_take_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ tb_req = 1;
+ mb();
+ while (!timebase)
+ barrier();
+ mb();
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ mb();
+
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_PPC64
+/*
+ * G5s enable/disable the timebase via an i2c-connected clock chip.
+ */
+static struct pmac_i2c_bus *pmac_tb_clock_chip_host;
+static u8 pmac_tb_pulsar_addr;
+
+static void smp_core99_cypress_tb_freeze(int freeze)
+{
+ u8 data;
+ int rc;
+
+ /* Strangely, the device-tree says the address is 0xd2, but Darwin
+ * accesses 0xd0 ...
+ */
+ pmac_i2c_setmode(pmac_tb_clock_chip_host,
+ pmac_i2c_mode_combined);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ 0xd0 | pmac_i2c_read,
+ 1, 0x81, &data, 1);
+ if (rc != 0)
+ goto bail;
+
+ data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
+
+ pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ 0xd0 | pmac_i2c_write,
+ 1, 0x81, &data, 1);
+
+ bail:
+ if (rc != 0) {
+ printk("Cypress Timebase %s rc: %d\n",
+ freeze ? "freeze" : "unfreeze", rc);
+ panic("Timebase freeze failed !\n");
+ }
+}
+
+
+static void smp_core99_pulsar_tb_freeze(int freeze)
+{
+ u8 data;
+ int rc;
+
+ pmac_i2c_setmode(pmac_tb_clock_chip_host,
+ pmac_i2c_mode_combined);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ pmac_tb_pulsar_addr | pmac_i2c_read,
+ 1, 0x2e, &data, 1);
+ if (rc != 0)
+ goto bail;
+
+ data = (data & 0x88) | (freeze ? 0x11 : 0x22);
+
+ pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ pmac_tb_pulsar_addr | pmac_i2c_write,
+ 1, 0x2e, &data, 1);
+ bail:
+ if (rc != 0) {
+ printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
+ freeze ? "freeze" : "unfreeze", rc);
+ panic("Timebase freeze failed !\n");
+ }
+}
+
+static void __init smp_core99_setup_i2c_hwsync(int ncpus)
+{
+ struct device_node *cc = NULL;
+ struct device_node *p;
+ const char *name = NULL;
+ const u32 *reg;
+ int ok;
+
+ /* Look for the clock chip */
+ for_each_node_by_name(cc, "i2c-hwclock") {
+ p = of_get_parent(cc);
+ ok = p && of_device_is_compatible(p, "uni-n-i2c");
+ of_node_put(p);
+ if (!ok)
+ continue;
+
+ pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
+ if (pmac_tb_clock_chip_host == NULL)
+ continue;
+ reg = of_get_property(cc, "reg", NULL);
+ if (reg == NULL)
+ continue;
+ switch (*reg) {
+ case 0xd2:
+ if (of_device_is_compatible(cc,"pulsar-legacy-slewing")) {
+ pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
+ pmac_tb_pulsar_addr = 0xd2;
+ name = "Pulsar";
+ } else if (of_device_is_compatible(cc, "cy28508")) {
+ pmac_tb_freeze = smp_core99_cypress_tb_freeze;
+ name = "Cypress";
+ }
+ break;
+ case 0xd4:
+ pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
+ pmac_tb_pulsar_addr = 0xd4;
+ name = "Pulsar";
+ break;
+ }
+ if (pmac_tb_freeze != NULL)
+ break;
+ }
+ if (pmac_tb_freeze != NULL) {
+ /* Open i2c bus for synchronous access */
+ if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
+ printk(KERN_ERR "Failed top open i2c bus for clock"
+ " sync, fallback to software sync !\n");
+ goto no_i2c_sync;
+ }
+ printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
+ name);
+ return;
+ }
+ no_i2c_sync:
+ pmac_tb_freeze = NULL;
+ pmac_tb_clock_chip_host = NULL;
+}
+
+
+
+/*
+ * Newer G5s use a platform function
+ */
+
+static void smp_core99_pfunc_tb_freeze(int freeze)
+{
+ struct device_node *cpus;
+ struct pmf_args args;
+
+ cpus = of_find_node_by_path("/cpus");
+ BUG_ON(cpus == NULL);
+ args.count = 1;
+ args.u[0].v = !freeze;
+ pmf_call_function(cpus, "cpu-timebase", &args);
+ of_node_put(cpus);
+}
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * SMP G4s use a GPIO to enable/disable the timebase.
+ */
+
+static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
+
+static void smp_core99_gpio_tb_freeze(int freeze)
+{
+ if (freeze)
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
+ else
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
+ pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
+}
+
+
+#endif /* !CONFIG_PPC64 */
+
+/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
+volatile static long int core99_l2_cache;
+volatile static long int core99_l3_cache;
+
+static void core99_init_caches(int cpu)
+{
+#ifndef CONFIG_PPC64
+ if (!cpu_has_feature(CPU_FTR_L2CR))
+ return;
+
+ if (cpu == 0) {
+ core99_l2_cache = _get_L2CR();
+ printk("CPU0: L2CR is %lx\n", core99_l2_cache);
+ } else {
+ printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
+ _set_L2CR(0);
+ _set_L2CR(core99_l2_cache);
+ printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
+ }
+
+ if (!cpu_has_feature(CPU_FTR_L3CR))
+ return;
+
+ if (cpu == 0){
+ core99_l3_cache = _get_L3CR();
+ printk("CPU0: L3CR is %lx\n", core99_l3_cache);
+ } else {
+ printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
+ _set_L3CR(0);
+ _set_L3CR(core99_l3_cache);
+ printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
+ }
+#endif /* !CONFIG_PPC64 */
+}
+
+static void __init smp_core99_setup(int ncpus)
+{
+#ifdef CONFIG_PPC64
+
+ /* i2c based HW sync on some G5s */
+ if (of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3") ||
+ of_machine_is_compatible("RackMac3,1"))
+ smp_core99_setup_i2c_hwsync(ncpus);
+
+ /* pfunc based HW sync on recent G5s */
+ if (pmac_tb_freeze == NULL) {
+ struct device_node *cpus =
+ of_find_node_by_path("/cpus");
+ if (cpus &&
+ of_get_property(cpus, "platform-cpu-timebase", NULL)) {
+ pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
+ printk(KERN_INFO "Processor timebase sync using"
+ " platform function\n");
+ }
+ }
+
+#else /* CONFIG_PPC64 */
+
+ /* GPIO based HW sync on ppc32 Core99 */
+ if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
+ struct device_node *cpu;
+ const u32 *tbprop = NULL;
+
+ core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
+ cpu = of_find_node_by_type(NULL, "cpu");
+ if (cpu != NULL) {
+ tbprop = of_get_property(cpu, "timebase-enable", NULL);
+ if (tbprop)
+ core99_tb_gpio = *tbprop;
+ of_node_put(cpu);
+ }
+ pmac_tb_freeze = smp_core99_gpio_tb_freeze;
+ printk(KERN_INFO "Processor timebase sync using"
+ " GPIO 0x%02x\n", core99_tb_gpio);
+ }
+
+#endif /* CONFIG_PPC64 */
+
+ /* No timebase sync, fall back to software */
+ if (pmac_tb_freeze == NULL) {
+ smp_ops->give_timebase = smp_generic_give_timebase;
+ smp_ops->take_timebase = smp_generic_take_timebase;
+ printk(KERN_INFO "Processor timebase sync using software\n");
+ }
+
+#ifndef CONFIG_PPC64
+ {
+ int i;
+
+ /* XXX should get this from reg properties */
+ for (i = 1; i < ncpus; ++i)
+ set_hard_smp_processor_id(i, i);
+ }
+#endif
+
+ /* 32-bit SMP can't NAP */
+ if (!of_machine_is_compatible("MacRISC4"))
+ powersave_nap = 0;
+}
+
+static void __init smp_core99_probe(void)
+{
+ struct device_node *cpus;
+ int ncpus = 0;
+
+ if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
+
+ /* Count CPUs in the device-tree */
+ for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
+ ++ncpus;
+
+ printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
+
+ /* Nothing more to do if less than 2 of them */
+ if (ncpus <= 1)
+ return;
+
+ /* We need to perform some early initialisations before we can start
+ * setting up SMP as we are running before initcalls
+ */
+ pmac_pfunc_base_install();
+ pmac_i2c_init();
+
+ /* Setup various bits like timebase sync method, ability to nap, ... */
+ smp_core99_setup(ncpus);
+
+ /* Install IPIs */
+ mpic_request_ipis();
+
+ /* Collect l2cr and l3cr values from CPU 0 */
+ core99_init_caches(0);
+}
+
+static int smp_core99_kick_cpu(int nr)
+{
+ unsigned int save_vector;
+ unsigned long target, flags;
+ unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);
+
+ if (nr < 0 || nr > 3)
+ return -ENOENT;
+
+ if (ppc_md.progress)
+ ppc_md.progress("smp_core99_kick_cpu", 0x346);
+
+ local_irq_save(flags);
+
+ /* Save reset vector */
+ save_vector = *vector;
+
+ /* Setup fake reset vector that does
+ * b __secondary_start_pmac_0 + nr*8
+ */
+ target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
+ patch_branch(vector, target, BRANCH_SET_LINK);
+
+ /* Put some life in our friend */
+ pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
+
+ /* FIXME: We wait a bit for the CPU to take the exception, I should
+ * instead wait for the entry code to set something for me. Well,
+ * ideally, all that crap will be done in prom.c and the CPU left
+ * in a RAM-based wait loop like CHRP.
+ */
+ mdelay(1);
+
+ /* Restore our exception vector */
+ *vector = save_vector;
+ flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+ local_irq_restore(flags);
+ if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
+
+ return 0;
+}
+
+static void smp_core99_setup_cpu(int cpu_nr)
+{
+ /* Setup L2/L3 */
+ if (cpu_nr != 0)
+ core99_init_caches(cpu_nr);
+
+ /* Setup openpic */
+ mpic_setup_this_cpu();
+}
+
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_core99_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int rc;
+
+ switch(action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ /* Open i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host) {
+ rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+ if (rc) {
+ pr_err("Failed to open i2c bus for time sync\n");
+ return notifier_from_errno(rc);
+ }
+ }
+ break;
+ case CPU_ONLINE:
+ case CPU_UP_CANCELED:
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host)
+ pmac_i2c_close(pmac_tb_clock_chip_host);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block smp_core99_cpu_nb = {
+ .notifier_call = smp_core99_cpu_notify,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void __init smp_core99_bringup_done(void)
+{
+ extern void g5_phy_disable_cpu1(void);
+
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host)
+ pmac_i2c_close(pmac_tb_clock_chip_host);
+
+ /* If we didn't start the second CPU, we must take
+ * it off the bus.
+ */
+ if (of_machine_is_compatible("MacRISC4") &&
+ num_online_cpus() < 2) {
+ set_cpu_present(1, false);
+ g5_phy_disable_cpu1();
+ }
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&smp_core99_cpu_nb);
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("smp_core99_bringup_done", 0x349);
+}
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int smp_core99_cpu_disable(void)
+{
+ int rc = generic_cpu_disable();
+ if (rc)
+ return rc;
+
+ mpic_cpu_set_priority(0xf);
+
+ return 0;
+}
+
+#ifdef CONFIG_PPC32
+
+static void pmac_cpu_die(void)
+{
+ int cpu = smp_processor_id();
+
+ local_irq_disable();
+ idle_task_exit();
+ pr_debug("CPU%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
+ mb();
+ low_cpu_die();
+}
+
+#else /* CONFIG_PPC32 */
+
+static void pmac_cpu_die(void)
+{
+ int cpu = smp_processor_id();
+
+ local_irq_disable();
+ idle_task_exit();
+
+ /*
+ * turn off as much as possible, we'll be
+ * kicked out as this will only be invoked
+ * on core99 platforms for now ...
+ */
+
+ printk(KERN_INFO "CPU#%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
+
+ /*
+ * Re-enable interrupts. The NAP code needs to enable them
+ * anyways, do it now so we deal with the case where one already
+ * happened while soft-disabled.
+ * We shouldn't get any external interrupts, only decrementer, and the
+ * decrementer handler is safe for use on offline CPUs
+ */
+ local_irq_enable();
+
+ while (1) {
+ /* let's not take timer interrupts too often ... */
+ set_dec(0x7fffffff);
+
+ /* Enter NAP mode */
+ power4_idle();
+ }
+}
+
+#endif /* else CONFIG_PPC32 */
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/* Core99 Macs (dual G4s and G5s) */
+struct smp_ops_t core99_smp_ops = {
+ .message_pass = smp_mpic_message_pass,
+ .probe = smp_core99_probe,
+#ifdef CONFIG_PPC64
+ .bringup_done = smp_core99_bringup_done,
+#endif
+ .kick_cpu = smp_core99_kick_cpu,
+ .setup_cpu = smp_core99_setup_cpu,
+ .give_timebase = smp_core99_give_timebase,
+ .take_timebase = smp_core99_take_timebase,
+#if defined(CONFIG_HOTPLUG_CPU)
+ .cpu_disable = smp_core99_cpu_disable,
+ .cpu_die = generic_cpu_die,
+#endif
+};
+
+void __init pmac_setup_smp(void)
+{
+ struct device_node *np;
+
+ /* Check for Core99 */
+ np = of_find_node_by_name(NULL, "uni-n");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u3");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u4");
+ if (np) {
+ of_node_put(np);
+ smp_ops = &core99_smp_ops;
+ }
+#ifdef CONFIG_PPC_PMAC32_PSURGE
+ else {
+ /* We have to set bits in cpu_possible_mask here since the
+ * secondary CPU(s) aren't in the device tree. Various
+ * things won't be initialized for CPUs not in the possible
+ * map, so we really need to fix it up here.
+ */
+ int cpu;
+
+ for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
+ set_cpu_possible(cpu, true);
+ smp_ops = &psurge_smp_ops;
+ }
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
+
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = pmac_cpu_die;
+#endif
+}
+
+
diff --git a/kernel/arch/powerpc/platforms/powermac/time.c b/kernel/arch/powerpc/platforms/powermac/time.c
new file mode 100644
index 000000000..8680bb697
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/time.c
@@ -0,0 +1,334 @@
+/*
+ * Support for periodic interrupts (100 per second) and for getting
+ * the current time from the RTC on Power Macintoshes.
+ *
+ * We use the decrementer register for our periodic interrupts.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+#include <linux/rtc.h>
+
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/nvram.h>
+#include <asm/smu.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Apparently the RTC stores seconds since 1 Jan 1904 */
+#define RTC_OFFSET 2082844800
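+/* 2082844800 = (66 * 365 + 17 leap days) * 86400, i.e. the number of
+ * seconds between 1 Jan 1904 (the Mac RTC epoch) and 1 Jan 1970 (the
+ * Unix epoch).
+ */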
+
+/*
+ * Calibrate the decrementer frequency with the VIA timer 1.
+ */
+#define VIA_TIMER_FREQ_6 4700000 /* timer 1 frequency * 6 */
+
+/* VIA registers */
+#define RS 0x200 /* skip between registers */
+#define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
+#define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
+#define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
+#define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
+#define ACR (11*RS) /* Auxiliary control register */
+#define IFR (13*RS) /* Interrupt flag register */
+
+/* Bits in ACR */
+#define T1MODE 0xc0 /* Timer 1 mode */
+#define T1MODE_CONT 0x40 /* continuous interrupts */
+
+/* Bits in IFR and IER */
+#define T1_INT 0x40 /* Timer 1 interrupt */
+
+long __init pmac_time_init(void)
+{
+ s32 delta = 0;
+#ifdef CONFIG_NVRAM
+ int dst;
+
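+ /* The GMT delta is kept in XPRAM as a 24-bit signed value (in
+ * seconds); assemble the three bytes and sign-extend to 32 bits.
+ */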
+ delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
+ delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
+ delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
+ if (delta & 0x00800000UL)
+ delta |= 0xFF000000UL;
+ dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
+ printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
+ dst ? "on" : "off");
+#endif
+ return delta;
+}
+
+#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
+static void to_rtc_time(unsigned long now, struct rtc_time *tm)
+{
+ to_tm(now, tm);
+ tm->tm_year -= 1900;
+ tm->tm_mon -= 1;
+}
+#endif
+
+#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) || \
+ defined(CONFIG_PMAC_SMU)
+static unsigned long from_rtc_time(struct rtc_time *tm)
+{
+ return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+}
+#endif
+
+#ifdef CONFIG_ADB_CUDA
+static unsigned long cuda_get_time(void)
+{
+ struct adb_request req;
+ unsigned int now;
+
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+ return 0;
+ while (!req.complete)
+ cuda_poll();
+ if (req.reply_len != 7)
+ printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
+ req.reply_len);
+ now = (req.reply[3] << 24) + (req.reply[4] << 16)
+ + (req.reply[5] << 8) + req.reply[6];
+ return ((unsigned long)now) - RTC_OFFSET;
+}
+
+#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
+
+static int cuda_set_rtc_time(struct rtc_time *tm)
+{
+ unsigned int nowtime;
+ struct adb_request req;
+
+ nowtime = from_rtc_time(tm) + RTC_OFFSET;
+ if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+ nowtime >> 24, nowtime >> 16, nowtime >> 8,
+ nowtime) < 0)
+ return -ENXIO;
+ while (!req.complete)
+ cuda_poll();
+ if ((req.reply_len != 3) && (req.reply_len != 7))
+ printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
+ req.reply_len);
+ return 0;
+}
+
+#else
+#define cuda_get_time() 0
+#define cuda_get_rtc_time(tm)
+#define cuda_set_rtc_time(tm) 0
+#endif
+
+#ifdef CONFIG_ADB_PMU
+static unsigned long pmu_get_time(void)
+{
+ struct adb_request req;
+ unsigned int now;
+
+ if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+ return 0;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 4)
+ printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
+ req.reply_len);
+ now = (req.reply[0] << 24) + (req.reply[1] << 16)
+ + (req.reply[2] << 8) + req.reply[3];
+ return ((unsigned long)now) - RTC_OFFSET;
+}
+
+#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
+
+static int pmu_set_rtc_time(struct rtc_time *tm)
+{
+ unsigned int nowtime;
+ struct adb_request req;
+
+ nowtime = from_rtc_time(tm) + RTC_OFFSET;
+ if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
+ nowtime >> 16, nowtime >> 8, nowtime) < 0)
+ return -ENXIO;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 0)
+ printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
+ req.reply_len);
+ return 0;
+}
+
+#else
+#define pmu_get_time() 0
+#define pmu_get_rtc_time(tm)
+#define pmu_set_rtc_time(tm) 0
+#endif
+
+#ifdef CONFIG_PMAC_SMU
+static unsigned long smu_get_time(void)
+{
+ struct rtc_time tm;
+
+ if (smu_get_rtc_time(&tm, 1))
+ return 0;
+ return from_rtc_time(&tm);
+}
+
+#else
+#define smu_get_time() 0
+#define smu_get_rtc_time(tm, spin)
+#define smu_set_rtc_time(tm, spin) 0
+#endif
+
+/* Can't be __init, it's called when suspending and resuming */
+unsigned long pmac_get_boot_time(void)
+{
+ /* Get the time from the RTC, used only at boot time */
+ switch (sys_ctrler) {
+ case SYS_CTRLER_CUDA:
+ return cuda_get_time();
+ case SYS_CTRLER_PMU:
+ return pmu_get_time();
+ case SYS_CTRLER_SMU:
+ return smu_get_time();
+ default:
+ return 0;
+ }
+}
+
+void pmac_get_rtc_time(struct rtc_time *tm)
+{
+ /* Get the time from the RTC, used only at boot time */
+ switch (sys_ctrler) {
+ case SYS_CTRLER_CUDA:
+ cuda_get_rtc_time(tm);
+ break;
+ case SYS_CTRLER_PMU:
+ pmu_get_rtc_time(tm);
+ break;
+ case SYS_CTRLER_SMU:
+ smu_get_rtc_time(tm, 1);
+ break;
+ default:
+ ;
+ }
+}
+
+int pmac_set_rtc_time(struct rtc_time *tm)
+{
+ switch (sys_ctrler) {
+ case SYS_CTRLER_CUDA:
+ return cuda_set_rtc_time(tm);
+ case SYS_CTRLER_PMU:
+ return pmu_set_rtc_time(tm);
+ case SYS_CTRLER_SMU:
+ return smu_set_rtc_time(tm, 1);
+ default:
+ return -ENODEV;
+ }
+}
+
+#ifdef CONFIG_PPC32
+/*
+ * Calibrate the decrementer register using VIA timer 1.
+ * This is used both on powermacs and CHRP machines.
+ */
+int __init via_calibrate_decr(void)
+{
+ struct device_node *vias;
+ volatile unsigned char __iomem *via;
+ int count = VIA_TIMER_FREQ_6 / 100;
+ unsigned int dstart, dend;
+ struct resource rsrc;
+
+ vias = of_find_node_by_name(NULL, "via-cuda");
+ if (vias == NULL)
+ vias = of_find_node_by_name(NULL, "via-pmu");
+ if (vias == NULL)
+ vias = of_find_node_by_name(NULL, "via");
+ if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
+ of_node_put(vias);
+ return 0;
+ }
+ of_node_put(vias);
+ via = ioremap(rsrc.start, resource_size(&rsrc));
+ if (via == NULL) {
+ printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
+ return 0;
+ }
+
+ /* set timer 1 for continuous interrupts */
+ out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
+ /* set the counter to a small value */
+ out_8(&via[T1CH], 2);
+ /* set the latch to `count' */
+ out_8(&via[T1LL], count);
+ out_8(&via[T1LH], count >> 8);
+ /* wait until it hits 0 */
+ while ((in_8(&via[IFR]) & T1_INT) == 0)
+ ;
+ dstart = get_dec();
+ /* clear the interrupt & wait until it hits 0 again */
+ in_8(&via[T1CL]);
+ while ((in_8(&via[IFR]) & T1_INT) == 0)
+ ;
+ dend = get_dec();
+
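+ /* The VIA timer ran down count = VIA_TIMER_FREQ_6 / 100 ticks at a
+ * rate of VIA_TIMER_FREQ_6 / 6 Hz, i.e. for 6/100 of a second, while
+ * the decrementer dropped by (dstart - dend). Hence the decrementer
+ * (timebase) frequency is (dstart - dend) * 100 / 6.
+ */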
+ ppc_tb_freq = (dstart - dend) * 100 / 6;
+
+ iounmap(via);
+
+ return 1;
+}
+#endif
+
+/*
+ * Query the OF and get the decr frequency.
+ */
+void __init pmac_calibrate_decr(void)
+{
+ generic_calibrate_decr();
+
+#ifdef CONFIG_PPC32
+ /* We assume MacRISC2 machines have correct device-tree
+ * calibration. That's better since the VIA itself seems
+ * to be slightly off. --BenH
+ */
+ if (!of_machine_is_compatible("MacRISC2") &&
+ !of_machine_is_compatible("MacRISC3") &&
+ !of_machine_is_compatible("MacRISC4"))
+ if (via_calibrate_decr())
+ return;
+
+ /* Special case: QuickSilver G4s seem to have a badly calibrated
+ * timebase-frequency in OF, VIA is much better on these. We should
+ * probably implement calibration based on the KL timer on these
+ * machines anyway... -BenH
+ */
+ if (of_machine_is_compatible("PowerMac3,5"))
+ if (via_calibrate_decr())
+ return;
+#endif
+}
diff --git a/kernel/arch/powerpc/platforms/powermac/udbg_adb.c b/kernel/arch/powerpc/platforms/powermac/udbg_adb.c
new file mode 100644
index 000000000..366bd221e
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/udbg_adb.c
@@ -0,0 +1,219 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/ptrace.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/cuda.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/xmon.h>
+#include <asm/prom.h>
+#include <asm/bootx.h>
+#include <asm/errno.h>
+#include <asm/pmac_feature.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
+#include <asm/btext.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+
+/*
+ * This implementation is "special", it can "patch" the current
+ * udbg implementation and work on top of it. It must thus be
+ * initialized last
+ */
+
+static void (*udbg_adb_old_putc)(char c);
+static int (*udbg_adb_old_getc)(void);
+static int (*udbg_adb_old_getc_poll)(void);
+
+static enum {
+ input_adb_none,
+ input_adb_pmu,
+ input_adb_cuda,
+} input_type = input_adb_none;
+
+int xmon_wants_key, xmon_adb_keycode;
+
+static inline void udbg_adb_poll(void)
+{
+#ifdef CONFIG_ADB_PMU
+ if (input_type == input_adb_pmu)
+ pmu_poll_adb();
+#endif /* CONFIG_ADB_PMU */
+#ifdef CONFIG_ADB_CUDA
+ if (input_type == input_adb_cuda)
+ cuda_poll();
+#endif /* CONFIG_ADB_CUDA */
+}
+
+#ifdef CONFIG_BOOTX_TEXT
+
+static int udbg_adb_use_btext;
+static int xmon_adb_shiftstate;
+
+static unsigned char xmon_keytab[128] =
+ "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
+ "yt123465=97-80]o" /* 0x10 - 0x1f */
+ "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
+ "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
+ "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
+ "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
+
+static unsigned char xmon_shift_keytab[128] =
+ "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
+ "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
+ "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
+ "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
+ "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
+ "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
+
+static int udbg_adb_local_getc(void)
+{
+ int k, t, on;
+
+ xmon_wants_key = 1;
+ for (;;) {
+ xmon_adb_keycode = -1;
+ t = 0;
+ on = 0;
+ k = -1;
+ do {
+ if (--t < 0) {
+ on = 1 - on;
+ btext_drawchar(on? 0xdb: 0x20);
+ btext_drawchar('\b');
+ t = 200000;
+ }
+ udbg_adb_poll();
+ if (udbg_adb_old_getc_poll)
+ k = udbg_adb_old_getc_poll();
+ } while (k == -1 && xmon_adb_keycode == -1);
+ if (on)
+ btext_drawstring(" \b");
+ if (k != -1)
+ return k;
+ k = xmon_adb_keycode;
+
+ /* test for shift keys */
+ if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
+ xmon_adb_shiftstate = (k & 0x80) == 0;
+ continue;
+ }
+ if (k >= 0x80)
+ continue; /* ignore up transitions */
+ k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
+ if (k != 0)
+ break;
+ }
+ xmon_wants_key = 0;
+ return k;
+}
+#endif /* CONFIG_BOOTX_TEXT */
+
+static int udbg_adb_getc(void)
+{
+#ifdef CONFIG_BOOTX_TEXT
+ if (udbg_adb_use_btext && input_type != input_adb_none)
+ return udbg_adb_local_getc();
+#endif
+ if (udbg_adb_old_getc)
+ return udbg_adb_old_getc();
+ return -1;
+}
+
+/* getc_poll() is not really used unless you have the xmon-over-modem
+ * hack, which doesn't concern us here. We just poll the low level
+ * ADB driver to prevent it from timing out, then call back the original
+ * poll routine.
+ */
+static int udbg_adb_getc_poll(void)
+{
+ udbg_adb_poll();
+
+ if (udbg_adb_old_getc_poll)
+ return udbg_adb_old_getc_poll();
+ return -1;
+}
+
+static void udbg_adb_putc(char c)
+{
+#ifdef CONFIG_BOOTX_TEXT
+ if (udbg_adb_use_btext)
+ btext_drawchar(c);
+#endif
+ if (udbg_adb_old_putc)
+ return udbg_adb_old_putc(c);
+}
+
+void __init udbg_adb_init_early(void)
+{
+#ifdef CONFIG_BOOTX_TEXT
+ if (btext_find_display(1) == 0) {
+ udbg_adb_use_btext = 1;
+ udbg_putc = udbg_adb_putc;
+ }
+#endif
+}
+
+int __init udbg_adb_init(int force_btext)
+{
+ struct device_node *np;
+
+ /* Capture existing callbacks */
+ udbg_adb_old_putc = udbg_putc;
+ udbg_adb_old_getc = udbg_getc;
+ udbg_adb_old_getc_poll = udbg_getc_poll;
+
+ /* Check if our early init was already called */
+ if (udbg_adb_old_putc == udbg_adb_putc)
+ udbg_adb_old_putc = NULL;
+#ifdef CONFIG_BOOTX_TEXT
+ if (udbg_adb_old_putc == btext_drawchar)
+ udbg_adb_old_putc = NULL;
+#endif
+
+ /* Set ours as output */
+ udbg_putc = udbg_adb_putc;
+ udbg_getc = udbg_adb_getc;
+ udbg_getc_poll = udbg_adb_getc_poll;
+
+#ifdef CONFIG_BOOTX_TEXT
+ /* Check if we should use btext output */
+ if (btext_find_display(force_btext) == 0)
+ udbg_adb_use_btext = 1;
+#endif
+
+ /* See if there is a keyboard in the device tree with a parent
+ * of type "adb". If not, we return a failure, but we keep the
+ * btext output set for now.
+ */
+ for_each_node_by_name(np, "keyboard") {
+ struct device_node *parent = of_get_parent(np);
+ int found = (parent && strcmp(parent->type, "adb") == 0);
+ of_node_put(parent);
+ if (found)
+ break;
+ }
+ if (np == NULL)
+ return -ENODEV;
+ of_node_put(np);
+
+#ifdef CONFIG_ADB_PMU
+ if (find_via_pmu())
+ input_type = input_adb_pmu;
+#endif
+#ifdef CONFIG_ADB_CUDA
+ if (find_via_cuda())
+ input_type = input_adb_cuda;
+#endif
+
+ /* Same as above: nothing found, keep btext set for output */
+ if (input_type == input_adb_none)
+ return -ENODEV;
+
+ return 0;
+}
diff --git a/kernel/arch/powerpc/platforms/powermac/udbg_scc.c b/kernel/arch/powerpc/platforms/powermac/udbg_scc.c
new file mode 100644
index 000000000..d83135a98
--- /dev/null
+++ b/kernel/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -0,0 +1,184 @@
+/*
+ * udbg for zilog scc ports as found on Apple PowerMacs
+ *
+ * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <asm/udbg.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pmac_feature.h>
+
+extern u8 real_readb(volatile u8 __iomem *addr);
+extern void real_writeb(u8 data, volatile u8 __iomem *addr);
+
+#define SCC_TXRDY 4
+#define SCC_RXRDY 1
+
+static volatile u8 __iomem *sccc;
+static volatile u8 __iomem *sccd;
+
+static void udbg_scc_putc(char c)
+{
+ if (sccc) {
+ while ((in_8(sccc) & SCC_TXRDY) == 0)
+ ;
+ out_8(sccd, c);
+ if (c == '\n')
+ udbg_scc_putc('\r');
+ }
+}
+
+static int udbg_scc_getc_poll(void)
+{
+ if (sccc) {
+ if ((in_8(sccc) & SCC_RXRDY) != 0)
+ return in_8(sccd);
+ else
+ return -1;
+ }
+ return -1;
+}
+
+static int udbg_scc_getc(void)
+{
+ if (sccc) {
+ while ((in_8(sccc) & SCC_RXRDY) == 0)
+ ;
+ return in_8(sccd);
+ }
+ return -1;
+}
+
+static unsigned char scc_inittab[] = {
+ 13, 0, /* set baud rate divisor */
+ 12, 0,
+ 14, 1, /* baud rate gen enable, src=rtxc */
+ 11, 0x50, /* clocks = br gen */
+ 5, 0xea, /* tx 8 bits, assert DTR & RTS */
+ 4, 0x46, /* x16 clock, 1 stop */
+ 3, 0xc1, /* rx enable, 8 bits */
+};
+
+void udbg_scc_init(int force_scc)
+{
+ const u32 *reg;
+ unsigned long addr;
+ struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
+ struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
+ const char *path;
+ int i, x;
+
+ escc = of_find_node_by_name(NULL, "escc");
+ if (escc == NULL)
+ goto bail;
+ macio = of_get_parent(escc);
+ if (macio == NULL)
+ goto bail;
+ path = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (path != NULL)
+ stdout = of_find_node_by_path(path);
+ for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
+ if (ch == stdout)
+ ch_def = of_node_get(ch);
+ if (strcmp(ch->name, "ch-a") == 0)
+ ch_a = of_node_get(ch);
+ }
+ if (ch_def == NULL && !force_scc)
+ goto bail;
+
+ ch = ch_def ? ch_def : ch_a;
+
+ /* Get address within mac-io ASIC */
+ reg = of_get_property(escc, "reg", NULL);
+ if (reg == NULL)
+ goto bail;
+ addr = reg[0];
+
+ /* Get address of mac-io PCI itself */
+ reg = of_get_property(macio, "assigned-addresses", NULL);
+ if (reg == NULL)
+ goto bail;
+ addr += reg[2];
+
+ /* Lock the serial port */
+ pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
+ PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
+
+ if (ch == ch_a)
+ addr += 0x20;
+ sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE);
+ sccc += addr & ~PAGE_MASK;
+ sccd = sccc + 0x10;
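+ /* At this point addr = mac-io PCI base + escc register offset
+ * (+ 0x20 when using channel A); sccc maps the SCC control register
+ * there and sccd the data register 0x10 above it.
+ */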
+
+ mb();
+
+ for (i = 20000; i != 0; --i)
+ x = in_8(sccc);
+ out_8(sccc, 0x09); /* reset A or B side */
+ out_8(sccc, 0xc0);
+
+ /* If the SCC was the OF output port, read the BRG value; otherwise
+ * set up for 38400 or 57600 8N1 depending on the machine.
+ */
+ if (ch_def != NULL) {
+ out_8(sccc, 13);
+ scc_inittab[1] = in_8(sccc);
+ out_8(sccc, 12);
+ scc_inittab[3] = in_8(sccc);
+ } else if (of_machine_is_compatible("RackMac1,1")
+ || of_machine_is_compatible("RackMac1,2")
+ || of_machine_is_compatible("MacRISC4")) {
+ /* Xserves and G5s default to 57600 */
+ scc_inittab[1] = 0;
+ scc_inittab[3] = 0;
+ } else {
+ /* Others default to 38400 */
+ scc_inittab[1] = 0;
+ scc_inittab[3] = 1;
+ }
+
+ for (i = 0; i < sizeof(scc_inittab); ++i)
+ out_8(sccc, scc_inittab[i]);
+
+
+ udbg_putc = udbg_scc_putc;
+ udbg_getc = udbg_scc_getc;
+ udbg_getc_poll = udbg_scc_getc_poll;
+
+ udbg_puts("Hello World !\n");
+
+ bail:
+ of_node_put(macio);
+ of_node_put(escc);
+ of_node_put(stdout);
+ of_node_put(ch_def);
+ of_node_put(ch_a);
+}
+
+#ifdef CONFIG_PPC64
+static void udbg_real_scc_putc(char c)
+{
+ while ((real_readb(sccc) & SCC_TXRDY) == 0)
+ ;
+ real_writeb(c, sccd);
+ if (c == '\n')
+ udbg_real_scc_putc('\r');
+}
+
+void __init udbg_init_pmac_realmode(void)
+{
+ sccc = (volatile u8 __iomem *)0x80013020ul;
+ sccd = (volatile u8 __iomem *)0x80013030ul;
+
+ udbg_putc = udbg_real_scc_putc;
+ udbg_getc = NULL;
+ udbg_getc_poll = NULL;
+}
+#endif /* CONFIG_PPC64 */