path: root/qemu/hw/misc/macio
Diffstat (limited to 'qemu/hw/misc/macio')
-rw-r--r--   qemu/hw/misc/macio/Makefile.objs     3
-rw-r--r--   qemu/hw/misc/macio/cuda.c          756
-rw-r--r--   qemu/hw/misc/macio/mac_dbdma.c     768
-rw-r--r--   qemu/hw/misc/macio/macio.c         446
4 files changed, 1973 insertions, 0 deletions
diff --git a/qemu/hw/misc/macio/Makefile.objs b/qemu/hw/misc/macio/Makefile.objs
new file mode 100644
index 000000000..ef7ac249e
--- /dev/null
+++ b/qemu/hw/misc/macio/Makefile.objs
@@ -0,0 +1,3 @@
+common-obj-y += macio.o
+common-obj-$(CONFIG_CUDA) += cuda.o
+common-obj-$(CONFIG_MAC_DBDMA) += mac_dbdma.o
diff --git a/qemu/hw/misc/macio/cuda.c b/qemu/hw/misc/macio/cuda.c
new file mode 100644
index 000000000..f3984e3a2
--- /dev/null
+++ b/qemu/hw/misc/macio/cuda.c
@@ -0,0 +1,756 @@
+/*
+ * QEMU PowerMac CUDA device support
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/mac.h"
+#include "hw/input/adb.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+
+/* XXX: implement all timer modes */
+
+/* debug CUDA */
+//#define DEBUG_CUDA
+
+/* debug CUDA packets */
+//#define DEBUG_CUDA_PACKET
+
+#ifdef DEBUG_CUDA
+#define CUDA_DPRINTF(fmt, ...) \
+ do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define CUDA_DPRINTF(fmt, ...)
+#endif
+
+/* Bits in B data register: all active low */
+#define TREQ 0x08 /* Transfer request (input) */
+#define TACK 0x10 /* Transfer acknowledge (output) */
+#define TIP 0x20 /* Transfer in progress (output) */
+
+/* Bits in ACR */
+#define SR_CTRL 0x1c /* Shift register control bits */
+#define SR_EXT 0x0c /* Shift on external clock */
+#define SR_OUT 0x10 /* Shift out if 1 */
+
+/* Bits in IFR and IER */
+#define IER_SET 0x80 /* set bits in IER */
+#define IER_CLR 0 /* clear bits in IER */
+#define SR_INT 0x04 /* Shift register full/empty */
+#define T1_INT 0x40 /* Timer 1 interrupt */
+#define T2_INT 0x20 /* Timer 2 interrupt */
+
+/* Bits in ACR */
+#define T1MODE 0xc0 /* Timer 1 mode */
+#define T1MODE_CONT 0x40 /* continuous interrupts */
+
+/* commands (1st byte) */
+#define ADB_PACKET 0
+#define CUDA_PACKET 1
+#define ERROR_PACKET 2
+#define TIMER_PACKET 3
+#define POWER_PACKET 4
+#define MACIIC_PACKET 5
+#define PMU_PACKET 6
+
+
+/* CUDA commands (2nd byte) */
+#define CUDA_WARM_START 0x0
+#define CUDA_AUTOPOLL 0x1
+#define CUDA_GET_6805_ADDR 0x2
+#define CUDA_GET_TIME 0x3
+#define CUDA_GET_PRAM 0x7
+#define CUDA_SET_6805_ADDR 0x8
+#define CUDA_SET_TIME 0x9
+#define CUDA_POWERDOWN 0xa
+#define CUDA_POWERUP_TIME 0xb
+#define CUDA_SET_PRAM 0xc
+#define CUDA_MS_RESET 0xd
+#define CUDA_SEND_DFAC 0xe
+#define CUDA_BATTERY_SWAP_SENSE 0x10
+#define CUDA_RESET_SYSTEM 0x11
+#define CUDA_SET_IPL 0x12
+#define CUDA_FILE_SERVER_FLAG 0x13
+#define CUDA_SET_AUTO_RATE 0x14
+#define CUDA_GET_AUTO_RATE 0x16
+#define CUDA_SET_DEVICE_LIST 0x19
+#define CUDA_GET_DEVICE_LIST 0x1a
+#define CUDA_SET_ONE_SECOND_MODE 0x1b
+#define CUDA_SET_POWER_MESSAGES 0x21
+#define CUDA_GET_SET_IIC 0x22
+#define CUDA_WAKEUP 0x23
+#define CUDA_TIMER_TICKLE 0x24
+#define CUDA_COMBINED_FORMAT_IIC 0x25
+
+#define CUDA_TIMER_FREQ (4700000 / 6)
+#define CUDA_ADB_POLL_FREQ 50
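+
+/* CUDA_TIMER_FREQ (about 783 kHz) is the rate at which the emulated timers
+ * count (see get_next_irq_time); CUDA_ADB_POLL_FREQ of 50 Hz means the ADB
+ * bus is polled every 20 ms while autopolling is enabled. */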
+
+/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
+#define RTC_OFFSET 2082844800
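+/* 1904-01-01 to 1970-01-01 spans 66 years including 17 leap days:
+ * (66 * 365 + 17) * 86400 = 2082844800 seconds. */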
+
+static void cuda_update(CUDAState *s);
+static void cuda_receive_packet_from_host(CUDAState *s,
+ const uint8_t *data, int len);
+static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
+ int64_t current_time);
+
+static void cuda_update_irq(CUDAState *s)
+{
+ if (s->ifr & s->ier & (SR_INT | T1_INT)) {
+ qemu_irq_raise(s->irq);
+ } else {
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static uint64_t get_tb(uint64_t freq)
+{
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ freq, get_ticks_per_sec());
+}
+
+static unsigned int get_counter(CUDATimer *s)
+{
+ int64_t d;
+ unsigned int counter;
+ uint64_t tb_diff;
+
+ /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup. */
+ tb_diff = get_tb(s->frequency) - s->load_time;
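+    /* 0xBF401675E5D / 2^24 is roughly 783 kHz, i.e. approximately
+     * CUDA_TIMER_FREQ, so d is approximately the elapsed time expressed
+     * in CUDA timer ticks. */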
+ d = (tb_diff * 0xBF401675E5DULL) / (s->frequency << 24);
+
+ if (s->index == 0) {
+ /* the timer goes down from latch to -1 (period of latch + 2) */
+ if (d <= (s->counter_value + 1)) {
+ counter = (s->counter_value - d) & 0xffff;
+ } else {
+ counter = (d - (s->counter_value + 1)) % (s->latch + 2);
+ counter = (s->latch - counter) & 0xffff;
+ }
+ } else {
+ counter = (s->counter_value - d) & 0xffff;
+ }
+ return counter;
+}
+
+static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
+{
+ CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
+ ti->load_time = get_tb(s->frequency);
+ ti->counter_value = val;
+ cuda_timer_update(s, ti, ti->load_time);
+}
+
+static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
+{
+ int64_t d, next_time;
+ unsigned int counter;
+
+ /* current counter value */
+ d = muldiv64(current_time - s->load_time,
+ CUDA_TIMER_FREQ, get_ticks_per_sec());
+ /* the timer goes down from latch to -1 (period of latch + 2) */
+ if (d <= (s->counter_value + 1)) {
+ counter = (s->counter_value - d) & 0xffff;
+ } else {
+ counter = (d - (s->counter_value + 1)) % (s->latch + 2);
+ counter = (s->latch - counter) & 0xffff;
+ }
+
+    /* Note: we consider that the IRQ is raised when the counter reaches 0 */
+ if (counter == 0xffff) {
+ next_time = d + s->latch + 1;
+ } else if (counter == 0) {
+ next_time = d + s->latch + 2;
+ } else {
+ next_time = d + counter;
+ }
+ CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
+ s->latch, d, next_time - d);
+ next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
+ s->load_time;
+ if (next_time <= current_time)
+ next_time = current_time + 1;
+ return next_time;
+}
+
+static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
+ int64_t current_time)
+{
+ if (!ti->timer)
+ return;
+ if ((s->acr & T1MODE) != T1MODE_CONT) {
+ timer_del(ti->timer);
+ } else {
+ ti->next_irq_time = get_next_irq_time(ti, current_time);
+ timer_mod(ti->timer, ti->next_irq_time);
+ }
+}
+
+static void cuda_timer1(void *opaque)
+{
+ CUDAState *s = opaque;
+ CUDATimer *ti = &s->timers[0];
+
+ cuda_timer_update(s, ti, ti->next_irq_time);
+ s->ifr |= T1_INT;
+ cuda_update_irq(s);
+}
+
+static uint32_t cuda_readb(void *opaque, hwaddr addr)
+{
+ CUDAState *s = opaque;
+ uint32_t val;
+
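+    /* The 16 registers are spaced 0x200 bytes apart within the 0x2000
+     * MMIO region, hence the shift by 9. */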
+ addr = (addr >> 9) & 0xf;
+ switch(addr) {
+ case 0:
+ val = s->b;
+ break;
+ case 1:
+ val = s->a;
+ break;
+ case 2:
+ val = s->dirb;
+ break;
+ case 3:
+ val = s->dira;
+ break;
+ case 4:
+ val = get_counter(&s->timers[0]) & 0xff;
+ s->ifr &= ~T1_INT;
+ cuda_update_irq(s);
+ break;
+ case 5:
+ val = get_counter(&s->timers[0]) >> 8;
+ cuda_update_irq(s);
+ break;
+ case 6:
+ val = s->timers[0].latch & 0xff;
+ break;
+ case 7:
+ /* XXX: check this */
+ val = (s->timers[0].latch >> 8) & 0xff;
+ break;
+ case 8:
+ val = get_counter(&s->timers[1]) & 0xff;
+ s->ifr &= ~T2_INT;
+ break;
+ case 9:
+ val = get_counter(&s->timers[1]) >> 8;
+ break;
+ case 10:
+ val = s->sr;
+ s->ifr &= ~SR_INT;
+ cuda_update_irq(s);
+ break;
+ case 11:
+ val = s->acr;
+ break;
+ case 12:
+ val = s->pcr;
+ break;
+ case 13:
+ val = s->ifr;
+ if (s->ifr & s->ier)
+ val |= 0x80;
+ break;
+ case 14:
+ val = s->ier | 0x80;
+ break;
+ default:
+ case 15:
+ val = s->anh;
+ break;
+ }
+ if (addr != 13 || val != 0) {
+ CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
+ }
+
+ return val;
+}
+
+static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
+{
+ CUDAState *s = opaque;
+
+ addr = (addr >> 9) & 0xf;
+ CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);
+
+ switch(addr) {
+ case 0:
+ s->b = val;
+ cuda_update(s);
+ break;
+ case 1:
+ s->a = val;
+ break;
+ case 2:
+ s->dirb = val;
+ break;
+ case 3:
+ s->dira = val;
+ break;
+ case 4:
+ s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ break;
+ case 5:
+ s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
+ s->ifr &= ~T1_INT;
+ set_counter(s, &s->timers[0], s->timers[0].latch);
+ break;
+ case 6:
+ s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ break;
+ case 7:
+ s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
+ s->ifr &= ~T1_INT;
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ break;
+ case 8:
+ s->timers[1].latch = val;
+ set_counter(s, &s->timers[1], val);
+ break;
+ case 9:
+ set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch);
+ break;
+ case 10:
+ s->sr = val;
+ break;
+ case 11:
+ s->acr = val;
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ cuda_update(s);
+ break;
+ case 12:
+ s->pcr = val;
+ break;
+ case 13:
+ /* reset bits */
+ s->ifr &= ~val;
+ cuda_update_irq(s);
+ break;
+ case 14:
+ if (val & IER_SET) {
+ /* set bits */
+ s->ier |= val & 0x7f;
+ } else {
+ /* reset bits */
+ s->ier &= ~val;
+ }
+ cuda_update_irq(s);
+ break;
+ default:
+ case 15:
+ s->anh = val;
+ break;
+ }
+}
+
+/* NOTE: TIP and TREQ are negated */
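+/* The host drives TIP and TACK: while TIP is asserted (low) a byte moves
+ * through the shift register each time TACK or TIP changes state, and
+ * deasserting TIP ends the transfer (delivering the accumulated packet in
+ * the host-to-CUDA direction). */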
+static void cuda_update(CUDAState *s)
+{
+ int packet_received, len;
+
+ packet_received = 0;
+ if (!(s->b & TIP)) {
+ /* transfer requested from host */
+
+ if (s->acr & SR_OUT) {
+ /* data output */
+ if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+ if (s->data_out_index < sizeof(s->data_out)) {
+ CUDA_DPRINTF("send: %02x\n", s->sr);
+ s->data_out[s->data_out_index++] = s->sr;
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ }
+ } else {
+ if (s->data_in_index < s->data_in_size) {
+ /* data input */
+ if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+ s->sr = s->data_in[s->data_in_index++];
+ CUDA_DPRINTF("recv: %02x\n", s->sr);
+ /* indicate end of transfer */
+ if (s->data_in_index >= s->data_in_size) {
+ s->b = (s->b | TREQ);
+ }
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ }
+ }
+ } else {
+ /* no transfer requested: handle sync case */
+ if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
+            /* update TREQ state each time TACK changes state */
+ if (s->b & TACK)
+ s->b = (s->b | TREQ);
+ else
+ s->b = (s->b & ~TREQ);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ } else {
+ if (!(s->last_b & TIP)) {
+ /* handle end of host to cuda transfer */
+ packet_received = (s->data_out_index > 0);
+ /* always an IRQ at the end of transfer */
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ /* signal if there is data to read */
+ if (s->data_in_index < s->data_in_size) {
+ s->b = (s->b & ~TREQ);
+ }
+ }
+ }
+
+ s->last_acr = s->acr;
+ s->last_b = s->b;
+
+ /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
+ recursively */
+ if (packet_received) {
+ len = s->data_out_index;
+ s->data_out_index = 0;
+ cuda_receive_packet_from_host(s, s->data_out, len);
+ }
+}
+
+static void cuda_send_packet_to_host(CUDAState *s,
+ const uint8_t *data, int len)
+{
+#ifdef DEBUG_CUDA_PACKET
+ {
+ int i;
+ printf("cuda_send_packet_to_host:\n");
+ for(i = 0; i < len; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+#endif
+ memcpy(s->data_in, data, len);
+ s->data_in_size = len;
+ s->data_in_index = 0;
+ cuda_update(s);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+}
+
+static void cuda_adb_poll(void *opaque)
+{
+ CUDAState *s = opaque;
+ uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ int olen;
+
+ olen = adb_poll(&s->adb_bus, obuf + 2);
+ if (olen > 0) {
+ obuf[0] = ADB_PACKET;
+ obuf[1] = 0x40; /* polled data */
+ cuda_send_packet_to_host(s, obuf, olen + 2);
+ }
+ timer_mod(s->adb_poll_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+}
+
+static void cuda_receive_packet(CUDAState *s,
+ const uint8_t *data, int len)
+{
+ uint8_t obuf[16];
+ int autopoll;
+ uint32_t ti;
+
+ switch(data[0]) {
+ case CUDA_AUTOPOLL:
+ autopoll = (data[1] != 0);
+ if (autopoll != s->autopoll) {
+ s->autopoll = autopoll;
+ if (autopoll) {
+ timer_mod(s->adb_poll_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+ } else {
+ timer_del(s->adb_poll_timer);
+ }
+ }
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = data[1];
+ cuda_send_packet_to_host(s, obuf, 2);
+ break;
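+    /* Time is kept as a 32-bit big-endian count of seconds since 1904
+     * (see RTC_OFFSET): SET_TIME stores it relative to the virtual clock
+     * and GET_TIME returns it in the same format. */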
+ case CUDA_SET_TIME:
+ ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
+ s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ obuf[2] = 0;
+ cuda_send_packet_to_host(s, obuf, 3);
+ break;
+ case CUDA_GET_TIME:
+ ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ obuf[2] = 0;
+ obuf[3] = ti >> 24;
+ obuf[4] = ti >> 16;
+ obuf[5] = ti >> 8;
+ obuf[6] = ti;
+ cuda_send_packet_to_host(s, obuf, 7);
+ break;
+ case CUDA_FILE_SERVER_FLAG:
+ case CUDA_SET_DEVICE_LIST:
+ case CUDA_SET_AUTO_RATE:
+ case CUDA_SET_POWER_MESSAGES:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ break;
+ case CUDA_POWERDOWN:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ qemu_system_shutdown_request();
+ break;
+ case CUDA_RESET_SYSTEM:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ qemu_system_reset_request();
+ break;
+ default:
+ break;
+ }
+}
+
+static void cuda_receive_packet_from_host(CUDAState *s,
+ const uint8_t *data, int len)
+{
+#ifdef DEBUG_CUDA_PACKET
+ {
+ int i;
+ printf("cuda_receive_packet_from_host:\n");
+ for(i = 0; i < len; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+#endif
+ switch(data[0]) {
+ case ADB_PACKET:
+ {
+ uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ int olen;
+ olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
+ if (olen > 0) {
+ obuf[0] = ADB_PACKET;
+ obuf[1] = 0x00;
+ } else {
+ /* error */
+ obuf[0] = ADB_PACKET;
+ obuf[1] = -olen;
+ olen = 0;
+ }
+ cuda_send_packet_to_host(s, obuf, olen + 2);
+ }
+ break;
+ case CUDA_PACKET:
+ cuda_receive_packet(s, data + 1, len - 1);
+ break;
+ }
+}
+
+static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
+{
+}
+
+static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
+{
+}
+
+static uint32_t cuda_readw (void *opaque, hwaddr addr)
+{
+ return 0;
+}
+
+static uint32_t cuda_readl (void *opaque, hwaddr addr)
+{
+ return 0;
+}
+
+static const MemoryRegionOps cuda_ops = {
+ .old_mmio = {
+ .write = {
+ cuda_writeb,
+ cuda_writew,
+ cuda_writel,
+ },
+ .read = {
+ cuda_readb,
+ cuda_readw,
+ cuda_readl,
+ },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static bool cuda_timer_exist(void *opaque, int version_id)
+{
+ CUDATimer *s = opaque;
+
+ return s->timer != NULL;
+}
+
+static const VMStateDescription vmstate_cuda_timer = {
+ .name = "cuda_timer",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(latch, CUDATimer),
+ VMSTATE_UINT16(counter_value, CUDATimer),
+ VMSTATE_INT64(load_time, CUDATimer),
+ VMSTATE_INT64(next_irq_time, CUDATimer),
+ VMSTATE_TIMER_PTR_TEST(timer, CUDATimer, cuda_timer_exist),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cuda = {
+ .name = "cuda",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(a, CUDAState),
+ VMSTATE_UINT8(b, CUDAState),
+ VMSTATE_UINT8(dira, CUDAState),
+ VMSTATE_UINT8(dirb, CUDAState),
+ VMSTATE_UINT8(sr, CUDAState),
+ VMSTATE_UINT8(acr, CUDAState),
+ VMSTATE_UINT8(pcr, CUDAState),
+ VMSTATE_UINT8(ifr, CUDAState),
+ VMSTATE_UINT8(ier, CUDAState),
+ VMSTATE_UINT8(anh, CUDAState),
+ VMSTATE_INT32(data_in_size, CUDAState),
+ VMSTATE_INT32(data_in_index, CUDAState),
+ VMSTATE_INT32(data_out_index, CUDAState),
+ VMSTATE_UINT8(autopoll, CUDAState),
+ VMSTATE_BUFFER(data_in, CUDAState),
+ VMSTATE_BUFFER(data_out, CUDAState),
+ VMSTATE_UINT32(tick_offset, CUDAState),
+ VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
+ vmstate_cuda_timer, CUDATimer),
+ VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void cuda_reset(DeviceState *dev)
+{
+ CUDAState *s = CUDA(dev);
+
+ s->b = 0;
+ s->a = 0;
+ s->dirb = 0;
+ s->dira = 0;
+ s->sr = 0;
+ s->acr = 0;
+ s->pcr = 0;
+ s->ifr = 0;
+ s->ier = 0;
+ // s->ier = T1_INT | SR_INT;
+ s->anh = 0;
+ s->data_in_size = 0;
+ s->data_in_index = 0;
+ s->data_out_index = 0;
+ s->autopoll = 0;
+
+ s->timers[0].latch = 0xffff;
+ set_counter(s, &s->timers[0], 0xffff);
+
+ s->timers[1].latch = 0;
+ set_counter(s, &s->timers[1], 0xffff);
+}
+
+static void cuda_realizefn(DeviceState *dev, Error **errp)
+{
+ CUDAState *s = CUDA(dev);
+ struct tm tm;
+
+ s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
+ s->timers[0].frequency = s->frequency;
+ s->timers[1].frequency = s->frequency;
+
+ qemu_get_timedate(&tm, 0);
+ s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
+
+ s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
+}
+
+static void cuda_initfn(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ CUDAState *s = CUDA(obj);
+ int i;
+
+ memory_region_init_io(&s->mem, NULL, &cuda_ops, s, "cuda", 0x2000);
+ sysbus_init_mmio(d, &s->mem);
+ sysbus_init_irq(d, &s->irq);
+
+ for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
+ s->timers[i].index = i;
+ }
+
+ qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
+ DEVICE(obj), "adb.0");
+}
+
+static Property cuda_properties[] = {
+ DEFINE_PROP_UINT64("frequency", CUDAState, frequency, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void cuda_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = cuda_realizefn;
+ dc->reset = cuda_reset;
+ dc->vmsd = &vmstate_cuda;
+ dc->props = cuda_properties;
+}
+
+static const TypeInfo cuda_type_info = {
+ .name = TYPE_CUDA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(CUDAState),
+ .instance_init = cuda_initfn,
+ .class_init = cuda_class_init,
+};
+
+static void cuda_register_types(void)
+{
+ type_register_static(&cuda_type_info);
+}
+
+type_init(cuda_register_types)
diff --git a/qemu/hw/misc/macio/mac_dbdma.c b/qemu/hw/misc/macio/mac_dbdma.c
new file mode 100644
index 000000000..b25e8511b
--- /dev/null
+++ b/qemu/hw/misc/macio/mac_dbdma.c
@@ -0,0 +1,768 @@
+/*
+ * PowerMac descriptor-based DMA emulation
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2009 Laurent Vivier
+ *
+ * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
+ *
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * some parts from mol 0.9.71
+ *
+ * Descriptor based DMA emulation
+ *
+ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "qemu/main-loop.h"
+
+/* debug DBDMA */
+//#define DEBUG_DBDMA
+
+#ifdef DEBUG_DBDMA
+#define DBDMA_DPRINTF(fmt, ...) \
+ do { printf("DBDMA: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DBDMA_DPRINTF(fmt, ...)
+#endif
+
+/*
+ */
+
+static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
+{
+ return container_of(ch, DBDMAState, channels[ch->channel]);
+}
+
+#ifdef DEBUG_DBDMA
+static void dump_dbdma_cmd(dbdma_cmd *cmd)
+{
+ printf("dbdma_cmd %p\n", cmd);
+ printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
+ printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
+ printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
+ printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
+ printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
+ printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
+}
+#else
+static void dump_dbdma_cmd(dbdma_cmd *cmd)
+{
+}
+#endif
+
+static void dbdma_cmdptr_load(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO],
+ &ch->current, sizeof(dbdma_cmd));
+}
+
+static void dbdma_cmdptr_save(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("dbdma_cmdptr_save 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n",
+ le16_to_cpu(ch->current.xfer_status),
+ le16_to_cpu(ch->current.res_count));
+ cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO],
+ &ch->current, sizeof(dbdma_cmd));
+}
+
+static void kill_channel(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("kill_channel\n");
+
+ ch->regs[DBDMA_STATUS] |= DEAD;
+ ch->regs[DBDMA_STATUS] &= ~ACTIVE;
+
+ qemu_irq_raise(ch->irq);
+}
+
+static void conditional_interrupt(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t intr;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("%s\n", __func__);
+
+ intr = le16_to_cpu(current->command) & INTR_MASK;
+
+ switch(intr) {
+ case INTR_NEVER: /* don't interrupt */
+ return;
+ case INTR_ALWAYS: /* always interrupt */
+ qemu_irq_raise(ch->irq);
+ DBDMA_DPRINTF("%s: raise\n", __func__);
+ return;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(intr) {
+ case INTR_IFSET: /* intr if condition bit is 1 */
+ if (cond) {
+ qemu_irq_raise(ch->irq);
+ DBDMA_DPRINTF("%s: raise\n", __func__);
+ }
+ return;
+ case INTR_IFCLR: /* intr if condition bit is 0 */
+ if (!cond) {
+ qemu_irq_raise(ch->irq);
+ DBDMA_DPRINTF("%s: raise\n", __func__);
+ }
+ return;
+ }
+}
+
+static int conditional_wait(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t wait;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_wait\n");
+
+ wait = le16_to_cpu(current->command) & WAIT_MASK;
+
+ switch(wait) {
+ case WAIT_NEVER: /* don't wait */
+ return 0;
+ case WAIT_ALWAYS: /* always wait */
+ return 1;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(wait) {
+ case WAIT_IFSET: /* wait if condition bit is 1 */
+ if (cond)
+ return 1;
+ return 0;
+ case WAIT_IFCLR: /* wait if condition bit is 0 */
+ if (!cond)
+ return 1;
+ return 0;
+ }
+ return 0;
+}
+
+static void next(DBDMA_channel *ch)
+{
+ uint32_t cp;
+
+ ch->regs[DBDMA_STATUS] &= ~BT;
+
+ cp = ch->regs[DBDMA_CMDPTR_LO];
+ ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
+ dbdma_cmdptr_load(ch);
+}
+
+static void branch(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+
+ ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep;
+ ch->regs[DBDMA_STATUS] |= BT;
+ dbdma_cmdptr_load(ch);
+}
+
+static void conditional_branch(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t br;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_branch\n");
+
+ /* check if we must branch */
+
+ br = le16_to_cpu(current->command) & BR_MASK;
+
+ switch(br) {
+ case BR_NEVER: /* don't branch */
+ next(ch);
+ return;
+ case BR_ALWAYS: /* always branch */
+ branch(ch);
+ return;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(br) {
+ case BR_IFSET: /* branch if condition bit is 1 */
+ if (cond)
+ branch(ch);
+ else
+ next(ch);
+ return;
+ case BR_IFCLR: /* branch if condition bit is 0 */
+ if (!cond)
+ branch(ch);
+ else
+ next(ch);
+ return;
+ }
+}
+
+static void channel_run(DBDMA_channel *ch);
+
+static void dbdma_end(DBDMA_io *io)
+{
+ DBDMA_channel *ch = io->channel;
+ dbdma_cmd *current = &ch->current;
+
+ DBDMA_DPRINTF("%s\n", __func__);
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ current->res_count = cpu_to_le16(io->len);
+ dbdma_cmdptr_save(ch);
+ if (io->is_last)
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ conditional_branch(ch);
+
+wait:
+ /* Indicate that we're ready for a new DMA round */
+ ch->io.processing = false;
+
+ if ((ch->regs[DBDMA_STATUS] & RUN) &&
+ (ch->regs[DBDMA_STATUS] & ACTIVE))
+ channel_run(ch);
+}
+
+static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t req_count, int is_last)
+{
+ DBDMA_DPRINTF("start_output\n");
+
+ /* KEY_REGS, KEY_DEVICE and KEY_STREAM
+ * are not implemented in the mac-io chip
+ */
+
+ DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
+ if (!addr || key > KEY_STREAM3) {
+ kill_channel(ch);
+ return;
+ }
+
+ ch->io.addr = addr;
+ ch->io.len = req_count;
+ ch->io.is_last = is_last;
+ ch->io.dma_end = dbdma_end;
+ ch->io.is_dma_out = 1;
+ ch->io.processing = true;
+ if (ch->rw) {
+ ch->rw(&ch->io);
+ }
+}
+
+static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t req_count, int is_last)
+{
+ DBDMA_DPRINTF("start_input\n");
+
+ /* KEY_REGS, KEY_DEVICE and KEY_STREAM
+ * are not implemented in the mac-io chip
+ */
+
+ DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
+ if (!addr || key > KEY_STREAM3) {
+ kill_channel(ch);
+ return;
+ }
+
+ ch->io.addr = addr;
+ ch->io.len = req_count;
+ ch->io.is_last = is_last;
+ ch->io.dma_end = dbdma_end;
+ ch->io.is_dma_out = 0;
+ ch->io.processing = true;
+ if (ch->rw) {
+ ch->rw(&ch->io);
+ }
+}
+
+static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t len)
+{
+ dbdma_cmd *current = &ch->current;
+ uint32_t val;
+
+ DBDMA_DPRINTF("load_word\n");
+
+ /* only implements KEY_SYSTEM */
+
+ if (key != KEY_SYSTEM) {
+ printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
+ kill_channel(ch);
+ return;
+ }
+
+ cpu_physical_memory_read(addr, &val, len);
+
+ if (len == 2)
+ val = (val << 16) | (current->cmd_dep & 0x0000ffff);
+ else if (len == 1)
+ val = (val << 24) | (current->cmd_dep & 0x00ffffff);
+
+ current->cmd_dep = val;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ next(ch);
+
+wait:
+ DBDMA_kick(dbdma_from_ch(ch));
+}
+
+static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t len)
+{
+ dbdma_cmd *current = &ch->current;
+ uint32_t val;
+
+ DBDMA_DPRINTF("store_word\n");
+
+ /* only implements KEY_SYSTEM */
+
+ if (key != KEY_SYSTEM) {
+ printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
+ kill_channel(ch);
+ return;
+ }
+
+ val = current->cmd_dep;
+ if (len == 2)
+ val >>= 16;
+ else if (len == 1)
+ val >>= 24;
+
+ cpu_physical_memory_write(addr, &val, len);
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ next(ch);
+
+wait:
+ DBDMA_kick(dbdma_from_ch(ch));
+}
+
+static void nop(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+
+ conditional_interrupt(ch);
+ conditional_branch(ch);
+
+wait:
+ DBDMA_kick(dbdma_from_ch(ch));
+}
+
+static void stop(DBDMA_channel *ch)
+{
+ ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);
+
+    /* the stop command does not increment the command pointer */
+}
+
+static void channel_run(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t cmd, key;
+ uint16_t req_count;
+ uint32_t phy_addr;
+
+ DBDMA_DPRINTF("channel_run\n");
+ dump_dbdma_cmd(current);
+
+ /* clear WAKE flag at command fetch */
+
+ ch->regs[DBDMA_STATUS] &= ~WAKE;
+
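+    /* Decode the 16-bit command word: the operation is in the top bits
+     * (COMMAND_MASK), the transfer key in bits 10-8 (masked with 0x0700
+     * below) and the interrupt/branch/wait condition codes in the low
+     * byte, per the dbdma.h definitions referenced in the file header. */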
+ cmd = le16_to_cpu(current->command) & COMMAND_MASK;
+
+ switch (cmd) {
+ case DBDMA_NOP:
+ nop(ch);
+ return;
+
+ case DBDMA_STOP:
+ stop(ch);
+ return;
+ }
+
+ key = le16_to_cpu(current->command) & 0x0700;
+ req_count = le16_to_cpu(current->req_count);
+ phy_addr = le32_to_cpu(current->phy_addr);
+
+ if (key == KEY_STREAM4) {
+ printf("command %x, invalid key 4\n", cmd);
+ kill_channel(ch);
+ return;
+ }
+
+ switch (cmd) {
+ case OUTPUT_MORE:
+ start_output(ch, key, phy_addr, req_count, 0);
+ return;
+
+ case OUTPUT_LAST:
+ start_output(ch, key, phy_addr, req_count, 1);
+ return;
+
+ case INPUT_MORE:
+ start_input(ch, key, phy_addr, req_count, 0);
+ return;
+
+ case INPUT_LAST:
+ start_input(ch, key, phy_addr, req_count, 1);
+ return;
+ }
+
+ if (key < KEY_REGS) {
+ printf("command %x, invalid key %x\n", cmd, key);
+ key = KEY_SYSTEM;
+ }
+
+    /* for LOAD_WORD and STORE_WORD, only the low 3 bits of req_count
+     * are used, and BRANCH is invalid
+     */
+
+ req_count = req_count & 0x0007;
+ if (req_count & 0x4) {
+ req_count = 4;
+ phy_addr &= ~3;
+ } else if (req_count & 0x2) {
+ req_count = 2;
+ phy_addr &= ~1;
+ } else
+ req_count = 1;
+
+ switch (cmd) {
+ case LOAD_WORD:
+ load_word(ch, key, phy_addr, req_count);
+ return;
+
+ case STORE_WORD:
+ store_word(ch, key, phy_addr, req_count);
+ return;
+ }
+}
+
+static void DBDMA_run(DBDMAState *s)
+{
+ int channel;
+
+ for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
+ DBDMA_channel *ch = &s->channels[channel];
+ uint32_t status = ch->regs[DBDMA_STATUS];
+ if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) {
+ channel_run(ch);
+ }
+ }
+}
+
+static void DBDMA_run_bh(void *opaque)
+{
+ DBDMAState *s = opaque;
+
+ DBDMA_DPRINTF("DBDMA_run_bh\n");
+
+ DBDMA_run(s);
+}
+
+void DBDMA_kick(DBDMAState *dbdma)
+{
+ qemu_bh_schedule(dbdma->bh);
+}
+
+void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
+ DBDMA_rw rw, DBDMA_flush flush,
+ void *opaque)
+{
+ DBDMAState *s = dbdma;
+ DBDMA_channel *ch = &s->channels[nchan];
+
+ DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan);
+
+ ch->irq = irq;
+ ch->channel = nchan;
+ ch->rw = rw;
+ ch->flush = flush;
+ ch->io.opaque = opaque;
+ ch->io.channel = ch;
+}
+
+static void
+dbdma_control_write(DBDMA_channel *ch)
+{
+ uint16_t mask, value;
+ uint32_t status;
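+    /* The upper 16 bits of the value written to the control register
+     * select which status bits to change; the lower 16 bits supply their
+     * new values. */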
+
+ mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
+ value = ch->regs[DBDMA_CONTROL] & 0xffff;
+
+ value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);
+
+ status = ch->regs[DBDMA_STATUS];
+
+ status = (value & mask) | (status & ~mask);
+
+ if (status & WAKE)
+ status |= ACTIVE;
+ if (status & RUN) {
+ status |= ACTIVE;
+ status &= ~DEAD;
+ }
+ if (status & PAUSE)
+ status &= ~ACTIVE;
+ if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
+ /* RUN is cleared */
+ status &= ~(ACTIVE|DEAD);
+ if ((status & FLUSH) && ch->flush) {
+ ch->flush(&ch->io);
+ status &= ~FLUSH;
+ }
+ }
+
+ DBDMA_DPRINTF(" status 0x%08x\n", status);
+
+ ch->regs[DBDMA_STATUS] = status;
+
+ if (status & ACTIVE) {
+ DBDMA_kick(dbdma_from_ch(ch));
+ }
+ if ((status & FLUSH) && ch->flush) {
+ ch->flush(&ch->io);
+ }
+}
+
+static void dbdma_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ int channel = addr >> DBDMA_CHANNEL_SHIFT;
+ DBDMAState *s = opaque;
+ DBDMA_channel *ch = &s->channels[channel];
+ int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
+
+ DBDMA_DPRINTF("writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
+ addr, value);
+ DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
+ (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
+
+ /* cmdptr cannot be modified if channel is ACTIVE */
+
+ if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) {
+ return;
+ }
+
+ ch->regs[reg] = value;
+
+ switch(reg) {
+ case DBDMA_CONTROL:
+ dbdma_control_write(ch);
+ break;
+ case DBDMA_CMDPTR_LO:
+ /* 16-byte aligned */
+ ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
+ dbdma_cmdptr_load(ch);
+ break;
+ case DBDMA_STATUS:
+ case DBDMA_INTR_SEL:
+ case DBDMA_BRANCH_SEL:
+ case DBDMA_WAIT_SEL:
+ /* nothing to do */
+ break;
+ case DBDMA_XFER_MODE:
+ case DBDMA_CMDPTR_HI:
+ case DBDMA_DATA2PTR_HI:
+ case DBDMA_DATA2PTR_LO:
+ case DBDMA_ADDRESS_HI:
+ case DBDMA_BRANCH_ADDR_HI:
+ case DBDMA_RES1:
+ case DBDMA_RES2:
+ case DBDMA_RES3:
+ case DBDMA_RES4:
+ /* unused */
+ break;
+ }
+}
+
+static uint64_t dbdma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ uint32_t value;
+ int channel = addr >> DBDMA_CHANNEL_SHIFT;
+ DBDMAState *s = opaque;
+ DBDMA_channel *ch = &s->channels[channel];
+ int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
+
+ value = ch->regs[reg];
+
+ DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
+ DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
+ (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
+
+ switch(reg) {
+ case DBDMA_CONTROL:
+ value = 0;
+ break;
+ case DBDMA_STATUS:
+ case DBDMA_CMDPTR_LO:
+ case DBDMA_INTR_SEL:
+ case DBDMA_BRANCH_SEL:
+ case DBDMA_WAIT_SEL:
+ /* nothing to do */
+ break;
+ case DBDMA_XFER_MODE:
+ case DBDMA_CMDPTR_HI:
+ case DBDMA_DATA2PTR_HI:
+ case DBDMA_DATA2PTR_LO:
+ case DBDMA_ADDRESS_HI:
+ case DBDMA_BRANCH_ADDR_HI:
+ /* unused */
+ value = 0;
+ break;
+ case DBDMA_RES1:
+ case DBDMA_RES2:
+ case DBDMA_RES3:
+ case DBDMA_RES4:
+ /* reserved */
+ break;
+ }
+
+ return value;
+}
+
+static const MemoryRegionOps dbdma_ops = {
+ .read = dbdma_read,
+ .write = dbdma_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const VMStateDescription vmstate_dbdma_channel = {
+ .name = "dbdma_channel",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_dbdma = {
+ .name = "dbdma",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
+ vmstate_dbdma_channel, DBDMA_channel),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void dbdma_reset(void *opaque)
+{
+ DBDMAState *s = opaque;
+ int i;
+
+ for (i = 0; i < DBDMA_CHANNELS; i++)
+ memset(s->channels[i].regs, 0, DBDMA_SIZE);
+}
+
+void* DBDMA_init (MemoryRegion **dbdma_mem)
+{
+ DBDMAState *s;
+ int i;
+
+ s = g_malloc0(sizeof(DBDMAState));
+
+ for (i = 0; i < DBDMA_CHANNELS; i++) {
+ DBDMA_io *io = &s->channels[i].io;
+ qemu_iovec_init(&io->iov, 1);
+ }
+
+ memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000);
+ *dbdma_mem = &s->mem;
+ vmstate_register(NULL, -1, &vmstate_dbdma, s);
+ qemu_register_reset(dbdma_reset, s);
+
+ s->bh = qemu_bh_new(DBDMA_run_bh, s);
+
+ return s;
+}
diff --git a/qemu/hw/misc/macio/macio.c b/qemu/hw/misc/macio/macio.c
new file mode 100644
index 000000000..e3c0242d4
--- /dev/null
+++ b/qemu/hw/misc/macio/macio.c
@@ -0,0 +1,446 @@
+/*
+ * PowerMac MacIO device emulation
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/mac.h"
+#include "hw/pci/pci.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "hw/char/escc.h"
+
+#define TYPE_MACIO "macio"
+#define MACIO(obj) OBJECT_CHECK(MacIOState, (obj), TYPE_MACIO)
+
+typedef struct MacIOState
+{
+ /*< private >*/
+ PCIDevice parent;
+ /*< public >*/
+
+ MemoryRegion bar;
+ CUDAState cuda;
+ void *dbdma;
+ MemoryRegion *pic_mem;
+ MemoryRegion *escc_mem;
+ uint64_t frequency;
+} MacIOState;
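+
+/* Layout of the 512 KiB (0x80000) MacIO BAR as mapped below: PIC at
+ * 0x00000 (Heathrow, Old World) or 0x40000 (OpenPIC, New World), DBDMA at
+ * 0x08000, escc-legacy at 0x12000, ESCC at 0x13000, timer at 0x15000
+ * (New World only), CUDA at 0x16000, IDE at 0x20000/0x21000 and NVRAM at
+ * 0x60000 (Old World only). */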
+
+#define OLDWORLD_MACIO(obj) \
+ OBJECT_CHECK(OldWorldMacIOState, (obj), TYPE_OLDWORLD_MACIO)
+
+typedef struct OldWorldMacIOState {
+ /*< private >*/
+ MacIOState parent_obj;
+ /*< public >*/
+
+ qemu_irq irqs[5];
+
+ MacIONVRAMState nvram;
+ MACIOIDEState ide[2];
+} OldWorldMacIOState;
+
+#define NEWWORLD_MACIO(obj) \
+ OBJECT_CHECK(NewWorldMacIOState, (obj), TYPE_NEWWORLD_MACIO)
+
+typedef struct NewWorldMacIOState {
+ /*< private >*/
+ MacIOState parent_obj;
+ /*< public >*/
+ qemu_irq irqs[5];
+ MACIOIDEState ide[2];
+} NewWorldMacIOState;
+
+/*
+ * The mac-io has two interfaces to the ESCC. One is called "escc-legacy",
+ * while the other one is the normal, current ESCC interface.
+ *
+ * The magic below creates memory aliases to spawn the escc-legacy device
+ * purely by rerouting the respective registers to our escc region. This
+ * works because the only difference between the two memory regions is the
+ * register layout, not their semantics.
+ *
+ * Reference: ftp://ftp.software.ibm.com/rs6000/technology/spec/chrp/inwork/CHRP_IORef_1.0.pdf
+ */
+static void macio_escc_legacy_setup(MacIOState *macio_state)
+{
+ MemoryRegion *escc_legacy = g_new(MemoryRegion, 1);
+ MemoryRegion *bar = &macio_state->bar;
+ int i;
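+    /* Each pair below is (offset within the escc-legacy region, offset of
+     * the aliased register within the current ESCC region). */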
+ static const int maps[] = {
+ 0x00, 0x00,
+ 0x02, 0x20,
+ 0x04, 0x10,
+ 0x06, 0x30,
+ 0x08, 0x40,
+ 0x0A, 0x50,
+ 0x60, 0x60,
+ 0x70, 0x70,
+ 0x80, 0x70,
+ 0x90, 0x80,
+ 0xA0, 0x90,
+ 0xB0, 0xA0,
+ 0xC0, 0xB0,
+ 0xD0, 0xC0,
+ 0xE0, 0xD0,
+ 0xF0, 0xE0,
+ };
+
+ memory_region_init(escc_legacy, NULL, "escc-legacy", 256);
+ for (i = 0; i < ARRAY_SIZE(maps); i += 2) {
+ MemoryRegion *port = g_new(MemoryRegion, 1);
+ memory_region_init_alias(port, NULL, "escc-legacy-port",
+ macio_state->escc_mem, maps[i+1], 0x2);
+ memory_region_add_subregion(escc_legacy, maps[i], port);
+ }
+
+ memory_region_add_subregion(bar, 0x12000, escc_legacy);
+}
+
+static void macio_bar_setup(MacIOState *macio_state)
+{
+ MemoryRegion *bar = &macio_state->bar;
+
+ if (macio_state->escc_mem) {
+ memory_region_add_subregion(bar, 0x13000, macio_state->escc_mem);
+ macio_escc_legacy_setup(macio_state);
+ }
+}
+
+static void macio_common_realize(PCIDevice *d, Error **errp)
+{
+ MacIOState *s = MACIO(d);
+ SysBusDevice *sysbus_dev;
+ Error *err = NULL;
+
+ object_property_set_bool(OBJECT(&s->cuda), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ memory_region_add_subregion(&s->bar, 0x16000,
+ sysbus_mmio_get_region(sysbus_dev, 0));
+
+ macio_bar_setup(s);
+ pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
+}
+
+static void macio_realize_ide(MacIOState *s, MACIOIDEState *ide,
+ qemu_irq irq0, qemu_irq irq1, int dmaid,
+ Error **errp)
+{
+ SysBusDevice *sysbus_dev;
+
+ sysbus_dev = SYS_BUS_DEVICE(ide);
+ sysbus_connect_irq(sysbus_dev, 0, irq0);
+ sysbus_connect_irq(sysbus_dev, 1, irq1);
+ macio_ide_register_dma(ide, s->dbdma, dmaid);
+ object_property_set_bool(OBJECT(ide), true, "realized", errp);
+}
+
+static void macio_oldworld_realize(PCIDevice *d, Error **errp)
+{
+ MacIOState *s = MACIO(d);
+ OldWorldMacIOState *os = OLDWORLD_MACIO(d);
+ Error *err = NULL;
+ SysBusDevice *sysbus_dev;
+ int i;
+ int cur_irq = 0;
+
+ macio_common_realize(d, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ sysbus_connect_irq(sysbus_dev, 0, os->irqs[cur_irq++]);
+
+ object_property_set_bool(OBJECT(&os->nvram), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_dev = SYS_BUS_DEVICE(&os->nvram);
+ memory_region_add_subregion(&s->bar, 0x60000,
+ sysbus_mmio_get_region(sysbus_dev, 0));
+ pmac_format_nvram_partition(&os->nvram, os->nvram.size);
+
+ if (s->pic_mem) {
+ /* Heathrow PIC */
+ memory_region_add_subregion(&s->bar, 0x00000, s->pic_mem);
+ }
+
+ /* IDE buses */
+ for (i = 0; i < ARRAY_SIZE(os->ide); i++) {
+ qemu_irq irq0 = os->irqs[cur_irq++];
+ qemu_irq irq1 = os->irqs[cur_irq++];
+
+ macio_realize_ide(s, &os->ide[i], irq0, irq1, 0x16 + (i * 4), &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+}
+
+static void macio_init_ide(MacIOState *s, MACIOIDEState *ide, size_t ide_size,
+ int index)
+{
+ gchar *name;
+
+ object_initialize(ide, ide_size, TYPE_MACIO_IDE);
+ qdev_set_parent_bus(DEVICE(ide), sysbus_get_default());
+ memory_region_add_subregion(&s->bar, 0x1f000 + ((index + 1) * 0x1000),
+ &ide->mem);
+ name = g_strdup_printf("ide[%i]", index);
+ object_property_add_child(OBJECT(s), name, OBJECT(ide), NULL);
+ g_free(name);
+}
+
+static void macio_oldworld_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ OldWorldMacIOState *os = OLDWORLD_MACIO(obj);
+ DeviceState *dev;
+ int i;
+
+ qdev_init_gpio_out(DEVICE(obj), os->irqs, ARRAY_SIZE(os->irqs));
+
+ object_initialize(&os->nvram, sizeof(os->nvram), TYPE_MACIO_NVRAM);
+ dev = DEVICE(&os->nvram);
+ qdev_prop_set_uint32(dev, "size", 0x2000);
+ qdev_prop_set_uint32(dev, "it_shift", 4);
+
+ for (i = 0; i < 2; i++) {
+ macio_init_ide(s, &os->ide[i], sizeof(os->ide[i]), i);
+ }
+}
+
+static void timer_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+}
+
+static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint32_t value = 0;
+ uint64_t systime = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t kltime;
+
+ kltime = muldiv64(systime, 4194300, get_ticks_per_sec() * 4);
+ kltime = muldiv64(kltime, 18432000, 1048575);
+
+ switch (addr) {
+ case 0x38:
+ value = kltime;
+ break;
+ case 0x3c:
+ value = kltime >> 32;
+ break;
+ }
+
+ return value;
+}
+
+static const MemoryRegionOps timer_ops = {
+ .read = timer_read,
+ .write = timer_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void macio_newworld_realize(PCIDevice *d, Error **errp)
+{
+ MacIOState *s = MACIO(d);
+ NewWorldMacIOState *ns = NEWWORLD_MACIO(d);
+ Error *err = NULL;
+ SysBusDevice *sysbus_dev;
+ MemoryRegion *timer_memory = NULL;
+ int i;
+ int cur_irq = 0;
+
+ macio_common_realize(d, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[cur_irq++]);
+
+ if (s->pic_mem) {
+ /* OpenPIC */
+ memory_region_add_subregion(&s->bar, 0x40000, s->pic_mem);
+ }
+
+ /* IDE buses */
+ for (i = 0; i < ARRAY_SIZE(ns->ide); i++) {
+ qemu_irq irq0 = ns->irqs[cur_irq++];
+ qemu_irq irq1 = ns->irqs[cur_irq++];
+
+ macio_realize_ide(s, &ns->ide[i], irq0, irq1, 0x16 + (i * 4), &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+
+ /* Timer */
+ timer_memory = g_new(MemoryRegion, 1);
+ memory_region_init_io(timer_memory, OBJECT(s), &timer_ops, NULL, "timer",
+ 0x1000);
+ memory_region_add_subregion(&s->bar, 0x15000, timer_memory);
+}
+
+static void macio_newworld_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ NewWorldMacIOState *ns = NEWWORLD_MACIO(obj);
+ int i;
+
+ qdev_init_gpio_out(DEVICE(obj), ns->irqs, ARRAY_SIZE(ns->irqs));
+
+ for (i = 0; i < 2; i++) {
+ macio_init_ide(s, &ns->ide[i], sizeof(ns->ide[i]), i);
+ }
+}
+
+static void macio_instance_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ MemoryRegion *dbdma_mem;
+
+ memory_region_init(&s->bar, NULL, "macio", 0x80000);
+
+ object_initialize(&s->cuda, sizeof(s->cuda), TYPE_CUDA);
+ qdev_set_parent_bus(DEVICE(&s->cuda), sysbus_get_default());
+ object_property_add_child(obj, "cuda", OBJECT(&s->cuda), NULL);
+
+ s->dbdma = DBDMA_init(&dbdma_mem);
+ memory_region_add_subregion(&s->bar, 0x08000, dbdma_mem);
+}
+
+static const VMStateDescription vmstate_macio_oldworld = {
+ .name = "macio-oldworld",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj.parent, OldWorldMacIOState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void macio_oldworld_class_init(ObjectClass *oc, void *data)
+{
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ pdc->realize = macio_oldworld_realize;
+ pdc->device_id = PCI_DEVICE_ID_APPLE_343S1201;
+ dc->vmsd = &vmstate_macio_oldworld;
+}
+
+static const VMStateDescription vmstate_macio_newworld = {
+ .name = "macio-newworld",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj.parent, NewWorldMacIOState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void macio_newworld_class_init(ObjectClass *oc, void *data)
+{
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ pdc->realize = macio_newworld_realize;
+ pdc->device_id = PCI_DEVICE_ID_APPLE_UNI_N_KEYL;
+ dc->vmsd = &vmstate_macio_newworld;
+}
+
+static Property macio_properties[] = {
+ DEFINE_PROP_UINT64("frequency", MacIOState, frequency, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void macio_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->vendor_id = PCI_VENDOR_ID_APPLE;
+ k->class_id = PCI_CLASS_OTHERS << 8;
+ dc->props = macio_properties;
+}
+
+static const TypeInfo macio_oldworld_type_info = {
+ .name = TYPE_OLDWORLD_MACIO,
+ .parent = TYPE_MACIO,
+ .instance_size = sizeof(OldWorldMacIOState),
+ .instance_init = macio_oldworld_init,
+ .class_init = macio_oldworld_class_init,
+};
+
+static const TypeInfo macio_newworld_type_info = {
+ .name = TYPE_NEWWORLD_MACIO,
+ .parent = TYPE_MACIO,
+ .instance_size = sizeof(NewWorldMacIOState),
+ .instance_init = macio_newworld_init,
+ .class_init = macio_newworld_class_init,
+};
+
+static const TypeInfo macio_type_info = {
+ .name = TYPE_MACIO,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(MacIOState),
+ .instance_init = macio_instance_init,
+ .abstract = true,
+ .class_init = macio_class_init,
+};
+
+static void macio_register_types(void)
+{
+ type_register_static(&macio_type_info);
+ type_register_static(&macio_oldworld_type_info);
+ type_register_static(&macio_newworld_type_info);
+}
+
+type_init(macio_register_types)
+
+void macio_init(PCIDevice *d,
+ MemoryRegion *pic_mem,
+ MemoryRegion *escc_mem)
+{
+ MacIOState *macio_state = MACIO(d);
+
+ macio_state->pic_mem = pic_mem;
+ macio_state->escc_mem = escc_mem;
+    /* Note: this code is strongly inspired by the corresponding code
+       in PearPC */
+ qdev_prop_set_uint64(DEVICE(&macio_state->cuda), "frequency",
+ macio_state->frequency);
+
+ qdev_init_nofail(DEVICE(d));
+}