author    Yang Zhang <yang.z.zhang@intel.com>  2015-08-28 09:58:54 +0800
committer Yang Zhang <yang.z.zhang@intel.com>  2015-09-01 12:44:00 +0800
commit    e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb (patch)
tree      66b09f592c55df2878107a468a91d21506104d3f /qemu/roms/SLOF/include/ppcp7
parent    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (diff)
Add qemu 2.4.0
Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
Diffstat (limited to 'qemu/roms/SLOF/include/ppcp7')
-rw-r--r--  qemu/roms/SLOF/include/ppcp7/cache.h  150
-rw-r--r--  qemu/roms/SLOF/include/ppcp7/cpu.h     66
2 files changed, 216 insertions, 0 deletions
diff --git a/qemu/roms/SLOF/include/ppcp7/cache.h b/qemu/roms/SLOF/include/ppcp7/cache.h
new file mode 100644
index 000000000..dc6837196
--- /dev/null
+++ b/qemu/roms/SLOF/include/ppcp7/cache.h
@@ -0,0 +1,150 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef __CACHE_H
+#define __CACHE_H
+
+#include <cpu.h>
+#include <stdint.h>
+
+/* XXX FIXME: Use proper CI load/store */
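+/*
+ * Cache-inhibited (MMIO) storage is not accessed directly here; the
+ * accessors below route every access through the hypervisor via the
+ * PAPR hypercalls H_LOGICAL_CI_LOAD (0x3c) and H_LOGICAL_CI_STORE
+ * (0x40).  The ".long 0x44000022" in the asm is the encoding of
+ * "sc 1", the hypercall instruction, emitted as a raw word presumably
+ * so that assemblers without LEV-operand support still build it.
+ */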
+#define cache_inhibited_access(type,size) \
+ static inline type ci_read_##size(type * addr) \
+ { \
+ register uint64_t arg0 asm ("r3"); \
+ register uint64_t arg1 asm ("r4"); \
+ register uint64_t arg2 asm ("r5"); \
+ \
+	arg0 = 0x3c;	/* H_LOGICAL_CI_LOAD */			\
+ arg1 = size / 8; \
+ arg2 = (uint64_t)addr; \
+ \
+ asm volatile( \
+ ".long 0x44000022 \n" /* HVCALL */ \
+ : "=&r" (arg0), "=&r"(arg1), "=&r"(arg2) \
+ : "0"(arg0), "1"(arg1), "2"(arg2) \
+ : "r0", "r6", "r7", "r8", "r9", "r10", "r11", \
+ "r12", "memory", "cr0", "cr1", "cr5", \
+ "cr6", "cr7", "ctr", "xer"); \
+ return arg0 ? (type)-1 : arg1; \
+ } \
+ static inline void ci_write_##size(type * addr, type data) \
+ { \
+ register uint64_t arg0 asm ("r3"); \
+ register uint64_t arg1 asm ("r4"); \
+ register uint64_t arg2 asm ("r5"); \
+ register uint64_t arg3 asm ("r6"); \
+ \
+	arg0 = 0x40;	/* H_LOGICAL_CI_STORE */		\
+ arg1 = size / 8; \
+ arg2 = (uint64_t)addr; \
+ arg3 = (uint64_t)data; \
+ \
+ asm volatile( \
+ ".long 0x44000022 \n" /* HVCALL */ \
+ : "=&r"(arg0),"=&r"(arg1),"=&r"(arg2),"=&r"(arg3) \
+ : "0"(arg0),"1"(arg1),"2"(arg2),"3"(arg3) \
+ : "r0", "r7", "r8", "r9", "r10", "r11", \
+ "r12", "memory", "cr0", "cr1", "cr5", \
+ "cr6", "cr7", "ctr", "xer"); \
+ }
+
+cache_inhibited_access(uint8_t, 8)
+cache_inhibited_access(uint16_t, 16)
+cache_inhibited_access(uint32_t, 32)
+cache_inhibited_access(uint64_t, 64)
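+/*
+ * The instantiations above provide ci_read_8/16/32/64 and
+ * ci_write_8/16/32/64.  A minimal usage sketch, with a hypothetical
+ * MMIO base and register offsets:
+ *
+ *   uint32_t status = ci_read_32((uint32_t *)(mmio_base + 0x10));
+ *   ci_write_32((uint32_t *)(mmio_base + 0x14), status | 0x1);
+ */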
+
+#define _FWOVERLAP(s, d, size) ((d >= s) && ((type_u)d < ((type_u)s + size)))
+
+// 3.1
+#define _FWMOVE(s, d, size, t) \
+ { t *s1=(t *)s, *d1=(t *)d; \
+ while (size > 0) { *d1++ = *s1++; size -= sizeof(t); } }
+
+#define _BWMOVE(s, d, size, t) { \
+ t *s1=(t *)((char *)s+size), *d1=(t *)((char *)d+size); \
+ while (size > 0) { *--d1 = *--s1; size -= sizeof(t); } \
+}
+
+
+#define _MOVE(s, d, size, t) if _FWOVERLAP(s, d, size) _BWMOVE(s, d, size, t) else _FWMOVE(s, d, size, t)
+
+#define _FASTMOVE(s, d, size) \
+ switch (((type_u)s | (type_u)d | size) & (sizeof(type_u)-1)) { \
+ case 0: _MOVE(s, d, size, type_u); break; \
+ case sizeof(type_l): _MOVE(s, d, size, type_l); break; \
+ case sizeof(type_w): _MOVE(s, d, size, type_w); break; \
+ default: _MOVE(s, d, size, type_c); break; \
+ }
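+/*
+ * _FASTMOVE picks the widest naturally aligned copy: ORing source,
+ * destination and length and masking with sizeof(type_u)-1 yields 0
+ * when all three are multiples of sizeof(type_u) (assumed to be the
+ * 8-byte type), exactly sizeof(type_l) when the common alignment is
+ * 4 bytes, exactly sizeof(type_w) for 2 bytes, and any other residue
+ * (e.g. 6) conservatively falls back to the byte-wise copy.
+ */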
+
+static inline void ci_rmove(void *dst, void *src, unsigned long esize,
+ unsigned long count)
+{
+ register uint64_t arg0 asm ("r3");
+ register uint64_t arg1 asm ("r4");
+ register uint64_t arg2 asm ("r5");
+ register uint64_t arg3 asm ("r6");
+ register uint64_t arg4 asm ("r7");
+ register uint64_t arg5 asm ("r8");
+
+ arg0 = 0xf001; /* KVMPPC_H_LOGICAL_MEMOP */
+ arg1 = (uint64_t)dst;
+ arg2 = (uint64_t)src;
+ arg3 = esize;
+ arg4 = count;
+ arg5 = 0; /* 0 = copy */
+
+ asm volatile(".long 0x44000022 \n" /* HVCALL */
+ : "=&r"(arg0),"=&r"(arg1),"=&r"(arg2),
+ "=&r"(arg3),"=&r"(arg4),"=&r"(arg5)
+ : "0"(arg0),"1"(arg1),"2"(arg2),
+ "3"(arg3),"4"(arg4),"5"(arg5)
+ : "r0", "r9", "r10", "r11",
+ "r12", "memory", "cr0", "cr1", "cr5",
+ "cr6", "cr7", "ctr", "xer");
+}
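+/*
+ * Note that esize is the log2 of the element size in bytes (0 = byte,
+ * 1 = halfword, 2 = word, 3 = doubleword); _FASTRMOVE below therefore
+ * pairs esize 3 with a count of size >> 3, and so on.  0xf001
+ * (KVMPPC_H_LOGICAL_MEMOP) is a KVM/QEMU-specific hypercall, not part
+ * of standard PAPR.
+ */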
+
+#define _FASTRMOVE(s, d, size) do { \
+ switch (((type_u)s | (type_u)d | size) & (sizeof(type_u)-1)) {\
+ case 0: ci_rmove(d,s,3,size>>3); break; \
+ case sizeof(type_l): ci_rmove(d,s,2,size>>2); break; \
+ case sizeof(type_w): ci_rmove(d,s,1,size>>1); break; \
+ default: ci_rmove(d,s,0,size); break; \
+ } \
+ } while(0)
+
+static inline uint16_t bswap16_load(uint64_t addr)
+{
+ unsigned int val;
+ asm volatile ("lhbrx %0, 0, %1":"=r" (val):"r"(addr));
+ return val;
+}
+
+static inline uint32_t bswap32_load(uint64_t addr)
+{
+ unsigned int val;
+ asm volatile ("lwbrx %0, 0, %1":"=r" (val):"r"(addr));
+ return val;
+}
+
+static inline void bswap16_store(uint64_t addr, uint16_t val)
+{
+ asm volatile ("sthbrx %0, 0, %1"::"r" (val), "r"(addr));
+}
+
+static inline void bswap32_store(uint64_t addr, uint32_t val)
+{
+ asm volatile ("stwbrx %0, 0, %1"::"r" (val), "r"(addr));
+}
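+/*
+ * The lhbrx/lwbrx and sthbrx/stwbrx forms access memory with the byte
+ * order reversed, giving this big-endian firmware cheap little-endian
+ * accessors.  A sketch, with a hypothetical little-endian device
+ * register at reg_addr:
+ *
+ *   bswap32_store(reg_addr, 0x12345678);   -- bytes 78 56 34 12 in memory
+ *   uint32_t v = bswap32_load(reg_addr);   -- reads back 0x12345678
+ */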
+
+#endif /* __CACHE_H */
+
diff --git a/qemu/roms/SLOF/include/ppcp7/cpu.h b/qemu/roms/SLOF/include/ppcp7/cpu.h
new file mode 100644
index 000000000..1b1fadd82
--- /dev/null
+++ b/qemu/roms/SLOF/include/ppcp7/cpu.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef __CPU_H
+#define __CPU_H
+
+/* Used in boot_abort.S, will need something better for KVM */
+#define HSPRG0 304
+
+/* XXX FIXME: Can be more efficient, no dcbst nor loop needed on P7 */
+/* This macro uses r0 */
+#define FLUSH_CACHE(r, n) add n, n, r; \
+ addi n, n, 127; \
+ rlwinm r, r, 0,0,24; \
+ rlwinm n, n, 0,0,24; \
+ sub n, n, r; \
+ srwi n, n, 7; \
+ mtctr n; \
+ 0: dcbst 0, r; \
+ sync; \
+ icbi 0, r; \
+ sync; \
+ isync; \
+ addi r, r, 128; \
+ bdnz 0b;
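+/* The macro rounds [r, r+n) out to 128-byte cache-line boundaries,
+   loads the line count into ctr, and then for each line pushes the
+   data cache (dcbst) and invalidates the instruction cache (icbi),
+   with sync/isync to make the update visible to instruction fetch. */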
+
+#ifndef __ASSEMBLER__
+#define STRINGIFY(x...) #x
+#define EXPAND(x) STRINGIFY(x)
+
+static inline void flush_cache(void* r, long n)
+{
+ asm volatile(EXPAND(FLUSH_CACHE(%0, %1))
+ : "+r"(r), "+r"(n)
+ :: "memory", "cc", "r0", "ctr");
+}
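+/*
+ * Typical use is after copying or patching code, so that the new
+ * instructions are visible to fetch.  A sketch with hypothetical
+ * names:
+ *
+ *   memcpy(entry, image, image_size);
+ *   flush_cache(entry, image_size);
+ */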
+
+static inline void eieio(void)
+{
+ asm volatile ("eieio":::"memory");
+}
+
+static inline void barrier(void)
+{
+ asm volatile("" : : : "memory");
+}
+#define cpu_relax() barrier()
+
+static inline void sync(void)
+{
+ asm volatile ("sync" ::: "memory");
+}
+#define mb() sync()
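+/*
+ * Rough ordering guide: eieio orders accesses to cache-inhibited
+ * (I/O) storage, sync/mb() is the full heavyweight barrier, and
+ * barrier()/cpu_relax() only prevent compiler reordering.
+ */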
+
+#endif /* __ASSEMBLER__ */
+
+#endif