author     Yang Zhang <yang.z.zhang@intel.com>    2015-08-28 09:58:54 +0800
committer  Yang Zhang <yang.z.zhang@intel.com>    2015-09-01 12:44:00 +0800
commit     e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb (patch)
tree       66b09f592c55df2878107a468a91d21506104d3f /qemu/roms/u-boot/common/bouncebuf.c
parent     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (diff)

Add qemu 2.4.0

Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
Diffstat (limited to 'qemu/roms/u-boot/common/bouncebuf.c')
-rw-r--r--  qemu/roms/u-boot/common/bouncebuf.c  83
1 file changed, 83 insertions, 0 deletions
diff --git a/qemu/roms/u-boot/common/bouncebuf.c b/qemu/roms/u-boot/common/bouncebuf.c
new file mode 100644
index 000000000..9eece6d75
--- /dev/null
+++ b/qemu/roms/u-boot/common/bouncebuf.c
@@ -0,0 +1,83 @@
+/*
+ * Generic bounce buffer implementation
+ *
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <errno.h>
+#include <bouncebuf.h>
+
+static int addr_aligned(struct bounce_buffer *state)
+{
+	const ulong align_mask = ARCH_DMA_MINALIGN - 1;
+
+	/* Check if start is aligned */
+	if ((ulong)state->user_buffer & align_mask) {
+		debug("Unaligned buffer address %p\n", state->user_buffer);
+		return 0;
+	}
+
+	/* Check if length is aligned */
+	if (state->len != state->len_aligned) {
+ debug("Unaligned buffer length %d\n", state->len);
+		return 0;
+	}
+
+	/* Aligned */
+	return 1;
+}
+
+int bounce_buffer_start(struct bounce_buffer *state, void *data,
+			size_t len, unsigned int flags)
+{
+	state->user_buffer = data;
+	state->bounce_buffer = data;
+	state->len = len;
+	state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
+	state->flags = flags;
+
+	if (!addr_aligned(state)) {
+		state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
+						state->len_aligned);
+		if (!state->bounce_buffer)
+			return -ENOMEM;
+
+		if (state->flags & GEN_BB_READ)
+			memcpy(state->bounce_buffer, state->user_buffer,
+			       state->len);
+	}
+
+	/*
+	 * Flush data to RAM so DMA reads can pick it up,
+	 * and any CPU writebacks don't race with DMA writes
+	 */
+	flush_dcache_range((unsigned long)state->bounce_buffer,
+			   (unsigned long)(state->bounce_buffer) +
+			   state->len_aligned);
+
+	return 0;
+}
+
+int bounce_buffer_stop(struct bounce_buffer *state)
+{
+	if (state->flags & GEN_BB_WRITE) {
+		/* Invalidate cache so that CPU can see any newly DMA'd data */
+		invalidate_dcache_range((unsigned long)state->bounce_buffer,
+					(unsigned long)(state->bounce_buffer) +
+					state->len_aligned);
+	}
+
+	if (state->bounce_buffer == state->user_buffer)
+		return 0;
+
+	if (state->flags & GEN_BB_WRITE)
+		memcpy(state->user_buffer, state->bounce_buffer, state->len);
+
+	free(state->bounce_buffer);
+
+	return 0;
+}
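
For reference, callers pair bounce_buffer_start() and bounce_buffer_stop() around a single DMA transfer and point the hardware at state->bounce_buffer rather than the caller's (possibly unaligned) buffer. The sketch below is illustrative only and is not part of this commit; dma_transfer_to_device() is a hypothetical driver helper standing in for whatever actually programs the DMA engine.

/* Minimal usage sketch (illustrative, not from this commit). */
#include <common.h>
#include <bouncebuf.h>

static int send_block_via_dma(void *data, size_t len)
{
	struct bounce_buffer bb;
	int ret;

	/* GEN_BB_READ: the DMA engine reads from memory during the transfer */
	ret = bounce_buffer_start(&bb, data, len, GEN_BB_READ);
	if (ret)
		return ret;

	/* Hand the cache-aligned address to the hardware (hypothetical helper) */
	dma_transfer_to_device(bb.bounce_buffer, bb.len_aligned);

	/* Copies back to the user buffer and frees only if a bounce buffer was used */
	return bounce_buffer_stop(&bb);
}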