Diffstat (limited to 'kernel/drivers/gpu/ipu-v3')
-rw-r--r--   kernel/drivers/gpu/ipu-v3/Kconfig          8
-rw-r--r--   kernel/drivers/gpu/ipu-v3/Makefile         4
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-common.c  1350
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-cpmem.c    764
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-csi.c      741
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-dc.c       482
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-di.c       756
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-dmfc.c     436
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-dp.c       363
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-ic.c       778
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-prv.h      223
-rw-r--r--   kernel/drivers/gpu/ipu-v3/ipu-smfc.c     208
12 files changed, 6113 insertions, 0 deletions
diff --git a/kernel/drivers/gpu/ipu-v3/Kconfig b/kernel/drivers/gpu/ipu-v3/Kconfig
new file mode 100644
index 000000000..aefdff953
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/Kconfig
@@ -0,0 +1,8 @@
+config IMX_IPUV3_CORE
+ tristate "IPUv3 core support"
+ depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+ depends on RESET_CONTROLLER
+ select GENERIC_IRQ_CHIP
+ help
+ Choose this if you have an i.MX5/6 system and want to use the Image
+ Processing Unit. This option only enables IPU base support.
diff --git a/kernel/drivers/gpu/ipu-v3/Makefile b/kernel/drivers/gpu/ipu-v3/Makefile
new file mode 100644
index 000000000..107ec236a
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
+
+imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
+ ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
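+
+# Editor's note (illustrative, not part of the original commit): with
+# CONFIG_IMX_IPUV3_CORE=m the objects listed above are linked into a single
+# imx-ipu-v3.ko module, while =y builds them into the kernel image; the
+# option itself is the tristate defined in the Kconfig hunk above.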
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-common.c b/kernel/drivers/gpu/ipu-v3/ipu-common.c
new file mode 100644
index 000000000..67bab5c36
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-common.c
@@ -0,0 +1,1350 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_fourcc.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->cm_reg + offset);
+}
+
+static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
+{
+ writel(value, ipu->cm_reg + offset);
+}
+
+void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
+{
+ u32 val;
+
+ val = ipu_cm_read(ipu, IPU_SRM_PRI2);
+ val |= 0x8;
+ ipu_cm_write(ipu, val, IPU_SRM_PRI2);
+}
+EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
+
+enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return IPUV3_COLORSPACE_RGB;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return IPUV3_COLORSPACE_YUV;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
+
+enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ return IPUV3_COLORSPACE_YUV;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB565:
+ return IPUV3_COLORSPACE_RGB;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
+
+bool ipu_pixelformat_is_planar(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
+
+enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
+{
+ switch (mbus_code & 0xf000) {
+ case 0x1000:
+ return IPUV3_COLORSPACE_RGB;
+ case 0x2000:
+ return IPUV3_COLORSPACE_YUV;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
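+
+/*
+ * Editor's note (illustrative, not part of the original commit): this switch
+ * relies on the media bus pixel code numbering convention, in which the RGB
+ * codes are allocated in the 0x1xxx range and the YUV codes in the 0x2xxx
+ * range, so masking with 0xf000 is enough to tell the two families apart.
+ */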
+
+int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ /*
+ * for the planar YUV formats, the stride passed to
+ * cpmem must be the stride in bytes of the Y plane.
+ * And all the planar YUV formats have an 8-bit
+ * Y component.
+ */
+ return (8 * pixel_stride) >> 3;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ return (16 * pixel_stride) >> 3;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB24:
+ return (24 * pixel_stride) >> 3;
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_RGB32:
+ return (32 * pixel_stride) >> 3;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
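+
+/*
+ * Editor's note (illustrative, not part of the original commit): for a
+ * 640-pixel-wide line, ipu_stride_to_bytes(640, V4L2_PIX_FMT_RGB24) returns
+ * (24 * 640) >> 3 = 1920 bytes, while any of the planar YUV formats return
+ * (8 * 640) >> 3 = 640 bytes, i.e. the stride of the 8-bit Y plane only.
+ */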
+
+int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
+ bool hflip, bool vflip)
+{
+ u32 r90, vf, hf;
+
+ switch (degrees) {
+ case 0:
+ vf = hf = r90 = 0;
+ break;
+ case 90:
+ vf = hf = 0;
+ r90 = 1;
+ break;
+ case 180:
+ vf = hf = 1;
+ r90 = 0;
+ break;
+ case 270:
+ vf = hf = r90 = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hf ^= (u32)hflip;
+ vf ^= (u32)vflip;
+
+ *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
+
+int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
+ bool hflip, bool vflip)
+{
+ u32 r90, vf, hf;
+
+ r90 = ((u32)mode >> 2) & 0x1;
+ hf = ((u32)mode >> 1) & 0x1;
+ vf = ((u32)mode >> 0) & 0x1;
+ hf ^= (u32)hflip;
+ vf ^= (u32)vflip;
+
+ switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
+ case IPU_ROTATE_NONE:
+ *degrees = 0;
+ break;
+ case IPU_ROTATE_90_RIGHT:
+ *degrees = 90;
+ break;
+ case IPU_ROTATE_180:
+ *degrees = 180;
+ break;
+ case IPU_ROTATE_90_LEFT:
+ *degrees = 270;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
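+
+/*
+ * Editor's note (illustrative, not part of the original commit): the rotate
+ * mode is a 3-bit value with bit 0 = vertical flip, bit 1 = horizontal flip
+ * and bit 2 = 90 degree rotation. For example,
+ * ipu_degrees_to_rot_mode(&mode, 90, false, true) starts from
+ * (r90, hf, vf) = (1, 0, 0), XORs in vflip and yields mode = 0b101; feeding
+ * that mode back into ipu_rot_mode_to_degrees() with the same vflip undoes
+ * the XOR and returns 90 degrees again.
+ */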
+
+struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
+{
+ struct ipuv3_channel *channel;
+
+ dev_dbg(ipu->dev, "%s %d\n", __func__, num);
+
+ if (num > 63)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&ipu->channel_lock);
+
+ channel = &ipu->channel[num];
+
+ if (channel->busy) {
+ channel = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ channel->busy = true;
+ channel->num = num;
+
+out:
+ mutex_unlock(&ipu->channel_lock);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_get);
+
+void ipu_idmac_put(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+
+ dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
+
+ mutex_lock(&ipu->channel_lock);
+
+ channel->busy = false;
+
+ mutex_unlock(&ipu->channel_lock);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_put);
+
+#define idma_mask(ch) (1 << ((ch) & 0x1f))
+
+/*
+ * This is an undocumented feature: writing a one to a channel's bit in
+ * IPU_CHA_CUR_BUF or IPU_CHA_TRIPLE_CUR_BUF resets the channel's
+ * internal current buffer pointer, so that transfers start from buffer
+ * 0 on the next channel enable (that's the theory anyway; the i.MX6 TRM
+ * only says these are read-only registers). This operation is required
+ * for channel linking to work correctly; for instance, video capture
+ * pipelines that carry out image rotations will fail after the first
+ * streaming run unless this function is called for each channel before
+ * the channels are re-enabled.
+ */
+static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
+}
+
+void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
+ bool doublebuffer)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
+ if (doublebuffer)
+ reg |= idma_mask(channel->num);
+ else
+ reg &= ~idma_mask(channel->num);
+ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
+
+ __ipu_idmac_reset_current_buffer(channel);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
+
+static const struct {
+ int chnum;
+ u32 reg;
+ int shift;
+} idmac_lock_en_info[] = {
+ { .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
+ { .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
+ { .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
+ { .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
+ { .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
+ { .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
+ { .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
+ { .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
+ { .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
+ { .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
+ { .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
+ { .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
+ { .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
+ { .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
+ { .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
+ { .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
+ { .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
+};
+
+int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 bursts, regval;
+ int i;
+
+ switch (num_bursts) {
+ case 0:
+ case 1:
+ bursts = 0x00; /* locking disabled */
+ break;
+ case 2:
+ bursts = 0x01;
+ break;
+ case 4:
+ bursts = 0x02;
+ break;
+ case 8:
+ bursts = 0x03;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
+ if (channel->num == idmac_lock_en_info[i].chnum)
+ break;
+ }
+ if (i >= ARRAY_SIZE(idmac_lock_en_info))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
+ regval &= ~(0x03 << idmac_lock_en_info[i].shift);
+ regval |= (bursts << idmac_lock_en_info[i].shift);
+ ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
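+
+/*
+ * Editor's note (illustrative, not part of the original commit): for channel
+ * 28 with num_bursts = 8, the function writes the 2-bit code 0x03 at shift 20
+ * of IDMAC_CH_LOCK_EN_1, per the idmac_lock_en_info[] table above; a channel
+ * with no table entry (channel 0, for instance) gets -EINVAL regardless of
+ * the requested burst count.
+ */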
+
+int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
+{
+ unsigned long lock_flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, lock_flags);
+
+ val = ipu_cm_read(ipu, IPU_DISP_GEN);
+
+ if (mask & IPU_CONF_DI0_EN)
+ val |= IPU_DI0_COUNTER_RELEASE;
+ if (mask & IPU_CONF_DI1_EN)
+ val |= IPU_DI1_COUNTER_RELEASE;
+
+ ipu_cm_write(ipu, val, IPU_DISP_GEN);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ val |= mask;
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ spin_unlock_irqrestore(&ipu->lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_module_enable);
+
+int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
+{
+ unsigned long lock_flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, lock_flags);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ val &= ~mask;
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ val = ipu_cm_read(ipu, IPU_DISP_GEN);
+
+ if (mask & IPU_CONF_DI0_EN)
+ val &= ~IPU_DI0_COUNTER_RELEASE;
+ if (mask & IPU_CONF_DI1_EN)
+ val &= ~IPU_DI1_COUNTER_RELEASE;
+
+ ipu_cm_write(ipu, val, IPU_DISP_GEN);
+
+ spin_unlock_irqrestore(&ipu->lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_module_disable);
+
+int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+
+ return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
+
+bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 reg = 0;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+ switch (buf_num) {
+ case 0:
+ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
+ break;
+ case 1:
+ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
+ break;
+ case 2:
+ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
+ break;
+ }
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return ((reg & idma_mask(channel->num)) != 0);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
+
+void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ /* Mark buffer as ready. */
+ if (buf_num == 0)
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
+ else
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
+
+void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
+ switch (buf_num) {
+ case 0:
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
+ break;
+ case 1:
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
+ break;
+ case 2:
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
+ break;
+ default:
+ break;
+ }
+ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
+
+int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
+ val |= idma_mask(channel->num);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
+
+bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
+{
+ return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
+
+int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
+ idma_mask(channel->num)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
+
+int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
+ while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
+
+int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ /* Disable DMA channel(s) */
+ val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
+ val &= ~idma_mask(channel->num);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+
+ __ipu_idmac_reset_current_buffer(channel);
+
+ /* Set channel buffers NOT to be ready */
+ ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
+
+ if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
+ idma_mask(channel->num)) {
+ ipu_cm_write(ipu, idma_mask(channel->num),
+ IPU_CHA_BUF0_RDY(channel->num));
+ }
+
+ if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
+ idma_mask(channel->num)) {
+ ipu_cm_write(ipu, idma_mask(channel->num),
+ IPU_CHA_BUF1_RDY(channel->num));
+ }
+
+ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
+
+ /* Reset the double buffer */
+ val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
+ val &= ~idma_mask(channel->num);
+ ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
+
+/*
+ * The i.MX6 rev. D TRM says that enabling the WM feature will increase
+ * a channel's priority; refer to Table 36-8, "Calculated priority value".
+ * The sub-module that is the sink or source for the channel must enable
+ * the watermark signal for this to take effect (SMFC_WM, for instance).
+ */
+void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
+ if (enable)
+ val |= 1 << (channel->num % 32);
+ else
+ val &= ~(1 << (channel->num % 32));
+ ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
+
+static int ipu_memory_reset(struct ipu_soc *ipu)
+{
+ unsigned long timeout;
+
+ ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+/*
+ * Set the source mux for the given CSI. Selects either parallel or
+ * MIPI CSI2 sources.
+ */
+void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
+{
+ unsigned long flags;
+ u32 val, mask;
+
+ mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
+ IPU_CONF_CSI0_DATA_SOURCE;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ if (mipi_csi2)
+ val |= mask;
+ else
+ val &= ~mask;
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
+
+/*
+ * Set the source mux for the IC. Selects either CSI[01] or the VDI.
+ */
+void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ if (vdi) {
+ val |= IPU_CONF_IC_INPUT;
+ } else {
+ val &= ~IPU_CONF_IC_INPUT;
+ if (csi_id == 1)
+ val |= IPU_CONF_CSI_SEL;
+ else
+ val &= ~IPU_CONF_CSI_SEL;
+ }
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
+
+struct ipu_devtype {
+ const char *name;
+ unsigned long cm_ofs;
+ unsigned long cpmem_ofs;
+ unsigned long srm_ofs;
+ unsigned long tpm_ofs;
+ unsigned long csi0_ofs;
+ unsigned long csi1_ofs;
+ unsigned long ic_ofs;
+ unsigned long disp0_ofs;
+ unsigned long disp1_ofs;
+ unsigned long dc_tmpl_ofs;
+ unsigned long vdi_ofs;
+ enum ipuv3_type type;
+};
+
+static struct ipu_devtype ipu_type_imx51 = {
+ .name = "IPUv3EX",
+ .cm_ofs = 0x1e000000,
+ .cpmem_ofs = 0x1f000000,
+ .srm_ofs = 0x1f040000,
+ .tpm_ofs = 0x1f060000,
+ .csi0_ofs = 0x1f030000,
+ .csi1_ofs = 0x1f038000,
+ .ic_ofs = 0x1e020000,
+ .disp0_ofs = 0x1e040000,
+ .disp1_ofs = 0x1e048000,
+ .dc_tmpl_ofs = 0x1f080000,
+ .vdi_ofs = 0x1e068000,
+ .type = IPUV3EX,
+};
+
+static struct ipu_devtype ipu_type_imx53 = {
+ .name = "IPUv3M",
+ .cm_ofs = 0x06000000,
+ .cpmem_ofs = 0x07000000,
+ .srm_ofs = 0x07040000,
+ .tpm_ofs = 0x07060000,
+ .csi0_ofs = 0x07030000,
+ .csi1_ofs = 0x07038000,
+ .ic_ofs = 0x06020000,
+ .disp0_ofs = 0x06040000,
+ .disp1_ofs = 0x06048000,
+ .dc_tmpl_ofs = 0x07080000,
+ .vdi_ofs = 0x06068000,
+ .type = IPUV3M,
+};
+
+static struct ipu_devtype ipu_type_imx6q = {
+ .name = "IPUv3H",
+ .cm_ofs = 0x00200000,
+ .cpmem_ofs = 0x00300000,
+ .srm_ofs = 0x00340000,
+ .tpm_ofs = 0x00360000,
+ .csi0_ofs = 0x00230000,
+ .csi1_ofs = 0x00238000,
+ .ic_ofs = 0x00220000,
+ .disp0_ofs = 0x00240000,
+ .disp1_ofs = 0x00248000,
+ .dc_tmpl_ofs = 0x00380000,
+ .vdi_ofs = 0x00268000,
+ .type = IPUV3H,
+};
+
+static const struct of_device_id imx_ipu_dt_ids[] = {
+ { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
+ { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
+ { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
+
+static int ipu_submodules_init(struct ipu_soc *ipu,
+ struct platform_device *pdev, unsigned long ipu_base,
+ struct clk *ipu_clk)
+{
+ char *unit;
+ int ret;
+ struct device *dev = &pdev->dev;
+ const struct ipu_devtype *devtype = ipu->devtype;
+
+ ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
+ if (ret) {
+ unit = "cpmem";
+ goto err_cpmem;
+ }
+
+ ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
+ IPU_CONF_CSI0_EN, ipu_clk);
+ if (ret) {
+ unit = "csi0";
+ goto err_csi_0;
+ }
+
+ ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
+ IPU_CONF_CSI1_EN, ipu_clk);
+ if (ret) {
+ unit = "csi1";
+ goto err_csi_1;
+ }
+
+ ret = ipu_ic_init(ipu, dev,
+ ipu_base + devtype->ic_ofs,
+ ipu_base + devtype->tpm_ofs);
+ if (ret) {
+ unit = "ic";
+ goto err_ic;
+ }
+
+ ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
+ IPU_CONF_DI0_EN, ipu_clk);
+ if (ret) {
+ unit = "di0";
+ goto err_di_0;
+ }
+
+ ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
+ IPU_CONF_DI1_EN, ipu_clk);
+ if (ret) {
+ unit = "di1";
+ goto err_di_1;
+ }
+
+ ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
+ IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
+ if (ret) {
+ unit = "dc_template";
+ goto err_dc;
+ }
+
+ ret = ipu_dmfc_init(ipu, dev, ipu_base +
+ devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
+ if (ret) {
+ unit = "dmfc";
+ goto err_dmfc;
+ }
+
+ ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
+ if (ret) {
+ unit = "dp";
+ goto err_dp;
+ }
+
+ ret = ipu_smfc_init(ipu, dev, ipu_base +
+ devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
+ if (ret) {
+ unit = "smfc";
+ goto err_smfc;
+ }
+
+ return 0;
+
+err_smfc:
+ ipu_dp_exit(ipu);
+err_dp:
+ ipu_dmfc_exit(ipu);
+err_dmfc:
+ ipu_dc_exit(ipu);
+err_dc:
+ ipu_di_exit(ipu, 1);
+err_di_1:
+ ipu_di_exit(ipu, 0);
+err_di_0:
+ ipu_ic_exit(ipu);
+err_ic:
+ ipu_csi_exit(ipu, 1);
+err_csi_1:
+ ipu_csi_exit(ipu, 0);
+err_csi_0:
+ ipu_cpmem_exit(ipu);
+err_cpmem:
+ dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
+ return ret;
+}
+
+static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
+{
+ unsigned long status;
+ int i, bit, irq;
+
+ for (i = 0; i < num_regs; i++) {
+
+ status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
+ status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
+
+ for_each_set_bit(bit, &status, 32) {
+ irq = irq_linear_revmap(ipu->domain,
+ regs[i] * 32 + bit);
+ if (irq)
+ generic_handle_irq(irq);
+ }
+ }
+}
+
+static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+ const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
+ struct irq_chip *chip = irq_get_chip(irq);
+
+ chained_irq_enter(chip, desc);
+
+ ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+ const int int_reg[] = { 4, 5, 8, 9};
+ struct irq_chip *chip = irq_get_chip(irq);
+
+ chained_irq_enter(chip, desc);
+
+ ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
+
+ chained_irq_exit(chip, desc);
+}
+
+int ipu_map_irq(struct ipu_soc *ipu, int irq)
+{
+ int virq;
+
+ virq = irq_linear_revmap(ipu->domain, irq);
+ if (!virq)
+ virq = irq_create_mapping(ipu->domain, irq);
+
+ return virq;
+}
+EXPORT_SYMBOL_GPL(ipu_map_irq);
+
+int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
+ enum ipu_channel_irq irq_type)
+{
+ return ipu_map_irq(ipu, irq_type + channel->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
+
+static void ipu_submodules_exit(struct ipu_soc *ipu)
+{
+ ipu_smfc_exit(ipu);
+ ipu_dp_exit(ipu);
+ ipu_dmfc_exit(ipu);
+ ipu_dc_exit(ipu);
+ ipu_di_exit(ipu, 1);
+ ipu_di_exit(ipu, 0);
+ ipu_ic_exit(ipu);
+ ipu_csi_exit(ipu, 1);
+ ipu_csi_exit(ipu, 0);
+ ipu_cpmem_exit(ipu);
+}
+
+static int platform_remove_devices_fn(struct device *dev, void *unused)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static void platform_device_unregister_children(struct platform_device *pdev)
+{
+ device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
+}
+
+struct ipu_platform_reg {
+ struct ipu_client_platformdata pdata;
+ const char *name;
+ int reg_offset;
+};
+
+static const struct ipu_platform_reg client_reg[] = {
+ {
+ .pdata = {
+ .di = 0,
+ .dc = 5,
+ .dp = IPU_DP_FLOW_SYNC_BG,
+ .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
+ .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
+ },
+ .name = "imx-ipuv3-crtc",
+ }, {
+ .pdata = {
+ .di = 1,
+ .dc = 1,
+ .dp = -EINVAL,
+ .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
+ .dma[1] = -EINVAL,
+ },
+ .name = "imx-ipuv3-crtc",
+ }, {
+ .pdata = {
+ .csi = 0,
+ .dma[0] = IPUV3_CHANNEL_CSI0,
+ .dma[1] = -EINVAL,
+ },
+ .reg_offset = IPU_CM_CSI0_REG_OFS,
+ .name = "imx-ipuv3-camera",
+ }, {
+ .pdata = {
+ .csi = 1,
+ .dma[0] = IPUV3_CHANNEL_CSI1,
+ .dma[1] = -EINVAL,
+ },
+ .reg_offset = IPU_CM_CSI1_REG_OFS,
+ .name = "imx-ipuv3-camera",
+ },
+};
+
+static DEFINE_MUTEX(ipu_client_id_mutex);
+static int ipu_client_id;
+
+static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+{
+ struct device *dev = ipu->dev;
+ unsigned i;
+ int id, ret;
+
+ mutex_lock(&ipu_client_id_mutex);
+ id = ipu_client_id;
+ ipu_client_id += ARRAY_SIZE(client_reg);
+ mutex_unlock(&ipu_client_id_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
+ const struct ipu_platform_reg *reg = &client_reg[i];
+ struct platform_device *pdev;
+ struct resource res;
+
+ if (reg->reg_offset) {
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
+ res.end = res.start + PAGE_SIZE - 1;
+ pdev = platform_device_register_resndata(dev, reg->name,
+ id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
+ } else {
+ pdev = platform_device_register_data(dev, reg->name,
+ id++, &reg->pdata, sizeof(reg->pdata));
+ }
+
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto err_register;
+ }
+ }
+
+ return 0;
+
+err_register:
+ platform_device_unregister_children(to_platform_device(dev));
+
+ return ret;
+}
+
+
+static int ipu_irq_init(struct ipu_soc *ipu)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned long unused[IPU_NUM_IRQS / 32] = {
+ 0x400100d0, 0xffe000fd,
+ 0x400100d0, 0xffe000fd,
+ 0x400100d0, 0xffe000fd,
+ 0x4077ffff, 0xffe7e1fd,
+ 0x23fffffe, 0x8880fff0,
+ 0xf98fe7d0, 0xfff81fff,
+ 0x400100d0, 0xffe000fd,
+ 0x00000000,
+ };
+ int ret, i;
+
+ ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
+ &irq_generic_chip_ops, ipu);
+ if (!ipu->domain) {
+ dev_err(ipu->dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
+ ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
+ handle_level_irq, 0,
+ IRQF_VALID, 0);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to alloc generic irq chips\n");
+ irq_domain_remove(ipu->domain);
+ return ret;
+ }
+
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
+ gc = irq_get_domain_generic_chip(ipu->domain, i);
+ gc->reg_base = ipu->cm_reg;
+ gc->unused = unused[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = IPU_INT_STAT(i / 32);
+ ct->regs.mask = IPU_INT_CTRL(i / 32);
+ }
+
+ irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
+ irq_set_handler_data(ipu->irq_sync, ipu);
+ irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
+ irq_set_handler_data(ipu->irq_err, ipu);
+
+ return 0;
+}
+
+static void ipu_irq_exit(struct ipu_soc *ipu)
+{
+ int i, irq;
+
+ irq_set_chained_handler(ipu->irq_err, NULL);
+ irq_set_handler_data(ipu->irq_err, NULL);
+ irq_set_chained_handler(ipu->irq_sync, NULL);
+ irq_set_handler_data(ipu->irq_sync, NULL);
+
+ /* TODO: remove irq_domain_generic_chips */
+
+ for (i = 0; i < IPU_NUM_IRQS; i++) {
+ irq = irq_linear_revmap(ipu->domain, i);
+ if (irq)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(ipu->domain);
+}
+
+void ipu_dump(struct ipu_soc *ipu)
+{
+ int i;
+
+ dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CONF));
+ dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CONF));
+ dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
+ dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
+ dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
+ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
+ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
+ dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
+ for (i = 0; i < 15; i++)
+ dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
+ ipu_cm_read(ipu, IPU_INT_CTRL(i)));
+}
+EXPORT_SYMBOL_GPL(ipu_dump);
+
+static int ipu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_ipu_dt_ids, &pdev->dev);
+ struct ipu_soc *ipu;
+ struct resource *res;
+ unsigned long ipu_base;
+ int i, ret, irq_sync, irq_err;
+ const struct ipu_devtype *devtype;
+
+ devtype = of_id->data;
+
+ irq_sync = platform_get_irq(pdev, 0);
+ irq_err = platform_get_irq(pdev, 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
+ irq_sync, irq_err);
+
+ if (!res || irq_sync < 0 || irq_err < 0)
+ return -ENODEV;
+
+ ipu_base = res->start;
+
+ ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
+ if (!ipu)
+ return -ENODEV;
+
+ for (i = 0; i < 64; i++)
+ ipu->channel[i].ipu = ipu;
+ ipu->devtype = devtype;
+ ipu->ipu_type = devtype->type;
+
+ spin_lock_init(&ipu->lock);
+ mutex_init(&ipu->channel_lock);
+
+ dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs);
+ dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
+ dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
+ ipu_base + devtype->cpmem_ofs);
+ dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
+ ipu_base + devtype->csi0_ofs);
+ dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
+ ipu_base + devtype->csi1_ofs);
+ dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
+ ipu_base + devtype->ic_ofs);
+ dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
+ ipu_base + devtype->disp0_ofs);
+ dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
+ ipu_base + devtype->disp1_ofs);
+ dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
+ ipu_base + devtype->srm_ofs);
+ dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
+ ipu_base + devtype->tpm_ofs);
+ dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
+ dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
+ dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
+ dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
+ ipu_base + devtype->vdi_ofs);
+
+ ipu->cm_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cm_ofs, PAGE_SIZE);
+ ipu->idmac_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
+ PAGE_SIZE);
+
+ if (!ipu->cm_reg || !ipu->idmac_reg)
+ return -ENOMEM;
+
+ ipu->clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(ipu->clk)) {
+ ret = PTR_ERR(ipu->clk);
+ dev_err(&pdev->dev, "clk_get failed with %d", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ipu);
+
+ ret = clk_prepare_enable(ipu->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ipu->dev = &pdev->dev;
+ ipu->irq_sync = irq_sync;
+ ipu->irq_err = irq_err;
+
+ ret = ipu_irq_init(ipu);
+ if (ret)
+ goto out_failed_irq;
+
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
+ goto out_failed_reset;
+ }
+ ret = ipu_memory_reset(ipu);
+ if (ret)
+ goto out_failed_reset;
+
+ /* Set MCU_T to divide MCU access window into 2 */
+ ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
+ IPU_DISP_GEN);
+
+ ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
+ if (ret)
+ goto failed_submodules_init;
+
+ ret = ipu_add_client_devices(ipu, ipu_base);
+ if (ret) {
+ dev_err(&pdev->dev, "adding client devices failed with %d\n",
+ ret);
+ goto failed_add_clients;
+ }
+
+ dev_info(&pdev->dev, "%s probed\n", devtype->name);
+
+ return 0;
+
+failed_add_clients:
+ ipu_submodules_exit(ipu);
+failed_submodules_init:
+out_failed_reset:
+ ipu_irq_exit(ipu);
+out_failed_irq:
+ clk_disable_unprepare(ipu->clk);
+ return ret;
+}
+
+static int ipu_remove(struct platform_device *pdev)
+{
+ struct ipu_soc *ipu = platform_get_drvdata(pdev);
+
+ platform_device_unregister_children(pdev);
+ ipu_submodules_exit(ipu);
+ ipu_irq_exit(ipu);
+
+ clk_disable_unprepare(ipu->clk);
+
+ return 0;
+}
+
+static struct platform_driver imx_ipu_driver = {
+ .driver = {
+ .name = "imx-ipuv3",
+ .of_match_table = imx_ipu_dt_ids,
+ },
+ .probe = ipu_probe,
+ .remove = ipu_remove,
+};
+
+module_platform_driver(imx_ipu_driver);
+
+MODULE_ALIAS("platform:imx-ipuv3");
+MODULE_DESCRIPTION("i.MX IPU v3 driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-cpmem.c b/kernel/drivers/gpu/ipu-v3/ipu-cpmem.c
new file mode 100644
index 000000000..3bf05bc4a
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (C) 2012 Mentor Graphics Inc.
+ * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/types.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <drm/drm_fourcc.h>
+#include "ipu-prv.h"
+
+struct ipu_cpmem_word {
+ u32 data[5];
+ u32 res[3];
+};
+
+struct ipu_ch_param {
+ struct ipu_cpmem_word word[2];
+};
+
+struct ipu_cpmem {
+ struct ipu_ch_param __iomem *base;
+ u32 module;
+ spinlock_t lock;
+ int use_count;
+ struct ipu_soc *ipu;
+};
+
+#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
+
+#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
+#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22)
+#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4)
+#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1)
+#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1)
+#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14)
+#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14)
+
+#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10)
+#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9)
+#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13)
+#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12)
+#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1)
+#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1)
+#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12)
+#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11)
+#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10)
+#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7)
+#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10)
+#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1)
+#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1)
+#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7)
+#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1)
+#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1)
+#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3)
+#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2)
+#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1)
+#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
+#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
+#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
+#define IPU_FIELD_ROT_HF_VF IPU_CPMEM_WORD(0, 119, 3)
+#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
+#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
+#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
+#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1)
+#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1)
+#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13)
+#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12)
+#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29)
+#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29)
+#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20)
+#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7)
+#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4)
+#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1)
+#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3)
+#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2)
+#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7)
+#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14)
+#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3)
+#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3)
+#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3)
+#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3)
+#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 128, 5)
+#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5)
+#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5)
+#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5)
+#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1)
+#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1)
+#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1)
+
+static inline struct ipu_ch_param __iomem *
+ipu_get_cpmem(struct ipuv3_channel *ch)
+{
+ struct ipu_cpmem *cpmem = ch->ipu->cpmem_priv;
+
+ return cpmem->base + ch->num;
+}
+
+static void ipu_ch_param_write_field(struct ipuv3_channel *ch, u32 wbs, u32 v)
+{
+ struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
+ u32 bit = (wbs >> 8) % 160;
+ u32 size = wbs & 0xff;
+ u32 word = (wbs >> 8) / 160;
+ u32 i = bit / 32;
+ u32 ofs = bit % 32;
+ u32 mask = (1 << size) - 1;
+ u32 val;
+
+ pr_debug("%s %d %d %d\n", __func__, word, bit, size);
+
+ val = readl(&base->word[word].data[i]);
+ val &= ~(mask << ofs);
+ val |= v << ofs;
+ writel(val, &base->word[word].data[i]);
+
+ if ((bit + size - 1) / 32 > i) {
+ val = readl(&base->word[word].data[i + 1]);
+ val &= ~(mask >> (ofs ? (32 - ofs) : 0));
+ val |= v >> (ofs ? (32 - ofs) : 0);
+ writel(val, &base->word[word].data[i + 1]);
+ }
+}
+
+static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs)
+{
+ struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
+ u32 bit = (wbs >> 8) % 160;
+ u32 size = wbs & 0xff;
+ u32 word = (wbs >> 8) / 160;
+ u32 i = bit / 32;
+ u32 ofs = bit % 32;
+ u32 mask = (1 << size) - 1;
+ u32 val = 0;
+
+ pr_debug("%s %d %d %d\n", __func__, word, bit, size);
+
+ val = (readl(&base->word[word].data[i]) >> ofs) & mask;
+
+ if ((bit + size - 1) / 32 > i) {
+ u32 tmp;
+
+ tmp = readl(&base->word[word].data[i + 1]);
+ tmp &= mask >> (ofs ? (32 - ofs) : 0);
+ val |= tmp << (ofs ? (32 - ofs) : 0);
+ }
+
+ return val;
+}
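+
+/*
+ * Editor's note (illustrative, not part of the original commit): the
+ * IPU_CPMEM_WORD() macro packs (word, bit offset, size) into a single u32,
+ * e.g. IPU_FIELD_UBO = IPU_CPMEM_WORD(0, 46, 22) = ((0 * 160 + 46) << 8) | 22.
+ * The accessors above decode that back to word = 0, bit = 46, size = 22, so
+ * i = 1 and ofs = 14; since (46 + 22 - 1) / 32 = 2 > i, the value is split
+ * across data[1] bits 14..31 and data[2] bits 0..3.
+ */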
+
+/*
+ * The V4L2 spec defines packed RGB formats in memory byte order, which from
+ * the point of view of the IPU corresponds to little-endian words with the
+ * first component in the least significant bits.
+ * The DRM pixel formats and the IPU internal representation are ordered the
+ * other way around, with the first named component at the most significant
+ * bits. Further, V4L2 formats are not well defined:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
+ * We choose the interpretation which matches GStreamer behavior.
+ */
+static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_RGB565:
+ /*
+ * Here we choose the 'corrected' interpretation of RGBP, a
+ * little-endian 16-bit word with the red component at the most
+ * significant bits:
+ * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
+ */
+ return DRM_FORMAT_RGB565;
+ case V4L2_PIX_FMT_BGR24:
+ /* B G R <=> [24:0] R:G:B */
+ return DRM_FORMAT_RGB888;
+ case V4L2_PIX_FMT_RGB24:
+ /* R G B <=> [24:0] B:G:R */
+ return DRM_FORMAT_BGR888;
+ case V4L2_PIX_FMT_BGR32:
+ /* B G R A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_RGB32:
+ /* R G B A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_UYVY:
+ return DRM_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return DRM_FORMAT_YUYV;
+ case V4L2_PIX_FMT_YUV420:
+ return DRM_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YUV422P:
+ return DRM_FORMAT_YUV422;
+ case V4L2_PIX_FMT_YVU420:
+ return DRM_FORMAT_YVU420;
+ case V4L2_PIX_FMT_NV12:
+ return DRM_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV16:
+ return DRM_FORMAT_NV16;
+ }
+
+ return -EINVAL;
+}
+
+void ipu_cpmem_zero(struct ipuv3_channel *ch)
+{
+ struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
+ void __iomem *base = p;
+ int i;
+
+ for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
+ writel(0, base + i * sizeof(u32));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_zero);
+
+void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_FW, xres - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_FH, yres - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_resolution);
+
+void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_stride);
+
+void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch)
+{
+ struct ipu_soc *ipu = ch->ipu;
+ u32 val;
+
+ if (ipu->ipu_type == IPUV3EX)
+ ipu_ch_param_write_field(ch, IPU_FIELD_ID, 1);
+
+ val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(ch->num));
+ val |= 1 << (ch->num % 32);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(ch->num));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
+
+void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
+{
+ if (bufnum)
+ ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
+ else
+ ipu_ch_param_write_field(ch, IPU_FIELD_EBA0, buf >> 3);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
+
+void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
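+
+/*
+ * Editor's note (illustrative, not part of the original commit): for
+ * interlaced scan the line stride is doubled (SLY = 2 * stride - 1) so the
+ * DMA skips every other line, and ILO holds the one-line offset to the other
+ * field as stride / 8, i.e. in the same 8-byte units as the buffer offsets;
+ * a 1920-byte stride gives ILO = 240 and SLY = 3839.
+ */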
+
+void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
+{
+ id &= 0x3;
+ ipu_ch_param_write_field(ch, IPU_FIELD_ID, id);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
+
+void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize);
+
+void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_block_mode);
+
+void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
+ enum ipu_rotate_mode rot)
+{
+ u32 temp_rot = bitrev8(rot) >> 5;
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_rotation);
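+
+/*
+ * Editor's note (illustrative, not part of the original commit):
+ * bitrev8(rot) >> 5 reverses the three mode bits because the CPMEM field
+ * stores them the other way around (ROT at bit 119, HF at 120, VF at 121,
+ * per the IPU_FIELD_* definitions above), while enum ipu_rotate_mode keeps
+ * the 90-degree bit on top; e.g. mode 0b100 (rotate only) becomes 0b001.
+ */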
+
+int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
+ const struct ipu_rgb *rgb)
+{
+ int bpp = 0, npb = 0, ro, go, bo, to;
+
+ ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
+ go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
+ bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
+ to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID0, rgb->red.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS0, ro);
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID1, rgb->green.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS1, go);
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID2, rgb->blue.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo);
+
+ if (rgb->transp.length) {
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID3,
+ rgb->transp.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to);
+ } else {
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS3,
+ rgb->bits_per_pixel);
+ }
+
+ switch (rgb->bits_per_pixel) {
+ case 32:
+ bpp = 0;
+ npb = 15;
+ break;
+ case 24:
+ bpp = 1;
+ npb = 19;
+ break;
+ case 16:
+ bpp = 3;
+ npb = 31;
+ break;
+ case 8:
+ bpp = 5;
+ npb = 63;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 7); /* rgb mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
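+
+/*
+ * Editor's note (illustrative, not part of the original commit): the OFS
+ * values are measured from the most significant end of the pixel. For
+ * def_rgb_16 (RGB565, defined below) this gives ro = 16 - 5 - 11 = 0,
+ * go = 16 - 6 - 5 = 5 and bo = 16 - 5 - 0 = 11, the component widths are
+ * written as length - 1, and the 16-bit case selects bpp code 3 with
+ * NPB = 31 (a 32-pixel burst, given that ipu_cpmem_set_burstsize() writes
+ * burstsize - 1).
+ */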
+
+int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
+{
+ int bpp = 0, npb = 0;
+
+ switch (width) {
+ case 32:
+ bpp = 0;
+ npb = 15;
+ break;
+ case 24:
+ bpp = 1;
+ npb = 19;
+ break;
+ case 16:
+ bpp = 3;
+ npb = 31;
+ break;
+ case 8:
+ bpp = 5;
+ npb = 63;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 6); /* raw mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
+
+void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
+{
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_UYVY:
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
+
+void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
+ u32 pixel_format, int stride,
+ int u_offset, int v_offset)
+{
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV422P:
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
+
+void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
+ u32 pixel_format, int stride, int height)
+{
+ int u_offset, v_offset;
+ int uv_stride = 0;
+
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ uv_stride = stride / 2;
+ u_offset = stride * height;
+ v_offset = u_offset + (uv_stride * height / 2);
+ ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ uv_stride = stride / 2;
+ u_offset = stride * height;
+ v_offset = u_offset + (uv_stride * height);
+ ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ u_offset = stride * height;
+ ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
+ u_offset, 0);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
+
+static const struct ipu_rgb def_rgb_32 = {
+ .red = { .offset = 16, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 24, .length = 8, },
+ .bits_per_pixel = 32,
+};
+
+static const struct ipu_rgb def_bgr_32 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
+ .transp = { .offset = 24, .length = 8, },
+ .bits_per_pixel = 32,
+};
+
+static const struct ipu_rgb def_rgb_24 = {
+ .red = { .offset = 16, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 24,
+};
+
+static const struct ipu_rgb def_bgr_24 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 24,
+};
+
+static const struct ipu_rgb def_rgb_16 = {
+ .red = { .offset = 11, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 0, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
+static const struct ipu_rgb def_bgr_16 = {
+ .red = { .offset = 0, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 11, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
+#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
+#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 4) + (x) / 2)
+#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * pix->height / 4) + \
+ (pix->width * (y) / 4) + (x) / 2)
+#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * pix->height / 2) + \
+ (pix->width * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 2) + (x))
+#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * y) + (x))
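+
+/*
+ * Editor's note (illustrative, not part of the original commit): for a
+ * 640x480 V4L2_PIX_FMT_YUV420 buffer with rect.left = rect.top = 0,
+ * U_OFFSET() evaluates to 640 * 480 = 307200 and V_OFFSET() to
+ * 307200 + 640 * 480 / 4 = 384000; for NV12 the single interleaved chroma
+ * plane starts at UV_OFFSET() = 307200.
+ */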
+
+int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 2);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_NV12:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_NV16:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_UYVY:
+ /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_YUYV:
+ /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ ipu_cpmem_set_format_rgb(ch, &def_bgr_32);
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ ipu_cpmem_set_format_rgb(ch, &def_rgb_32);
+ break;
+ case DRM_FORMAT_BGR888:
+ ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
+ break;
+ case DRM_FORMAT_RGB888:
+ ipu_cpmem_set_format_rgb(ch, &def_rgb_24);
+ break;
+ case DRM_FORMAT_RGB565:
+ ipu_cpmem_set_format_rgb(ch, &def_rgb_16);
+ break;
+ case DRM_FORMAT_BGR565:
+ ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
+
+int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
+{
+ struct v4l2_pix_format *pix = &image->pix;
+ int offset, u_offset, v_offset;
+
+ pr_debug("%s: resolution: %dx%d stride: %d\n",
+ __func__, pix->width, pix->height,
+ pix->bytesperline);
+
+ ipu_cpmem_set_resolution(ch, image->rect.width, image->rect.height);
+ ipu_cpmem_set_stride(ch, pix->bytesperline);
+
+ ipu_cpmem_set_fmt(ch, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = UV_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = 0;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_NV16:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = UV2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = 0;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_RGB565:
+ offset = image->rect.left * 2 +
+ image->rect.top * pix->bytesperline;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ offset = image->rect.left * 4 +
+ image->rect.top * pix->bytesperline;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ offset = image->rect.left * 3 +
+ image->rect.top * pix->bytesperline;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
+ ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
+
+void ipu_cpmem_dump(struct ipuv3_channel *ch)
+{
+ struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
+ struct ipu_soc *ipu = ch->ipu;
+ int chno = ch->num;
+
+ dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno,
+ readl(&p->word[0].data[0]),
+ readl(&p->word[0].data[1]),
+ readl(&p->word[0].data[2]),
+ readl(&p->word[0].data[3]),
+ readl(&p->word[0].data[4]));
+ dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno,
+ readl(&p->word[1].data[0]),
+ readl(&p->word[1].data[1]),
+ readl(&p->word[1].data[2]),
+ readl(&p->word[1].data[3]),
+ readl(&p->word[1].data[4]));
+ dev_dbg(ipu->dev, "PFS 0x%x, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_PFS));
+ dev_dbg(ipu->dev, "BPP 0x%x, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_BPP));
+ dev_dbg(ipu->dev, "NPB 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_NPB));
+
+ dev_dbg(ipu->dev, "FW %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_FW));
+ dev_dbg(ipu->dev, "FH %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_FH));
+ dev_dbg(ipu->dev, "EBA0 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3);
+ dev_dbg(ipu->dev, "EBA1 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3);
+ dev_dbg(ipu->dev, "Stride %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_SL));
+ dev_dbg(ipu->dev, "scan_order %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_SO));
+ dev_dbg(ipu->dev, "uv_stride %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_SLUV));
+ dev_dbg(ipu->dev, "u_offset 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3);
+ dev_dbg(ipu->dev, "v_offset 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3);
+
+ dev_dbg(ipu->dev, "Width0 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID0));
+ dev_dbg(ipu->dev, "Width1 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID1));
+ dev_dbg(ipu->dev, "Width2 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID2));
+ dev_dbg(ipu->dev, "Width3 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID3));
+ dev_dbg(ipu->dev, "Offset0 %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS0));
+ dev_dbg(ipu->dev, "Offset1 %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS1));
+ dev_dbg(ipu->dev, "Offset2 %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS2));
+ dev_dbg(ipu->dev, "Offset3 %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS3));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_dump);
+
+int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
+{
+ struct ipu_cpmem *cpmem;
+
+ cpmem = devm_kzalloc(dev, sizeof(*cpmem), GFP_KERNEL);
+ if (!cpmem)
+ return -ENOMEM;
+
+ ipu->cpmem_priv = cpmem;
+
+ spin_lock_init(&cpmem->lock);
+ cpmem->base = devm_ioremap(dev, base, SZ_128K);
+ if (!cpmem->base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "CPMEM base: 0x%08lx remapped to %p\n",
+ base, cpmem->base);
+ cpmem->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_cpmem_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-csi.c b/kernel/drivers/gpu/ipu-v3/ipu-csi.c
new file mode 100644
index 000000000..752cdd2da
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-csi.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2012-2014 Mentor Graphics Inc.
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <uapi/linux/v4l2-mediabus.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+#include "ipu-prv.h"
+
+struct ipu_csi {
+ void __iomem *base;
+ int id;
+ u32 module;
+ struct clk *clk_ipu; /* IPU bus clock */
+ spinlock_t lock;
+ bool inuse;
+ struct ipu_soc *ipu;
+};
+
+/* CSI Register Offsets */
+#define CSI_SENS_CONF 0x0000
+#define CSI_SENS_FRM_SIZE 0x0004
+#define CSI_ACT_FRM_SIZE 0x0008
+#define CSI_OUT_FRM_CTRL 0x000c
+#define CSI_TST_CTRL 0x0010
+#define CSI_CCIR_CODE_1 0x0014
+#define CSI_CCIR_CODE_2 0x0018
+#define CSI_CCIR_CODE_3 0x001c
+#define CSI_MIPI_DI 0x0020
+#define CSI_SKIP 0x0024
+#define CSI_CPD_CTRL 0x0028
+#define CSI_CPD_RC(n) (0x002c + ((n)*4))
+#define CSI_CPD_RS(n) (0x004c + ((n)*4))
+#define CSI_CPD_GRC(n) (0x005c + ((n)*4))
+#define CSI_CPD_GRS(n) (0x007c + ((n)*4))
+#define CSI_CPD_GBC(n) (0x008c + ((n)*4))
+#define CSI_CPD_GBS(n)		(0x00ac + ((n)*4))
+#define CSI_CPD_BC(n)		(0x00bc + ((n)*4))
+#define CSI_CPD_BS(n)		(0x00dc + ((n)*4))
+#define CSI_CPD_OFFSET1 0x00ec
+#define CSI_CPD_OFFSET2 0x00f0
+
+/* CSI Register Fields */
+#define CSI_SENS_CONF_DATA_FMT_SHIFT 8
+#define CSI_SENS_CONF_DATA_FMT_MASK 0x00000700
+#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444 0L
+#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV 1L
+#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY 2L
+#define CSI_SENS_CONF_DATA_FMT_BAYER 3L
+#define CSI_SENS_CONF_DATA_FMT_RGB565 4L
+#define CSI_SENS_CONF_DATA_FMT_RGB555 5L
+#define CSI_SENS_CONF_DATA_FMT_RGB444 6L
+#define CSI_SENS_CONF_DATA_FMT_JPEG 7L
+
+#define CSI_SENS_CONF_VSYNC_POL_SHIFT 0
+#define CSI_SENS_CONF_HSYNC_POL_SHIFT 1
+#define CSI_SENS_CONF_DATA_POL_SHIFT 2
+#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT 3
+#define CSI_SENS_CONF_SENS_PRTCL_MASK 0x00000070
+#define CSI_SENS_CONF_SENS_PRTCL_SHIFT 4
+#define CSI_SENS_CONF_PACK_TIGHT_SHIFT 7
+#define CSI_SENS_CONF_DATA_WIDTH_SHIFT 11
+#define CSI_SENS_CONF_EXT_VSYNC_SHIFT 15
+#define CSI_SENS_CONF_DIVRATIO_SHIFT 16
+
+#define CSI_SENS_CONF_DIVRATIO_MASK 0x00ff0000
+#define CSI_SENS_CONF_DATA_DEST_SHIFT 24
+#define CSI_SENS_CONF_DATA_DEST_MASK 0x07000000
+#define CSI_SENS_CONF_JPEG8_EN_SHIFT 27
+#define CSI_SENS_CONF_JPEG_EN_SHIFT 28
+#define CSI_SENS_CONF_FORCE_EOF_SHIFT 29
+#define CSI_SENS_CONF_DATA_EN_POL_SHIFT 31
+
+#define CSI_DATA_DEST_IC 2
+#define CSI_DATA_DEST_IDMAC 4
+
+#define CSI_CCIR_ERR_DET_EN 0x01000000
+#define CSI_HORI_DOWNSIZE_EN 0x80000000
+#define CSI_VERT_DOWNSIZE_EN 0x40000000
+#define CSI_TEST_GEN_MODE_EN 0x01000000
+
+#define CSI_HSC_MASK 0x1fff0000
+#define CSI_HSC_SHIFT 16
+#define CSI_VSC_MASK 0x00000fff
+#define CSI_VSC_SHIFT 0
+
+#define CSI_TEST_GEN_R_MASK 0x000000ff
+#define CSI_TEST_GEN_R_SHIFT 0
+#define CSI_TEST_GEN_G_MASK 0x0000ff00
+#define CSI_TEST_GEN_G_SHIFT 8
+#define CSI_TEST_GEN_B_MASK 0x00ff0000
+#define CSI_TEST_GEN_B_SHIFT 16
+
+#define CSI_MAX_RATIO_SKIP_SMFC_MASK 0x00000007
+#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT 0
+#define CSI_SKIP_SMFC_MASK 0x000000f8
+#define CSI_SKIP_SMFC_SHIFT 3
+#define CSI_ID_2_SKIP_MASK 0x00000300
+#define CSI_ID_2_SKIP_SHIFT 8
+
+#define CSI_COLOR_FIRST_ROW_MASK 0x00000002
+#define CSI_COLOR_FIRST_COMP_MASK 0x00000001
+
+/* MIPI CSI-2 data types */
+#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
+#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
+#define MIPI_DT_YUV422 0x1e /* UYVY... */
+#define MIPI_DT_RGB444 0x20
+#define MIPI_DT_RGB555 0x21
+#define MIPI_DT_RGB565 0x22
+#define MIPI_DT_RGB666 0x23
+#define MIPI_DT_RGB888 0x24
+#define MIPI_DT_RAW6 0x28
+#define MIPI_DT_RAW7 0x29
+#define MIPI_DT_RAW8 0x2a
+#define MIPI_DT_RAW10 0x2b
+#define MIPI_DT_RAW12 0x2c
+#define MIPI_DT_RAW14 0x2d
+
+/*
+ * Bitfield of CSI bus signal polarities and modes.
+ */
+struct ipu_csi_bus_config {
+ unsigned data_width:4;
+ unsigned clk_mode:3;
+ unsigned ext_vsync:1;
+ unsigned vsync_pol:1;
+ unsigned hsync_pol:1;
+ unsigned pixclk_pol:1;
+ unsigned data_pol:1;
+ unsigned sens_clksrc:1;
+ unsigned pack_tight:1;
+ unsigned force_eof:1;
+ unsigned data_en_pol:1;
+
+ unsigned data_fmt;
+ unsigned mipi_dt;
+};
+
+/*
+ * Enumeration of CSI data bus widths.
+ */
+enum ipu_csi_data_width {
+ IPU_CSI_DATA_WIDTH_4 = 0,
+ IPU_CSI_DATA_WIDTH_8 = 1,
+ IPU_CSI_DATA_WIDTH_10 = 3,
+ IPU_CSI_DATA_WIDTH_12 = 5,
+ IPU_CSI_DATA_WIDTH_16 = 9,
+};
+
+/*
+ * Enumeration of CSI clock modes.
+ */
+enum ipu_csi_clk_mode {
+ IPU_CSI_CLK_MODE_GATED_CLK,
+ IPU_CSI_CLK_MODE_NONGATED_CLK,
+ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
+ IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
+};
+
+static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset)
+{
+ return readl(csi->base + offset);
+}
+
+static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
+ unsigned offset)
+{
+ writel(value, csi->base + offset);
+}
+
+/*
+ * Set mclk division ratio for generating test mode mclk. Only used
+ * for test generator.
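+ * The divider is computed as (ipu_clk / pixel_clk) - 1, e.g. a 264 MHz
+ * IPU clock and a 66 MHz test pixel clock give a ratio of 3.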
+ */
+static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
+ u32 ipu_clk)
+{
+ u32 temp;
+ u32 div_ratio;
+
+	if (!pixel_clk)
+		return -EINVAL;
+
+	div_ratio = (ipu_clk / pixel_clk) - 1;
+
+	if (div_ratio > 0xFF) {
+		dev_err(csi->ipu->dev,
+			"value of pixel_clk exceeds normal range\n");
+		return -EINVAL;
+	}
+
+ temp = ipu_csi_read(csi, CSI_SENS_CONF);
+ temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
+ ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
+ CSI_SENS_CONF);
+
+ return 0;
+}
+
+/*
+ * Find the CSI data format and data width for the given V4L2 media
+ * bus pixel format code.
+ */
+static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
+{
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ cfg->mipi_dt = MIPI_DT_RGB565;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
+ cfg->mipi_dt = MIPI_DT_RGB444;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
+ cfg->mipi_dt = MIPI_DT_RGB555;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW8;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW10;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW10;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_10;
+ break;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW12;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_12;
+ break;
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ /* TODO */
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
+ cfg->mipi_dt = MIPI_DT_RAW8;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
+ */
+static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+ struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ memset(csicfg, 0, sizeof(*csicfg));
+
+ mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+
+ switch (mbus_cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ csicfg->ext_vsync = 1;
+ csicfg->vsync_pol = (mbus_cfg->flags &
+ V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
+ csicfg->hsync_pol = (mbus_cfg->flags &
+ V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
+ csicfg->pixclk_pol = (mbus_cfg->flags &
+ V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
+ break;
+ case V4L2_MBUS_BT656:
+ csicfg->ext_vsync = 0;
+ if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+ else
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+ break;
+ case V4L2_MBUS_CSI2:
+ /*
+		 * MIPI CSI-2 requires non-gated clock mode; the other
+		 * parameters are not applicable to the MIPI CSI-2 bus.
+ */
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK;
+ break;
+ default:
+ /* will never get here, keep compiler quiet */
+ break;
+ }
+}
+
+int ipu_csi_init_interface(struct ipu_csi *csi,
+ struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct ipu_csi_bus_config cfg;
+ unsigned long flags;
+ u32 data = 0;
+
+ fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+
+ /* Set the CSI_SENS_CONF register remaining fields */
+ data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
+ cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
+ cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
+ cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
+ cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
+ cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
+ cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
+ cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
+ cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
+ cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
+ cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ ipu_csi_write(csi, data, CSI_SENS_CONF);
+
+ /* Setup sensor frame size */
+ ipu_csi_write(csi,
+ (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16),
+ CSI_SENS_FRM_SIZE);
+
+ /* Set CCIR registers */
+
+ switch (cfg.clk_mode) {
+ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
+ ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ break;
+ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
+ if (mbus_fmt->width == 720 && mbus_fmt->height == 576) {
+ /*
+ * PAL case
+ *
+ * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
+ * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
+ * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
+ * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
+ */
+ ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+
+ } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
+ /*
+ * NTSC case
+ *
+ * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
+ * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
+ * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
+ * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
+ */
+ ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ } else {
+ dev_err(csi->ipu->dev,
+ "Unsupported CCIR656 interlaced video mode\n");
+ spin_unlock_irqrestore(&csi->lock, flags);
+ return -EINVAL;
+ }
+ break;
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
+ ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ break;
+ case IPU_CSI_CLK_MODE_GATED_CLK:
+ case IPU_CSI_CLK_MODE_NONGATED_CLK:
+ ipu_csi_write(csi, 0, CSI_CCIR_CODE_1);
+ break;
+ }
+
+ dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
+ ipu_csi_read(csi, CSI_SENS_CONF));
+ dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
+ ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
+
+bool ipu_csi_is_interlaced(struct ipu_csi *csi)
+{
+ unsigned long flags;
+ u32 sensor_protocol;
+
+ spin_lock_irqsave(&csi->lock, flags);
+ sensor_protocol =
+ (ipu_csi_read(csi, CSI_SENS_CONF) &
+ CSI_SENS_CONF_SENS_PRTCL_MASK) >>
+ CSI_SENS_CONF_SENS_PRTCL_SHIFT;
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ switch (sensor_protocol) {
+ case IPU_CSI_CLK_MODE_GATED_CLK:
+ case IPU_CSI_CLK_MODE_NONGATED_CLK:
+ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
+ return false;
+ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
+ return true;
+ default:
+ dev_err(csi->ipu->dev,
+ "CSI %d sensor protocol unsupported\n", csi->id);
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
+
+void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
+ w->width = (reg & 0xFFFF) + 1;
+ w->height = (reg >> 16 & 0xFFFF) + 1;
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
+ w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_get_window);
+
+void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16),
+ CSI_ACT_FRM_SIZE);
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
+ reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT));
+ ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+
+void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
+ u32 r_value, u32 g_value, u32 b_value,
+ u32 pix_clk)
+{
+ unsigned long flags;
+ u32 ipu_clk = clk_get_rate(csi->clk_ipu);
+ u32 temp;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ temp = ipu_csi_read(csi, CSI_TST_CTRL);
+
+	if (!active) {
+ temp &= ~CSI_TEST_GEN_MODE_EN;
+ ipu_csi_write(csi, temp, CSI_TST_CTRL);
+ } else {
+ /* Set sensb_mclk div_ratio */
+ ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
+
+ temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
+ CSI_TEST_GEN_B_MASK);
+ temp |= CSI_TEST_GEN_MODE_EN;
+ temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
+ (g_value << CSI_TEST_GEN_G_SHIFT) |
+ (b_value << CSI_TEST_GEN_B_SHIFT);
+ ipu_csi_write(csi, temp, CSI_TST_CTRL);
+ }
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
+
+int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct ipu_csi_bus_config cfg;
+ unsigned long flags;
+ u32 temp;
+
+ if (vc > 3)
+ return -EINVAL;
+
+ mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ temp = ipu_csi_read(csi, CSI_MIPI_DI);
+ temp &= ~(0xff << (vc * 8));
+ temp |= (cfg.mipi_dt << (vc * 8));
+ ipu_csi_write(csi, temp, CSI_MIPI_DI);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype);
+
+int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
+ u32 max_ratio, u32 id)
+{
+ unsigned long flags;
+ u32 temp;
+
+ if (max_ratio > 5 || id > 3)
+ return -EINVAL;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ temp = ipu_csi_read(csi, CSI_SKIP);
+ temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
+ CSI_SKIP_SMFC_MASK);
+ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
+ (id << CSI_ID_2_SKIP_SHIFT) |
+ (skip << CSI_SKIP_SMFC_SHIFT);
+ ipu_csi_write(csi, temp, CSI_SKIP);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc);
+
+int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest)
+{
+ unsigned long flags;
+ u32 csi_sens_conf, dest;
+
+ if (csi_dest == IPU_CSI_DEST_IDMAC)
+ dest = CSI_DATA_DEST_IDMAC;
+ else
+ dest = CSI_DATA_DEST_IC; /* IC or VDIC */
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF);
+ csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
+ csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT);
+ ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_dest);
+
+int ipu_csi_enable(struct ipu_csi *csi)
+{
+ ipu_module_enable(csi->ipu, csi->module);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_enable);
+
+int ipu_csi_disable(struct ipu_csi *csi)
+{
+ ipu_module_disable(csi->ipu, csi->module);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_disable);
+
+struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id)
+{
+ unsigned long flags;
+ struct ipu_csi *csi, *ret;
+
+ if (id > 1)
+ return ERR_PTR(-EINVAL);
+
+ csi = ipu->csi_priv[id];
+ ret = csi;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ if (csi->inuse) {
+ ret = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ csi->inuse = true;
+unlock:
+ spin_unlock_irqrestore(&csi->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_get);
+
+void ipu_csi_put(struct ipu_csi *csi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->lock, flags);
+ csi->inuse = false;
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_put);
+
+int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base, u32 module, struct clk *clk_ipu)
+{
+ struct ipu_csi *csi;
+
+ if (id > 1)
+ return -ENODEV;
+
+ csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ ipu->csi_priv[id] = csi;
+
+ spin_lock_init(&csi->lock);
+ csi->module = module;
+ csi->id = id;
+ csi->clk_ipu = clk_ipu;
+ csi->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!csi->base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n",
+ id, base, csi->base);
+ csi->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_csi_exit(struct ipu_soc *ipu, int id)
+{
+}
+
+void ipu_csi_dump(struct ipu_csi *csi)
+{
+ dev_dbg(csi->ipu->dev, "CSI_SENS_CONF: %08x\n",
+ ipu_csi_read(csi, CSI_SENS_CONF));
+ dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n",
+ ipu_csi_read(csi, CSI_SENS_FRM_SIZE));
+ dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE: %08x\n",
+ ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+ dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL: %08x\n",
+ ipu_csi_read(csi, CSI_OUT_FRM_CTRL));
+ dev_dbg(csi->ipu->dev, "CSI_TST_CTRL: %08x\n",
+ ipu_csi_read(csi, CSI_TST_CTRL));
+ dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1: %08x\n",
+ ipu_csi_read(csi, CSI_CCIR_CODE_1));
+ dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2: %08x\n",
+ ipu_csi_read(csi, CSI_CCIR_CODE_2));
+ dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3: %08x\n",
+ ipu_csi_read(csi, CSI_CCIR_CODE_3));
+ dev_dbg(csi->ipu->dev, "CSI_MIPI_DI: %08x\n",
+ ipu_csi_read(csi, CSI_MIPI_DI));
+ dev_dbg(csi->ipu->dev, "CSI_SKIP: %08x\n",
+ ipu_csi_read(csi, CSI_SKIP));
+}
+EXPORT_SYMBOL_GPL(ipu_csi_dump);
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-dc.c b/kernel/drivers/gpu/ipu-v3/ipu-dc.c
new file mode 100644
index 000000000..9ef2e1f54
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-dc.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+#define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2)
+#define DC_MAP_CONF_VAL(n) (0x144 + ((n) & ~0x1) * 2)
+
+#define DC_EVT_NF 0
+#define DC_EVT_NL 1
+#define DC_EVT_EOF 2
+#define DC_EVT_NFIELD 3
+#define DC_EVT_EOL 4
+#define DC_EVT_EOFIELD 5
+#define DC_EVT_NEW_ADDR 6
+#define DC_EVT_NEW_CHAN 7
+#define DC_EVT_NEW_DATA 8
+
+#define DC_EVT_NEW_ADDR_W_0 0
+#define DC_EVT_NEW_ADDR_W_1 1
+#define DC_EVT_NEW_CHAN_W_0 2
+#define DC_EVT_NEW_CHAN_W_1 3
+#define DC_EVT_NEW_DATA_W_0 4
+#define DC_EVT_NEW_DATA_W_1 5
+#define DC_EVT_NEW_ADDR_R_0 6
+#define DC_EVT_NEW_ADDR_R_1 7
+#define DC_EVT_NEW_CHAN_R_0 8
+#define DC_EVT_NEW_CHAN_R_1 9
+#define DC_EVT_NEW_DATA_R_0 10
+#define DC_EVT_NEW_DATA_R_1 11
+
+#define DC_WR_CH_CONF 0x0
+#define DC_WR_CH_ADDR 0x4
+#define DC_RL_CH(evt) (8 + ((evt) & ~0x1) * 2)
+
+#define DC_GEN 0xd4
+#define DC_DISP_CONF1(disp) (0xd8 + (disp) * 4)
+#define DC_DISP_CONF2(disp) (0xe8 + (disp) * 4)
+#define DC_STAT 0x1c8
+
+#define WROD(lf) (0x18 | ((lf) << 1))
+#define WRG 0x01
+#define WCLK 0xc9
+
+#define SYNC_WAVE 0
+#define NULL_WAVE (-1)
+
+#define DC_GEN_SYNC_1_6_SYNC (2 << 1)
+#define DC_GEN_SYNC_PRIORITY_1 (1 << 7)
+
+#define DC_WR_CH_CONF_WORD_SIZE_8 (0 << 0)
+#define DC_WR_CH_CONF_WORD_SIZE_16 (1 << 0)
+#define DC_WR_CH_CONF_WORD_SIZE_24 (2 << 0)
+#define DC_WR_CH_CONF_WORD_SIZE_32 (3 << 0)
+#define DC_WR_CH_CONF_DISP_ID_PARALLEL(i) (((i) & 0x1) << 3)
+#define DC_WR_CH_CONF_DISP_ID_SERIAL (2 << 3)
+#define DC_WR_CH_CONF_DISP_ID_ASYNC (3 << 4)
+#define DC_WR_CH_CONF_FIELD_MODE (1 << 9)
+#define DC_WR_CH_CONF_PROG_TYPE_NORMAL (4 << 5)
+#define DC_WR_CH_CONF_PROG_TYPE_MASK (7 << 5)
+#define DC_WR_CH_CONF_PROG_DI_ID (1 << 2)
+#define DC_WR_CH_CONF_PROG_DISP_ID(i) (((i) & 0x1) << 3)
+
+#define IPU_DC_NUM_CHANNELS 10
+
+struct ipu_dc_priv;
+
+enum ipu_dc_map {
+ IPU_DC_MAP_RGB24,
+ IPU_DC_MAP_RGB565,
+ IPU_DC_MAP_GBR24, /* TVEv2 */
+ IPU_DC_MAP_BGR666,
+ IPU_DC_MAP_LVDS666,
+ IPU_DC_MAP_BGR24,
+};
+
+struct ipu_dc {
+ /* The display interface number assigned to this dc channel */
+ unsigned int di;
+ void __iomem *base;
+ struct ipu_dc_priv *priv;
+ int chno;
+ bool in_use;
+};
+
+struct ipu_dc_priv {
+ void __iomem *dc_reg;
+ void __iomem *dc_tmpl_reg;
+ struct ipu_soc *ipu;
+ struct device *dev;
+ struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
+ struct mutex mutex;
+ struct completion comp;
+ int dc_irq;
+ int dp_irq;
+ int use_count;
+};
+
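+/*
+ * Link a DC event to a routine: store the microcode start address and
+ * priority for this event in the channel's routine link register.
+ */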
+static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
+{
+ u32 reg;
+
+ reg = readl(dc->base + DC_RL_CH(event));
+ reg &= ~(0xffff << (16 * (event & 0x1)));
+ reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
+ writel(reg, dc->base + DC_RL_CH(event));
+}
+
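+/*
+ * Write one template microcode word (two 32-bit halves) into the DC
+ * template memory at the given word index.  The field layout of the two
+ * halves depends on the opcode (WCLK, WRG or WROD).
+ */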
+static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
+ int map, int wave, int glue, int sync, int stop)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+ u32 reg1, reg2;
+
+ if (opcode == WCLK) {
+ reg1 = (operand << 20) & 0xfff00000;
+ reg2 = operand >> 12 | opcode << 1 | stop << 9;
+ } else if (opcode == WRG) {
+ reg1 = sync | glue << 4 | ++wave << 11 | ((operand << 15) & 0xffff8000);
+ reg2 = operand >> 17 | opcode << 7 | stop << 9;
+ } else {
+ reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000);
+ reg2 = operand >> 12 | opcode << 4 | stop << 9;
+ }
+ writel(reg1, priv->dc_tmpl_reg + word * 8);
+ writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
+}
+
+static int ipu_bus_format_to_map(u32 fmt)
+{
+ switch (fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return IPU_DC_MAP_RGB24;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ return IPU_DC_MAP_RGB565;
+ case MEDIA_BUS_FMT_GBR888_1X24:
+ return IPU_DC_MAP_GBR24;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ return IPU_DC_MAP_BGR666;
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ return IPU_DC_MAP_LVDS666;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ return IPU_DC_MAP_BGR24;
+ default:
+ return -EINVAL;
+ }
+}
+
+int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
+ u32 bus_format, u32 width)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+ u32 reg = 0;
+ int map;
+
+ dc->di = ipu_di_get_num(di);
+
+ map = ipu_bus_format_to_map(bus_format);
+ if (map < 0) {
+ dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
+ return map;
+ }
+
+ if (interlaced) {
+ dc_link_event(dc, DC_EVT_NL, 0, 3);
+ dc_link_event(dc, DC_EVT_EOL, 0, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, 0, 1);
+
+ /* Init template microcode */
+ dc_write_tmpl(dc, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1);
+ } else {
+ if (dc->di) {
+ dc_link_event(dc, DC_EVT_NL, 2, 3);
+ dc_link_event(dc, DC_EVT_EOL, 3, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, 1, 1);
+ /* Init template microcode */
+ dc_write_tmpl(dc, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
+ dc_write_tmpl(dc, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
+ dc_write_tmpl(dc, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ dc_write_tmpl(dc, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ } else {
+ dc_link_event(dc, DC_EVT_NL, 5, 3);
+ dc_link_event(dc, DC_EVT_EOL, 6, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, 8, 1);
+ /* Init template microcode */
+ dc_write_tmpl(dc, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
+ dc_write_tmpl(dc, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
+ dc_write_tmpl(dc, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ dc_write_tmpl(dc, 8, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ }
+ }
+ dc_link_event(dc, DC_EVT_NF, 0, 0);
+ dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
+ dc_link_event(dc, DC_EVT_EOF, 0, 0);
+ dc_link_event(dc, DC_EVT_EOFIELD, 0, 0);
+ dc_link_event(dc, DC_EVT_NEW_CHAN, 0, 0);
+ dc_link_event(dc, DC_EVT_NEW_ADDR, 0, 0);
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ if (interlaced)
+ reg |= DC_WR_CH_CONF_FIELD_MODE;
+ else
+ reg &= ~DC_WR_CH_CONF_FIELD_MODE;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+
+ writel(0x0, dc->base + DC_WR_CH_ADDR);
+ writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
+
+void ipu_dc_enable(struct ipu_soc *ipu)
+{
+ struct ipu_dc_priv *priv = ipu->dc_priv;
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
+
+ priv->use_count++;
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_enable);
+
+void ipu_dc_enable_channel(struct ipu_dc *dc)
+{
+	u32 reg;
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
+
+static irqreturn_t dc_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_dc *dc = dev_id;
+ u32 reg;
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+
+ /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */
+
+ complete(&dc->priv->comp);
+ return IRQ_HANDLED;
+}
+
+void ipu_dc_disable_channel(struct ipu_dc *dc)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+ int irq;
+ unsigned long ret;
+ u32 val;
+
+ /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
+ if (dc->chno == 1)
+ irq = priv->dc_irq;
+ else if (dc->chno == 5)
+ irq = priv->dp_irq;
+ else
+ return;
+
+ init_completion(&priv->comp);
+ enable_irq(irq);
+ ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
+ disable_irq(irq);
+ if (ret == 0) {
+ dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
+
+ val = readl(dc->base + DC_WR_CH_CONF);
+ val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(val, dc->base + DC_WR_CH_CONF);
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
+
+void ipu_dc_disable(struct ipu_soc *ipu)
+{
+ struct ipu_dc_priv *priv = ipu->dc_priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_DC_EN);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_disable);
+
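+/*
+ * Configure one byte lane of a DC pixel format mapping: 'offset' is the
+ * MSB position of the component on the display data bus and 'mask'
+ * selects the bits taken from the source component.
+ */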
+static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
+ int byte_num, int offset, int mask)
+{
+ int ptr = map * 3 + byte_num;
+ u32 reg;
+
+ reg = readl(priv->dc_reg + DC_MAP_CONF_VAL(ptr));
+ reg &= ~(0xffff << (16 * (ptr & 0x1)));
+ reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
+ writel(reg, priv->dc_reg + DC_MAP_CONF_VAL(ptr));
+
+ reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
+ reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num)));
+ reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
+ writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map));
+}
+
+static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map)
+{
+ u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
+
+ writel(reg & ~(0xffff << (16 * (map & 0x1))),
+ priv->dc_reg + DC_MAP_CONF_PTR(map));
+}
+
+struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel)
+{
+ struct ipu_dc_priv *priv = ipu->dc_priv;
+ struct ipu_dc *dc;
+
+ if (channel >= IPU_DC_NUM_CHANNELS)
+ return ERR_PTR(-ENODEV);
+
+ dc = &priv->channels[channel];
+
+ mutex_lock(&priv->mutex);
+
+ if (dc->in_use) {
+ mutex_unlock(&priv->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ dc->in_use = true;
+
+ mutex_unlock(&priv->mutex);
+
+ return dc;
+}
+EXPORT_SYMBOL_GPL(ipu_dc_get);
+
+void ipu_dc_put(struct ipu_dc *dc)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+
+ mutex_lock(&priv->mutex);
+ dc->in_use = false;
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_put);
+
+int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, unsigned long template_base)
+{
+ struct ipu_dc_priv *priv;
+ static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
+ 0x78, 0, 0x94, 0xb4};
+ int i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->mutex);
+
+ priv->dev = dev;
+ priv->ipu = ipu;
+ priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE);
+ priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE);
+ if (!priv->dc_reg || !priv->dc_tmpl_reg)
+ return -ENOMEM;
+
+ for (i = 0; i < IPU_DC_NUM_CHANNELS; i++) {
+ priv->channels[i].chno = i;
+ priv->channels[i].priv = priv;
+ priv->channels[i].base = priv->dc_reg + channel_offsets[i];
+ }
+
+ priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1);
+ if (!priv->dc_irq)
+ return -EINVAL;
+ ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
+ &priv->channels[1]);
+ if (ret < 0)
+ return ret;
+ disable_irq(priv->dc_irq);
+ priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END);
+ if (!priv->dp_irq)
+ return -EINVAL;
+ ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL,
+ &priv->channels[5]);
+ if (ret < 0)
+ return ret;
+ disable_irq(priv->dp_irq);
+
+ writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
+ DC_WR_CH_CONF_PROG_DI_ID,
+ priv->channels[1].base + DC_WR_CH_CONF);
+ writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0),
+ priv->channels[5].base + DC_WR_CH_CONF);
+
+ writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1,
+ priv->dc_reg + DC_GEN);
+
+ ipu->dc_priv = priv;
+
+ dev_dbg(dev, "DC base: 0x%08lx template base: 0x%08lx\n",
+ base, template_base);
+
+ /* rgb24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_RGB24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 0, 7, 0xff); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 1, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 2, 23, 0xff); /* red */
+
+ /* rgb565 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_RGB565);
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 0, 4, 0xf8); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 1, 10, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 2, 15, 0xf8); /* red */
+
+ /* gbr24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_GBR24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 2, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 1, 7, 0xff); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 0, 23, 0xff); /* red */
+
+ /* bgr666 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR666);
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 0, 5, 0xfc); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
+
+ /* lvds666 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666);
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */
+
+ /* bgr24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
+
+ return 0;
+}
+
+void ipu_dc_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-di.c b/kernel/drivers/gpu/ipu-v3/ipu-di.c
new file mode 100644
index 000000000..2970c6bb6
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-di.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+struct ipu_di {
+ void __iomem *base;
+ int id;
+ u32 module;
+ struct clk *clk_di; /* display input clock */
+ struct clk *clk_ipu; /* IPU bus clock */
+ struct clk *clk_di_pixel; /* resulting pixel clock */
+ bool inuse;
+ struct ipu_soc *ipu;
+};
+
+static DEFINE_MUTEX(di_mutex);
+
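+/*
+ * Parameters for one DI sync waveform generator, programmed into the
+ * DI_SW_GEN0/1 and DI_STP_REP registers by ipu_di_sync_config().
+ */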
+struct di_sync_config {
+ int run_count;
+ int run_src;
+ int offset_count;
+ int offset_src;
+ int repeat_count;
+ int cnt_clr_src;
+ int cnt_polarity_gen_en;
+ int cnt_polarity_clr_src;
+ int cnt_polarity_trigger_src;
+ int cnt_up;
+ int cnt_down;
+};
+
+enum di_pins {
+ DI_PIN11 = 0,
+ DI_PIN12 = 1,
+ DI_PIN13 = 2,
+ DI_PIN14 = 3,
+ DI_PIN15 = 4,
+ DI_PIN16 = 5,
+ DI_PIN17 = 6,
+ DI_PIN_CS = 7,
+
+ DI_PIN_SER_CLK = 0,
+ DI_PIN_SER_RS = 1,
+};
+
+enum di_sync_wave {
+ DI_SYNC_NONE = 0,
+ DI_SYNC_CLK = 1,
+ DI_SYNC_INT_HSYNC = 2,
+ DI_SYNC_HSYNC = 3,
+ DI_SYNC_VSYNC = 4,
+ DI_SYNC_DE = 6,
+};
+
+#define SYNC_WAVE 0
+
+#define DI_GENERAL 0x0000
+#define DI_BS_CLKGEN0 0x0004
+#define DI_BS_CLKGEN1 0x0008
+#define DI_SW_GEN0(gen) (0x000c + 4 * ((gen) - 1))
+#define DI_SW_GEN1(gen) (0x0030 + 4 * ((gen) - 1))
+#define DI_STP_REP(gen) (0x0148 + 4 * (((gen) - 1)/2))
+#define DI_SYNC_AS_GEN 0x0054
+#define DI_DW_GEN(gen) (0x0058 + 4 * (gen))
+#define DI_DW_SET(gen, set) (0x0088 + 4 * ((gen) + 0xc * (set)))
+#define DI_SER_CONF 0x015c
+#define DI_SSC 0x0160
+#define DI_POL 0x0164
+#define DI_AW0 0x0168
+#define DI_AW1 0x016c
+#define DI_SCR_CONF 0x0170
+#define DI_STAT 0x0174
+
+#define DI_SW_GEN0_RUN_COUNT(x) ((x) << 19)
+#define DI_SW_GEN0_RUN_SRC(x) ((x) << 16)
+#define DI_SW_GEN0_OFFSET_COUNT(x) ((x) << 3)
+#define DI_SW_GEN0_OFFSET_SRC(x) ((x) << 0)
+
+#define DI_SW_GEN1_CNT_POL_GEN_EN(x) ((x) << 29)
+#define DI_SW_GEN1_CNT_CLR_SRC(x) ((x) << 25)
+#define DI_SW_GEN1_CNT_POL_TRIGGER_SRC(x) ((x) << 12)
+#define DI_SW_GEN1_CNT_POL_CLR_SRC(x) ((x) << 9)
+#define DI_SW_GEN1_CNT_DOWN(x) ((x) << 16)
+#define DI_SW_GEN1_CNT_UP(x) (x)
+#define DI_SW_GEN1_AUTO_RELOAD (0x10000000)
+
+#define DI_DW_GEN_ACCESS_SIZE_OFFSET 24
+#define DI_DW_GEN_COMPONENT_SIZE_OFFSET 16
+
+#define DI_GEN_POLARITY_1 (1 << 0)
+#define DI_GEN_POLARITY_2 (1 << 1)
+#define DI_GEN_POLARITY_3 (1 << 2)
+#define DI_GEN_POLARITY_4 (1 << 3)
+#define DI_GEN_POLARITY_5 (1 << 4)
+#define DI_GEN_POLARITY_6 (1 << 5)
+#define DI_GEN_POLARITY_7 (1 << 6)
+#define DI_GEN_POLARITY_8 (1 << 7)
+#define DI_GEN_POLARITY_DISP_CLK (1 << 17)
+#define DI_GEN_DI_CLK_EXT (1 << 20)
+#define DI_GEN_DI_VSYNC_EXT (1 << 21)
+
+#define DI_POL_DRDY_DATA_POLARITY (1 << 7)
+#define DI_POL_DRDY_POLARITY_15 (1 << 4)
+
+#define DI_VSYNC_SEL_OFFSET 13
+
+static inline u32 ipu_di_read(struct ipu_di *di, unsigned offset)
+{
+ return readl(di->base + offset);
+}
+
+static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
+{
+ writel(value, di->base + offset);
+}
+
+static void ipu_di_data_wave_config(struct ipu_di *di,
+ int wave_gen,
+ int access_size, int component_size)
+{
+ u32 reg;
+ reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
+ (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
+ ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
+}
+
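+/*
+ * Select which waveform set a DI pin uses and program the pin's rising
+ * ("up") and falling ("down") edge positions for that set.
+ */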
+static void ipu_di_data_pin_config(struct ipu_di *di, int wave_gen, int di_pin,
+ int set, int up, int down)
+{
+ u32 reg;
+
+ reg = ipu_di_read(di, DI_DW_GEN(wave_gen));
+ reg &= ~(0x3 << (di_pin * 2));
+ reg |= set << (di_pin * 2);
+ ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
+
+ ipu_di_write(di, (down << 16) | up, DI_DW_SET(wave_gen, set));
+}
+
+static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config,
+ int start, int count)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct di_sync_config *c = &config[i];
+ int wave_gen = start + i + 1;
+
+ if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) ||
+ (c->repeat_count >= 0x1000) ||
+ (c->cnt_up >= 0x400) ||
+ (c->cnt_down >= 0x400)) {
+ dev_err(di->ipu->dev, "DI%d counters out of range.\n",
+ di->id);
+ return;
+ }
+
+ reg = DI_SW_GEN0_RUN_COUNT(c->run_count) |
+ DI_SW_GEN0_RUN_SRC(c->run_src) |
+ DI_SW_GEN0_OFFSET_COUNT(c->offset_count) |
+ DI_SW_GEN0_OFFSET_SRC(c->offset_src);
+ ipu_di_write(di, reg, DI_SW_GEN0(wave_gen));
+
+ reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) |
+ DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) |
+ DI_SW_GEN1_CNT_POL_TRIGGER_SRC(
+ c->cnt_polarity_trigger_src) |
+ DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) |
+ DI_SW_GEN1_CNT_DOWN(c->cnt_down) |
+ DI_SW_GEN1_CNT_UP(c->cnt_up);
+
+ /* Enable auto reload */
+ if (c->repeat_count == 0)
+ reg |= DI_SW_GEN1_AUTO_RELOAD;
+
+ ipu_di_write(di, reg, DI_SW_GEN1(wave_gen));
+
+ reg = ipu_di_read(di, DI_STP_REP(wave_gen));
+ reg &= ~(0xffff << (16 * ((wave_gen - 1) & 0x1)));
+ reg |= c->repeat_count << (16 * ((wave_gen - 1) & 0x1));
+ ipu_di_write(di, reg, DI_STP_REP(wave_gen));
+ }
+}
+
+static void ipu_di_sync_config_interlaced(struct ipu_di *di,
+ struct ipu_di_signal_cfg *sig)
+{
+ u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
+ sig->mode.hback_porch + sig->mode.hfront_porch;
+ u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
+ sig->mode.vback_porch + sig->mode.vfront_porch;
+ u32 reg;
+ struct di_sync_config cfg[] = {
+ {
+ .run_count = h_total / 2 - 1,
+ .run_src = DI_SYNC_CLK,
+ }, {
+ .run_count = h_total - 11,
+ .run_src = DI_SYNC_CLK,
+ .cnt_down = 4,
+ }, {
+ .run_count = v_total * 2 - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = 1,
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = 4,
+ }, {
+ .run_count = v_total / 2 - 1,
+ .run_src = DI_SYNC_HSYNC,
+ .offset_count = sig->mode.vback_porch,
+ .offset_src = DI_SYNC_HSYNC,
+ .repeat_count = 2,
+ .cnt_clr_src = DI_SYNC_VSYNC,
+ }, {
+ .run_src = DI_SYNC_HSYNC,
+ .repeat_count = sig->mode.vactive / 2,
+ .cnt_clr_src = 4,
+ }, {
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_HSYNC,
+ }, {
+ .run_count = v_total / 2 - 1,
+ .run_src = DI_SYNC_HSYNC,
+ .offset_count = 9,
+ .offset_src = DI_SYNC_HSYNC,
+ .repeat_count = 2,
+ .cnt_clr_src = DI_SYNC_VSYNC,
+ }, {
+ .run_src = DI_SYNC_CLK,
+ .offset_count = sig->mode.hback_porch,
+ .offset_src = DI_SYNC_CLK,
+ .repeat_count = sig->mode.hactive,
+ .cnt_clr_src = 5,
+ }, {
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = v_total / 2,
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_clr_src = DI_SYNC_HSYNC,
+ .cnt_down = 4,
+ }
+ };
+
+ ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
+
+ /* set gentime select and tag sel */
+ reg = ipu_di_read(di, DI_SW_GEN1(9));
+ reg &= 0x1FFFFFFF;
+ reg |= (3 - 1) << 29 | 0x00008000;
+ ipu_di_write(di, reg, DI_SW_GEN1(9));
+
+ ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
+}
+
+static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
+ struct ipu_di_signal_cfg *sig, int div)
+{
+ u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
+ sig->mode.hback_porch + sig->mode.hfront_porch;
+ u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
+ sig->mode.vback_porch + sig->mode.vfront_porch;
+ struct di_sync_config cfg[] = {
+ {
+ /* 1: INT_HSYNC */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ } , {
+ /* PIN2: HSYNC */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ .offset_count = div * sig->v_to_h_sync,
+ .offset_src = DI_SYNC_CLK,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_CLK,
+ .cnt_down = sig->mode.hsync_len * 2,
+ } , {
+ /* PIN3: VSYNC */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = sig->mode.vsync_len * 2,
+ } , {
+ /* 4: Line Active */
+ .run_src = DI_SYNC_HSYNC,
+ .offset_count = sig->mode.vsync_len +
+ sig->mode.vback_porch,
+ .offset_src = DI_SYNC_HSYNC,
+ .repeat_count = sig->mode.vactive,
+ .cnt_clr_src = DI_SYNC_VSYNC,
+ } , {
+ /* 5: Pixel Active, referenced by DC */
+ .run_src = DI_SYNC_CLK,
+ .offset_count = sig->mode.hsync_len +
+ sig->mode.hback_porch,
+ .offset_src = DI_SYNC_CLK,
+ .repeat_count = sig->mode.hactive,
+ .cnt_clr_src = 5, /* Line Active */
+ } , {
+ /* unused */
+ } , {
+ /* unused */
+ } , {
+ /* unused */
+ } , {
+ /* unused */
+ },
+ };
+ /* can't use #7 and #8 for line active and pixel active counters */
+ struct di_sync_config cfg_vga[] = {
+ {
+ /* 1: INT_HSYNC */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ } , {
+ /* 2: VSYNC */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ } , {
+ /* 3: Line Active */
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = sig->mode.vsync_len +
+ sig->mode.vback_porch,
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .repeat_count = sig->mode.vactive,
+ .cnt_clr_src = 3 /* VSYNC */,
+ } , {
+ /* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_CLK,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_CLK,
+ .cnt_down = sig->mode.hsync_len * 2,
+ } , {
+ /* 5: Pixel Active signal to DC */
+ .run_src = DI_SYNC_CLK,
+ .offset_count = sig->mode.hsync_len +
+ sig->mode.hback_porch,
+ .offset_src = DI_SYNC_CLK,
+ .repeat_count = sig->mode.hactive,
+ .cnt_clr_src = 4, /* Line Active */
+ } , {
+ /* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = 1, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = sig->mode.vsync_len * 2,
+ } , {
+ /* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_CLK,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_CLK,
+ .cnt_down = sig->mode.hsync_len * 2,
+ } , {
+ /* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = 1, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = sig->mode.vsync_len * 2,
+ } , {
+ /* unused */
+ },
+ };
+
+ ipu_di_write(di, v_total - 1, DI_SCR_CONF);
+ if (sig->hsync_pin == 2 && sig->vsync_pin == 3)
+ ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
+ else
+ ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
+}
+
+static void ipu_di_config_clock(struct ipu_di *di,
+ const struct ipu_di_signal_cfg *sig)
+{
+ struct clk *clk;
+ unsigned clkgen0;
+ uint32_t val;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
+ /*
+ * CLKMODE_EXT means we must use the DI clock: this is
+ * needed for things like LVDS which needs to feed the
+ * DI and LDB with the same pixel clock.
+ */
+ clk = di->clk_di;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
+ /*
+ * CLKMODE_SYNC means that we want the DI to be
+			 * This is needed e.g. for the LDB, which needs to be
+ * This is needed (eg) for LDB which needs to be
+ * fed with the same pixel clock. We assume that
+ * the LDB clock has already been set correctly.
+ */
+ clkgen0 = 1 << 4;
+ } else {
+ /*
+ * We can use the divider. We should really have
+ * a flag here indicating whether the bridge can
+ * cope with a fractional divider or not. For the
+			 * time being, let's go for simplicity and
+ * reliability.
+ */
+ unsigned long in_rate;
+ unsigned div;
+
+ clk_set_rate(clk, sig->mode.pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
+ div = clamp(div, 1U, 255U);
+
+ clkgen0 = div << 4;
+ }
+ } else {
+ /*
+	 * For other interfaces, we can arbitrarily select between
+ * the DI specific clock and the internal IPU clock. See
+ * DI_GENERAL bit 20. We select the IPU clock if it can
+ * give us a clock rate within 1% of the requested frequency,
+ * otherwise we use the DI clock.
+ */
+ unsigned long rate, clkrate;
+ unsigned div, error;
+
+ clkrate = clk_get_rate(di->clk_ipu);
+ div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
+ div = clamp(div, 1U, 255U);
+ rate = clkrate / div;
+
+ error = rate / (sig->mode.pixelclock / 1000);
+
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
+ rate, div, (signed)(error - 1000) / 10, error % 10);
+
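+		/*
+		 * "error" is the achieved/requested ratio in units of 0.1%.
+		 * For example, a 264 MHz IPU clock and a 65 MHz pixel clock
+		 * give div = 4, rate = 66 MHz and error = 1015 (+1.5%), so
+		 * the DI clock would be selected instead.
+		 */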
+ /* Allow a 1% error */
+ if (error < 1010 && error >= 990) {
+ clk = di->clk_ipu;
+
+ clkgen0 = div << 4;
+ } else {
+ unsigned long in_rate;
+ unsigned div;
+
+ clk = di->clk_di;
+
+ clk_set_rate(clk, sig->mode.pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
+ div = clamp(div, 1U, 255U);
+
+ clkgen0 = div << 4;
+ }
+ }
+
+ di->clk_di_pixel = clk;
+
+ /* Set the divider */
+ ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
+
+ /*
+ * Set the high/low periods. Bits 24:16 give us the falling edge,
+ * and bits 8:0 give the rising edge. LSB is fraction, and is
+ * based on the divider above. We want a 50% duty cycle, so set
+ * the falling edge to be half the divider.
+ */
+ ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
+
+ /* Finally select the input clock */
+ val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
+ if (clk == di->clk_di)
+ val |= DI_GEN_DI_CLK_EXT;
+ ipu_di_write(di, val, DI_GENERAL);
+
+ dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
+ sig->mode.pixelclock,
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ clk == di->clk_di ? "DI" : "IPU",
+ clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
+}
+
+/*
+ * This function is called to adjust a video mode to IPU restrictions.
+ * It is meant to be called from drm crtc mode_fixup() methods.
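+ * The IPU needs a vertical front porch of at least two lines; if the
+ * mode has fewer, the missing lines are taken from the vertical back
+ * porch or, failing that, from the vsync length.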
+ */
+int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
+{
+ u32 diff;
+
+ if (mode->vfront_porch >= 2)
+ return 0;
+
+ diff = 2 - mode->vfront_porch;
+
+ if (mode->vback_porch >= diff) {
+ mode->vfront_porch = 2;
+ mode->vback_porch -= diff;
+ } else if (mode->vsync_len > diff) {
+ mode->vfront_porch = 2;
+ mode->vsync_len = mode->vsync_len - diff;
+ } else {
+ dev_warn(di->ipu->dev, "failed to adjust videomode\n");
+ return -EINVAL;
+ }
+
+ dev_warn(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
+
+int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
+{
+ u32 reg;
+ u32 di_gen, vsync_cnt;
+ u32 div;
+
+ dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
+ di->id, sig->mode.hactive, sig->mode.vactive);
+
+ if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
+ return -EINVAL;
+
+ dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ sig->mode.pixelclock);
+
+ mutex_lock(&di_mutex);
+
+ ipu_di_config_clock(di, sig);
+
+ div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
+ div = div / 16; /* Now divider is integer portion */
+
+ /* Setup pixel clock timing */
+ /* Down time is half of period */
+ ipu_di_write(di, (div << 16), DI_BS_CLKGEN1);
+
+ ipu_di_data_wave_config(di, SYNC_WAVE, div - 1, div - 1);
+ ipu_di_data_pin_config(di, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
+
+ di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT;
+ di_gen |= DI_GEN_DI_VSYNC_EXT;
+
+ if (sig->mode.flags & DISPLAY_FLAGS_INTERLACED) {
+ ipu_di_sync_config_interlaced(di, sig);
+
+ /* set y_sel = 1 */
+ di_gen |= 0x10000000;
+ di_gen |= DI_GEN_POLARITY_5;
+ di_gen |= DI_GEN_POLARITY_8;
+
+ vsync_cnt = 7;
+
+ if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ di_gen |= DI_GEN_POLARITY_3;
+ if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ di_gen |= DI_GEN_POLARITY_2;
+ } else {
+ ipu_di_sync_config_noninterlaced(di, sig, div);
+
+ vsync_cnt = 3;
+ if (di->id == 1)
+ /*
+ * TODO: change only for TVEv2, parallel display
+ * uses pin 2 / 3
+ */
+ if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
+ vsync_cnt = 6;
+
+ if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH) {
+ if (sig->hsync_pin == 2)
+ di_gen |= DI_GEN_POLARITY_2;
+ else if (sig->hsync_pin == 4)
+ di_gen |= DI_GEN_POLARITY_4;
+ else if (sig->hsync_pin == 7)
+ di_gen |= DI_GEN_POLARITY_7;
+ }
+ if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH) {
+ if (sig->vsync_pin == 3)
+ di_gen |= DI_GEN_POLARITY_3;
+ else if (sig->vsync_pin == 6)
+ di_gen |= DI_GEN_POLARITY_6;
+ else if (sig->vsync_pin == 8)
+ di_gen |= DI_GEN_POLARITY_8;
+ }
+ }
+
+ if (sig->clk_pol)
+ di_gen |= DI_GEN_POLARITY_DISP_CLK;
+
+ ipu_di_write(di, di_gen, DI_GENERAL);
+
+ ipu_di_write(di, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) | 0x00000002,
+ DI_SYNC_AS_GEN);
+
+ reg = ipu_di_read(di, DI_POL);
+ reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
+
+ if (sig->enable_pol)
+ reg |= DI_POL_DRDY_POLARITY_15;
+ if (sig->data_pol)
+ reg |= DI_POL_DRDY_DATA_POLARITY;
+
+ ipu_di_write(di, reg, DI_POL);
+
+ mutex_unlock(&di_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
+
+int ipu_di_enable(struct ipu_di *di)
+{
+ int ret;
+
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
+ ret = clk_prepare_enable(di->clk_di_pixel);
+ if (ret)
+ return ret;
+
+ ipu_module_enable(di->ipu, di->module);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_enable);
+
+int ipu_di_disable(struct ipu_di *di)
+{
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
+ ipu_module_disable(di->ipu, di->module);
+
+ clk_disable_unprepare(di->clk_di_pixel);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_disable);
+
+int ipu_di_get_num(struct ipu_di *di)
+{
+ return di->id;
+}
+EXPORT_SYMBOL_GPL(ipu_di_get_num);
+
+static DEFINE_MUTEX(ipu_di_lock);
+
+struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp)
+{
+ struct ipu_di *di;
+
+ if (disp > 1)
+ return ERR_PTR(-EINVAL);
+
+ di = ipu->di_priv[disp];
+
+ mutex_lock(&ipu_di_lock);
+
+ if (di->inuse) {
+ di = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ di->inuse = true;
+out:
+ mutex_unlock(&ipu_di_lock);
+
+ return di;
+}
+EXPORT_SYMBOL_GPL(ipu_di_get);
+
+void ipu_di_put(struct ipu_di *di)
+{
+ mutex_lock(&ipu_di_lock);
+
+ di->inuse = false;
+
+ mutex_unlock(&ipu_di_lock);
+}
+EXPORT_SYMBOL_GPL(ipu_di_put);
+
+int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base,
+ u32 module, struct clk *clk_ipu)
+{
+ struct ipu_di *di;
+
+ if (id > 1)
+ return -ENODEV;
+
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ ipu->di_priv[id] = di;
+
+ di->clk_di = devm_clk_get(dev, id ? "di1" : "di0");
+ if (IS_ERR(di->clk_di))
+ return PTR_ERR(di->clk_di);
+
+ di->module = module;
+ di->id = id;
+ di->clk_ipu = clk_ipu;
+ di->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!di->base)
+ return -ENOMEM;
+
+ ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
+
+ dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
+ id, base, di->base);
+ di->inuse = false;
+ di->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_di_exit(struct ipu_soc *ipu, int id)
+{
+}
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-dmfc.c b/kernel/drivers/gpu/ipu-v3/ipu-dmfc.c
new file mode 100644
index 000000000..042c3958e
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+#define DMFC_RD_CHAN 0x0000
+#define DMFC_WR_CHAN 0x0004
+#define DMFC_WR_CHAN_DEF 0x0008
+#define DMFC_DP_CHAN 0x000c
+#define DMFC_DP_CHAN_DEF 0x0010
+#define DMFC_GENERAL1 0x0014
+#define DMFC_GENERAL2 0x0018
+#define DMFC_IC_CTRL 0x001c
+#define DMFC_WR_CHAN_ALT 0x0020
+#define DMFC_WR_CHAN_DEF_ALT 0x0024
+#define DMFC_DP_CHAN_ALT 0x0028
+#define DMFC_DP_CHAN_DEF_ALT 0x002c
+#define DMFC_GENERAL1_ALT 0x0030
+#define DMFC_STAT 0x0034
+
+#define DMFC_WR_CHAN_1_28 0
+#define DMFC_WR_CHAN_2_41 8
+#define DMFC_WR_CHAN_1C_42 16
+#define DMFC_WR_CHAN_2C_43 24
+
+#define DMFC_DP_CHAN_5B_23 0
+#define DMFC_DP_CHAN_5F_27 8
+#define DMFC_DP_CHAN_6B_24 16
+#define DMFC_DP_CHAN_6F_29 24
+
+#define DMFC_FIFO_SIZE_64 (3 << 3)
+#define DMFC_FIFO_SIZE_128 (2 << 3)
+#define DMFC_FIFO_SIZE_256 (1 << 3)
+#define DMFC_FIFO_SIZE_512 (0 << 3)
+
+#define DMFC_SEGMENT(x) ((x & 0x7) << 0)
+#define DMFC_BURSTSIZE_128 (0 << 6)
+#define DMFC_BURSTSIZE_64 (1 << 6)
+#define DMFC_BURSTSIZE_32 (2 << 6)
+#define DMFC_BURSTSIZE_16 (3 << 6)
+
+struct dmfc_channel_data {
+ int ipu_channel;
+ unsigned long channel_reg;
+ unsigned long shift;
+ unsigned eot_shift;
+ unsigned max_fifo_lines;
+};
+
+static const struct dmfc_channel_data dmfcdata[] = {
+ {
+ .ipu_channel = IPUV3_CHANNEL_MEM_BG_SYNC,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_5B_23,
+ .eot_shift = 20,
+ .max_fifo_lines = 3,
+ }, {
+ .ipu_channel = 24,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_6B_24,
+ .eot_shift = 22,
+ .max_fifo_lines = 1,
+ }, {
+ .ipu_channel = IPUV3_CHANNEL_MEM_FG_SYNC,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_5F_27,
+ .eot_shift = 21,
+ .max_fifo_lines = 2,
+ }, {
+ .ipu_channel = IPUV3_CHANNEL_MEM_DC_SYNC,
+ .channel_reg = DMFC_WR_CHAN,
+ .shift = DMFC_WR_CHAN_1_28,
+ .eot_shift = 16,
+ .max_fifo_lines = 2,
+ }, {
+ .ipu_channel = 29,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_6F_29,
+ .eot_shift = 23,
+ .max_fifo_lines = 1,
+ },
+};
+
+#define DMFC_NUM_CHANNELS ARRAY_SIZE(dmfcdata)
+
+struct ipu_dmfc_priv;
+
+struct dmfc_channel {
+ unsigned slots;
+ unsigned slotmask;
+ unsigned segment;
+ int burstsize;
+ struct ipu_soc *ipu;
+ struct ipu_dmfc_priv *priv;
+ const struct dmfc_channel_data *data;
+};
+
+struct ipu_dmfc_priv {
+ struct ipu_soc *ipu;
+ struct device *dev;
+ struct dmfc_channel channels[DMFC_NUM_CHANNELS];
+ struct mutex mutex;
+ unsigned long bandwidth_per_slot;
+ void __iomem *base;
+ int use_count;
+};
+
+int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ mutex_lock(&priv->mutex);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_DMFC_EN);
+
+ priv->use_count++;
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
+
+static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(priv->dev,
+ "Timeout waiting for DMFC FIFOs to clear\n");
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
+
+ if (!priv->use_count) {
+ ipu_dmfc_wait_fifos(priv);
+ ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
+ }
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
+
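+/*
+ * Program the FIFO size, burst size and starting segment for one channel.
+ * The slot count maps directly onto the FIFO size field: 1 slot selects
+ * DMFC_FIFO_SIZE_64, 2 slots 128, 4 slots 256 and 8 slots 512.
+ */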
+static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
+ int segment, int burstsize)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ u32 val, field;
+
+ dev_dbg(priv->dev,
+ "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
+ slots, segment, dmfc->data->ipu_channel);
+
+ switch (slots) {
+ case 1:
+ field = DMFC_FIFO_SIZE_64;
+ break;
+ case 2:
+ field = DMFC_FIFO_SIZE_128;
+ break;
+ case 4:
+ field = DMFC_FIFO_SIZE_256;
+ break;
+ case 8:
+ field = DMFC_FIFO_SIZE_512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (burstsize) {
+ case 16:
+ field |= DMFC_BURSTSIZE_16;
+ break;
+ case 32:
+ field |= DMFC_BURSTSIZE_32;
+ break;
+ case 64:
+ field |= DMFC_BURSTSIZE_64;
+ break;
+ case 128:
+ field |= DMFC_BURSTSIZE_128;
+ break;
+ }
+
+ field |= DMFC_SEGMENT(segment);
+
+ val = readl(priv->base + dmfc->data->channel_reg);
+
+ val &= ~(0xff << dmfc->data->shift);
+ val |= field << dmfc->data->shift;
+
+ writel(val, priv->base + dmfc->data->channel_reg);
+
+ dmfc->slots = slots;
+ dmfc->segment = segment;
+ dmfc->burstsize = burstsize;
+ dmfc->slotmask = ((1 << slots) - 1) << segment;
+
+ return 0;
+}
+
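+/*
+ * Round the requested bandwidth up to the next power-of-two number of
+ * slots. As a rough example, assuming a 264 MHz IPU clock (for which
+ * ipu_dmfc_init() below sets up 132 Mpixel/s per slot), a 1080p60
+ * stream at roughly 148.5 Mpixel/s would need two slots.
+ */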
+static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
+ unsigned long bandwidth)
+{
+ int slots = 1;
+
+ while (slots * priv->bandwidth_per_slot < bandwidth)
+ slots *= 2;
+
+ return slots;
+}
+
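+/*
+ * Find a free run of contiguous segments for the requested number of
+ * slots. For example, if existing channels already occupy mask 0x03 and
+ * two slots are requested, the needed mask 0x03 is shifted left until it
+ * no longer overlaps, returning segment 2 (mask 0x0c).
+ */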
+static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
+{
+ unsigned slotmask_need, slotmask_used = 0;
+ int i, segment = 0;
+
+ slotmask_need = (1 << slots) - 1;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+ slotmask_used |= priv->channels[i].slotmask;
+
+ while (slotmask_need <= 0xff) {
+ if (!(slotmask_used & slotmask_need))
+ return segment;
+
+ slotmask_need <<= 1;
+ segment++;
+ }
+
+ return -EBUSY;
+}
+
+void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ int i;
+
+ dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
+ dmfc->slots, dmfc->segment);
+
+ mutex_lock(&priv->mutex);
+
+ if (!dmfc->slots)
+ goto out;
+
+ dmfc->slotmask = 0;
+ dmfc->slots = 0;
+ dmfc->segment = 0;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+ priv->channels[i].slotmask = 0;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+ if (priv->channels[i].slots > 0) {
+ priv->channels[i].segment =
+ dmfc_find_slots(priv, priv->channels[i].slots);
+ priv->channels[i].slotmask =
+ ((1 << priv->channels[i].slots) - 1) <<
+ priv->channels[i].segment;
+ }
+ }
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+ if (priv->channels[i].slots > 0)
+ ipu_dmfc_setup_channel(&priv->channels[i],
+ priv->channels[i].slots,
+ priv->channels[i].segment,
+ priv->channels[i].burstsize);
+ }
+out:
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
+
+int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
+ unsigned long bandwidth_pixel_per_second, int burstsize)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
+ int segment = -1, ret = 0;
+
+ dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
+ bandwidth_pixel_per_second / 1000000,
+ dmfc->data->ipu_channel);
+
+ ipu_dmfc_free_bandwidth(dmfc);
+
+ mutex_lock(&priv->mutex);
+
+ if (slots > 8) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* For the MEM_BG channel, first try to allocate twice the slots */
+ if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
+ segment = dmfc_find_slots(priv, slots * 2);
+ else if (slots < 2)
+ /* Always allocate at least 128*4 bytes (2 slots) */
+ slots = 2;
+
+ if (segment >= 0)
+ slots *= 2;
+ else
+ segment = dmfc_find_slots(priv, slots);
+ if (segment < 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
+
+out:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
+
+int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ u32 dmfc_gen1;
+
+ dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
+
+ if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
+ dmfc_gen1 |= 1 << dmfc->data->eot_shift;
+ else
+ dmfc_gen1 &= ~(1 << dmfc->data->eot_shift);
+
+ writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+
+struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
+{
+ struct ipu_dmfc_priv *priv = ipu->dmfc_priv;
+ int i;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+ if (dmfcdata[i].ipu_channel == ipu_channel)
+ return &priv->channels[i];
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_get);
+
+void ipu_dmfc_put(struct dmfc_channel *dmfc)
+{
+ ipu_dmfc_free_bandwidth(dmfc);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_put);
+
+int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+ struct clk *ipu_clk)
+{
+ struct ipu_dmfc_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->ipu = ipu;
+ mutex_init(&priv->mutex);
+
+ ipu->dmfc_priv = priv;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+ priv->channels[i].priv = priv;
+ priv->channels[i].ipu = ipu;
+ priv->channels[i].data = &dmfcdata[i];
+ }
+
+ writel(0x0, priv->base + DMFC_WR_CHAN);
+ writel(0x0, priv->base + DMFC_DP_CHAN);
+
+ /*
+	 * The total bandwidth is clkrate * 4 pixels per clock cycle,
+	 * divided into 8 slots.
+ */
+ priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
+
+ dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
+ priv->bandwidth_per_slot / 1000000);
+
+ writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
+ writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
+ writel(0x00000003, priv->base + DMFC_GENERAL1);
+
+ return 0;
+}
+
+void ipu_dmfc_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-dp.c b/kernel/drivers/gpu/ipu-v3/ipu-dp.c
new file mode 100644
index 000000000..98686edbc
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-dp.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+#define DP_SYNC 0
+#define DP_ASYNC0 0x60
+#define DP_ASYNC1 0xBC
+
+#define DP_COM_CONF 0x0
+#define DP_GRAPH_WIND_CTRL 0x0004
+#define DP_FG_POS 0x0008
+#define DP_CSC_A_0 0x0044
+#define DP_CSC_A_1 0x0048
+#define DP_CSC_A_2 0x004C
+#define DP_CSC_A_3 0x0050
+#define DP_CSC_0 0x0054
+#define DP_CSC_1 0x0058
+
+#define DP_COM_CONF_FG_EN (1 << 0)
+#define DP_COM_CONF_GWSEL (1 << 1)
+#define DP_COM_CONF_GWAM (1 << 2)
+#define DP_COM_CONF_GWCKE (1 << 3)
+#define DP_COM_CONF_CSC_DEF_MASK (3 << 8)
+#define DP_COM_CONF_CSC_DEF_OFFSET 8
+#define DP_COM_CONF_CSC_DEF_FG (3 << 8)
+#define DP_COM_CONF_CSC_DEF_BG (2 << 8)
+#define DP_COM_CONF_CSC_DEF_BOTH (1 << 8)
+
+#define IPUV3_NUM_FLOWS 3
+
+struct ipu_dp_priv;
+
+struct ipu_dp {
+ u32 flow;
+ bool in_use;
+ bool foreground;
+ enum ipu_color_space in_cs;
+};
+
+struct ipu_flow {
+ struct ipu_dp foreground;
+ struct ipu_dp background;
+ enum ipu_color_space out_cs;
+ void __iomem *base;
+ struct ipu_dp_priv *priv;
+};
+
+struct ipu_dp_priv {
+ struct ipu_soc *ipu;
+ struct device *dev;
+ void __iomem *base;
+ struct ipu_flow flow[IPUV3_NUM_FLOWS];
+ struct mutex mutex;
+ int use_count;
+};
+
+static u32 ipu_dp_flow_base[] = {DP_SYNC, DP_ASYNC0, DP_ASYNC1};
+
+static inline struct ipu_flow *to_flow(struct ipu_dp *dp)
+{
+ if (dp->foreground)
+ return container_of(dp, struct ipu_flow, foreground);
+ else
+ return container_of(dp, struct ipu_flow, background);
+}
+
+int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable,
+ u8 alpha, bool bg_chan)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ if (bg_chan)
+ reg &= ~DP_COM_CONF_GWSEL;
+ else
+ reg |= DP_COM_CONF_GWSEL;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ if (enable) {
+ reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL;
+ writel(reg | ((u32) alpha << 24),
+ flow->base + DP_GRAPH_WIND_CTRL);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
+ } else {
+ reg = readl(flow->base + DP_COM_CONF);
+ writel(reg & ~DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
+ }
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_set_global_alpha);
+
+int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 y_pos)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+
+ writel((x_pos << 16) | y_pos, flow->base + DP_FG_POS);
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_set_window_pos);
+
+static void ipu_dp_csc_init(struct ipu_flow *flow,
+ enum ipu_color_space in,
+ enum ipu_color_space out,
+ u32 place)
+{
+ u32 reg;
+
+ reg = readl(flow->base + DP_COM_CONF);
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+
+ if (in == out) {
+ writel(reg, flow->base + DP_COM_CONF);
+ return;
+ }
+
+ if (in == IPUV3_COLORSPACE_RGB && out == IPUV3_COLORSPACE_YUV) {
+ writel(0x099 | (0x12d << 16), flow->base + DP_CSC_A_0);
+ writel(0x03a | (0x3a9 << 16), flow->base + DP_CSC_A_1);
+ writel(0x356 | (0x100 << 16), flow->base + DP_CSC_A_2);
+ writel(0x100 | (0x329 << 16), flow->base + DP_CSC_A_3);
+ writel(0x3d6 | (0x0000 << 16) | (2 << 30),
+ flow->base + DP_CSC_0);
+ writel(0x200 | (2 << 14) | (0x200 << 16) | (2 << 30),
+ flow->base + DP_CSC_1);
+ } else {
+ writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
+ writel(0x0cc | (0x095 << 16), flow->base + DP_CSC_A_1);
+ writel(0x3ce | (0x398 << 16), flow->base + DP_CSC_A_2);
+ writel(0x095 | (0x0ff << 16), flow->base + DP_CSC_A_3);
+ writel(0x000 | (0x3e42 << 16) | (1 << 30),
+ flow->base + DP_CSC_0);
+ writel(0x10a | (1 << 14) | (0x3dd6 << 16) | (1 << 30),
+ flow->base + DP_CSC_1);
+ }
+
+ reg |= place;
+
+ writel(reg, flow->base + DP_COM_CONF);
+}
+
+int ipu_dp_setup_channel(struct ipu_dp *dp,
+ enum ipu_color_space in,
+ enum ipu_color_space out)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+
+ mutex_lock(&priv->mutex);
+
+ dp->in_cs = in;
+
+ if (!dp->foreground)
+ flow->out_cs = out;
+
+ if (flow->foreground.in_cs == flow->background.in_cs) {
+ /*
+		 * foreground and background are in the same colorspace, so
+		 * put the colorspace converter after the combining unit.
+ */
+ ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
+ DP_COM_CONF_CSC_DEF_BOTH);
+ } else {
+ if (flow->foreground.in_cs == flow->out_cs)
+ /*
+			 * foreground is identical to the output, so apply
+			 * the color conversion to the background
+ */
+ ipu_dp_csc_init(flow, flow->background.in_cs,
+ flow->out_cs, DP_COM_CONF_CSC_DEF_BG);
+ else
+ ipu_dp_csc_init(flow, flow->foreground.in_cs,
+ flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
+ }
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
+
+int ipu_dp_enable(struct ipu_soc *ipu)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_DP_EN);
+
+ priv->use_count++;
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_enable);
+
+int ipu_dp_enable_channel(struct ipu_dp *dp)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg;
+
+ if (!dp->foreground)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ reg |= DP_COM_CONF_FG_EN;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_enable_channel);
+
+void ipu_dp_disable_channel(struct ipu_dp *dp)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg, csc;
+
+ if (!dp->foreground)
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_FG)
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+
+ reg &= ~DP_COM_CONF_FG_EN;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ writel(0, flow->base + DP_FG_POS);
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
+ ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
+
+void ipu_dp_disable(struct ipu_soc *ipu)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
+
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dp_disable);
+
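+/*
+ * DP flows are addressed as (flow index << 1) | foreground: flow 0 is the
+ * background plane of the sync flow, flow 1 its foreground plane, and
+ * flows 2..5 address the two async flows in the same way.
+ */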
+struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+ struct ipu_dp *dp;
+
+ if ((flow >> 1) >= IPUV3_NUM_FLOWS)
+ return ERR_PTR(-EINVAL);
+
+ if (flow & 1)
+ dp = &priv->flow[flow >> 1].foreground;
+ else
+ dp = &priv->flow[flow >> 1].background;
+
+ if (dp->in_use)
+ return ERR_PTR(-EBUSY);
+
+ dp->in_use = true;
+
+ return dp;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_get);
+
+void ipu_dp_put(struct ipu_dp *dp)
+{
+ dp->in_use = false;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_put);
+
+int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
+{
+ struct ipu_dp_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+ priv->ipu = ipu;
+
+ ipu->dp_priv = priv;
+
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+
+ mutex_init(&priv->mutex);
+
+ for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+ priv->flow[i].foreground.foreground = true;
+ priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
+ priv->flow[i].priv = priv;
+ }
+
+ return 0;
+}
+
+void ipu_dp_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-ic.c b/kernel/drivers/gpu/ipu-v3/ipu-ic.c
new file mode 100644
index 000000000..1dcb96ccd
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-ic.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (C) 2012-2014 Mentor Graphics Inc.
+ * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "ipu-prv.h"
+
+/* IC Register Offsets */
+#define IC_CONF 0x0000
+#define IC_PRP_ENC_RSC 0x0004
+#define IC_PRP_VF_RSC 0x0008
+#define IC_PP_RSC 0x000C
+#define IC_CMBP_1 0x0010
+#define IC_CMBP_2 0x0014
+#define IC_IDMAC_1 0x0018
+#define IC_IDMAC_2 0x001C
+#define IC_IDMAC_3 0x0020
+#define IC_IDMAC_4 0x0024
+
+/* IC Register Fields */
+#define IC_CONF_PRPENC_EN (1 << 0)
+#define IC_CONF_PRPENC_CSC1 (1 << 1)
+#define IC_CONF_PRPENC_ROT_EN (1 << 2)
+#define IC_CONF_PRPVF_EN (1 << 8)
+#define IC_CONF_PRPVF_CSC1 (1 << 9)
+#define IC_CONF_PRPVF_CSC2 (1 << 10)
+#define IC_CONF_PRPVF_CMB (1 << 11)
+#define IC_CONF_PRPVF_ROT_EN (1 << 12)
+#define IC_CONF_PP_EN (1 << 16)
+#define IC_CONF_PP_CSC1 (1 << 17)
+#define IC_CONF_PP_CSC2 (1 << 18)
+#define IC_CONF_PP_CMB (1 << 19)
+#define IC_CONF_PP_ROT_EN (1 << 20)
+#define IC_CONF_IC_GLB_LOC_A (1 << 28)
+#define IC_CONF_KEY_COLOR_EN (1 << 29)
+#define IC_CONF_RWS_EN (1 << 30)
+#define IC_CONF_CSI_MEM_WR_EN (1 << 31)
+
+#define IC_IDMAC_1_CB0_BURST_16 (1 << 0)
+#define IC_IDMAC_1_CB1_BURST_16 (1 << 1)
+#define IC_IDMAC_1_CB2_BURST_16 (1 << 2)
+#define IC_IDMAC_1_CB3_BURST_16 (1 << 3)
+#define IC_IDMAC_1_CB4_BURST_16 (1 << 4)
+#define IC_IDMAC_1_CB5_BURST_16 (1 << 5)
+#define IC_IDMAC_1_CB6_BURST_16 (1 << 6)
+#define IC_IDMAC_1_CB7_BURST_16 (1 << 7)
+#define IC_IDMAC_1_PRPENC_ROT_MASK (0x7 << 11)
+#define IC_IDMAC_1_PRPENC_ROT_OFFSET 11
+#define IC_IDMAC_1_PRPVF_ROT_MASK (0x7 << 14)
+#define IC_IDMAC_1_PRPVF_ROT_OFFSET 14
+#define IC_IDMAC_1_PP_ROT_MASK (0x7 << 17)
+#define IC_IDMAC_1_PP_ROT_OFFSET 17
+#define IC_IDMAC_1_PP_FLIP_RS (1 << 22)
+#define IC_IDMAC_1_PRPVF_FLIP_RS (1 << 21)
+#define IC_IDMAC_1_PRPENC_FLIP_RS (1 << 20)
+
+#define IC_IDMAC_2_PRPENC_HEIGHT_MASK (0x3ff << 0)
+#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0
+#define IC_IDMAC_2_PRPVF_HEIGHT_MASK (0x3ff << 10)
+#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET 10
+#define IC_IDMAC_2_PP_HEIGHT_MASK (0x3ff << 20)
+#define IC_IDMAC_2_PP_HEIGHT_OFFSET 20
+
+#define IC_IDMAC_3_PRPENC_WIDTH_MASK (0x3ff << 0)
+#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET 0
+#define IC_IDMAC_3_PRPVF_WIDTH_MASK (0x3ff << 10)
+#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET 10
+#define IC_IDMAC_3_PP_WIDTH_MASK (0x3ff << 20)
+#define IC_IDMAC_3_PP_WIDTH_OFFSET 20
+
+struct ic_task_regoffs {
+ u32 rsc;
+ u32 tpmem_csc[2];
+};
+
+struct ic_task_bitfields {
+ u32 ic_conf_en;
+ u32 ic_conf_rot_en;
+ u32 ic_conf_cmb_en;
+ u32 ic_conf_csc1_en;
+ u32 ic_conf_csc2_en;
+ u32 ic_cmb_galpha_bit;
+};
+
+static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = {
+ [IC_TASK_ENCODER] = {
+ .rsc = IC_PRP_ENC_RSC,
+ .tpmem_csc = {0x2008, 0},
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .rsc = IC_PRP_VF_RSC,
+ .tpmem_csc = {0x4028, 0x4040},
+ },
+ [IC_TASK_POST_PROCESSOR] = {
+ .rsc = IC_PP_RSC,
+ .tpmem_csc = {0x6060, 0x6078},
+ },
+};
+
+static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = {
+ [IC_TASK_ENCODER] = {
+ .ic_conf_en = IC_CONF_PRPENC_EN,
+ .ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN,
+ .ic_conf_cmb_en = 0, /* NA */
+ .ic_conf_csc1_en = IC_CONF_PRPENC_CSC1,
+ .ic_conf_csc2_en = 0, /* NA */
+ .ic_cmb_galpha_bit = 0, /* NA */
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .ic_conf_en = IC_CONF_PRPVF_EN,
+ .ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN,
+ .ic_conf_cmb_en = IC_CONF_PRPVF_CMB,
+ .ic_conf_csc1_en = IC_CONF_PRPVF_CSC1,
+ .ic_conf_csc2_en = IC_CONF_PRPVF_CSC2,
+ .ic_cmb_galpha_bit = 0,
+ },
+ [IC_TASK_POST_PROCESSOR] = {
+ .ic_conf_en = IC_CONF_PP_EN,
+ .ic_conf_rot_en = IC_CONF_PP_ROT_EN,
+ .ic_conf_cmb_en = IC_CONF_PP_CMB,
+ .ic_conf_csc1_en = IC_CONF_PP_CSC1,
+ .ic_conf_csc2_en = IC_CONF_PP_CSC2,
+ .ic_cmb_galpha_bit = 8,
+ },
+};
+
+struct ipu_ic_priv;
+
+struct ipu_ic {
+ enum ipu_ic_task task;
+ const struct ic_task_regoffs *reg;
+ const struct ic_task_bitfields *bit;
+
+ enum ipu_color_space in_cs, g_in_cs;
+ enum ipu_color_space out_cs;
+ bool graphics;
+ bool rotation;
+ bool in_use;
+
+ struct ipu_ic_priv *priv;
+};
+
+struct ipu_ic_priv {
+ void __iomem *base;
+ void __iomem *tpmem_base;
+ spinlock_t lock;
+ struct ipu_soc *ipu;
+ int use_count;
+ struct ipu_ic task[IC_NUM_TASKS];
+};
+
+static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
+{
+ return readl(ic->priv->base + offset);
+}
+
+static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
+{
+ writel(value, ic->priv->base + offset);
+}
+
+struct ic_csc_params {
+ s16 coeff[3][3]; /* signed 9-bit integer coefficients */
+ s16 offset[3]; /* signed 11+2-bit fixed point offset */
+ u8 scale:2; /* scale coefficients * 2^(scale-1) */
+	bool sat:1;		/* saturate to limited range: 16..235 (Y), 16..240 (U, V) */
+};
+
+/*
+ * Y = R * .299 + G * .587 + B * .114;
+ * U = R * -.169 + G * -.332 + B * .500 + 128.;
+ * V = R * .500 + G * -.419 + B * -.0813 + 128.;
+ */
+static const struct ic_csc_params ic_csc_rgb2ycbcr = {
+ .coeff = {
+ { 77, 150, 29 },
+ { 469, 427, 128 },
+ { 128, 405, 491 },
+ },
+ .offset = { 0, 512, 512 },
+ .scale = 1,
+};
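+
+/*
+ * The coefficients above are 9-bit two's complement values in units of
+ * 1/256, scaled by 2^(scale - 1): 77/256 is roughly the 0.299 luma
+ * weight of R, and 469 is the 9-bit encoding of -43, roughly the -0.169
+ * R coefficient of U. The offsets carry two fractional bits, so 512
+ * encodes +128.
+ */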
+
+/* transparent RGB->RGB matrix for graphics combining */
+static const struct ic_csc_params ic_csc_rgb2rgb = {
+ .coeff = {
+ { 128, 0, 0 },
+ { 0, 128, 0 },
+ { 0, 0, 128 },
+ },
+ .scale = 2,
+};
+
+/*
+ * R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
+ * G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
+ * B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128));
+ */
+static const struct ic_csc_params ic_csc_ycbcr2rgb = {
+ .coeff = {
+ { 149, 0, 204 },
+ { 149, 462, 408 },
+ { 149, 255, 0 },
+ },
+ .offset = { -446, 266, -554 },
+ .scale = 2,
+};
+
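+/*
+ * Load one CSC matrix into the task's task parameter memory (TPMEM). The
+ * six 32-bit words written below interleave the 9-bit coefficients with
+ * the split offset fields; the second word also carries the scale and
+ * saturation flags.
+ */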
+static int init_csc(struct ipu_ic *ic,
+ enum ipu_color_space inf,
+ enum ipu_color_space outf,
+ int csc_index)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ const struct ic_csc_params *params;
+ u32 __iomem *base;
+ const u16 (*c)[3];
+ const u16 *a;
+ u32 param;
+
+ base = (u32 __iomem *)
+ (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
+
+ if (inf == IPUV3_COLORSPACE_YUV && outf == IPUV3_COLORSPACE_RGB)
+ params = &ic_csc_ycbcr2rgb;
+ else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_YUV)
+ params = &ic_csc_rgb2ycbcr;
+ else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_RGB)
+ params = &ic_csc_rgb2rgb;
+ else {
+ dev_err(priv->ipu->dev, "Unsupported color space conversion\n");
+ return -EINVAL;
+ }
+
+ /* Cast to unsigned */
+ c = (const u16 (*)[3])params->coeff;
+ a = (const u16 *)params->offset;
+
+ param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) |
+ ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff);
+ writel(param, base++);
+
+ param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
+ (params->sat << 9);
+ writel(param, base++);
+
+ param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
+ ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff);
+ writel(param, base++);
+
+ param = ((a[1] & 0x1fe0) >> 5);
+ writel(param, base++);
+
+ param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) |
+ ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff);
+ writel(param, base++);
+
+ param = ((a[2] & 0x1fe0) >> 5);
+ writel(param, base++);
+
+ return 0;
+}
+
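+/*
+ * Split a scaling ratio into a power-of-two pre-decimation (downsize)
+ * step and a 13-bit fractional resize step. For example, resizing a
+ * 1920-pixel line to 720 pixels first halves it to 960 (downsize_coeff
+ * = 1) and then programs resize_coeff = 8192 * 959 / 719 = 10926, i.e.
+ * about 1.33 input pixels per output pixel.
+ */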
+static int calc_resize_coeffs(struct ipu_ic *ic,
+ u32 in_size, u32 out_size,
+ u32 *resize_coeff,
+ u32 *downsize_coeff)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ struct ipu_soc *ipu = priv->ipu;
+ u32 temp_size, temp_downsize;
+
+ /*
+ * Input size cannot be more than 4096, and output size cannot
+ * be more than 1024
+ */
+ if (in_size > 4096) {
+ dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n");
+ return -EINVAL;
+ }
+ if (out_size > 1024) {
+ dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n");
+ return -EINVAL;
+ }
+
+ /* Cannot downsize more than 4:1 */
+ if ((out_size << 2) < in_size) {
+ dev_err(ipu->dev, "Unsupported downsize\n");
+ return -EINVAL;
+ }
+
+ /* Compute downsizing coefficient */
+ temp_downsize = 0;
+ temp_size = in_size;
+ while (((temp_size > 1024) || (temp_size >= out_size * 2)) &&
+ (temp_downsize < 2)) {
+ temp_size >>= 1;
+ temp_downsize++;
+ }
+ *downsize_coeff = temp_downsize;
+
+ /*
+ * compute resizing coefficient using the following equation:
+ * resize_coeff = M * (SI - 1) / (SO - 1)
+ * where M = 2^13, SI = input size, SO = output size
+ */
+ *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
+ if (*resize_coeff >= 16384L) {
+ dev_err(ipu->dev, "Warning! Overflow on resize coeff.\n");
+ *resize_coeff = 0x3FFF;
+ }
+
+ return 0;
+}
+
+void ipu_ic_task_enable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 ic_conf;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_conf = ipu_ic_read(ic, IC_CONF);
+
+ ic_conf |= ic->bit->ic_conf_en;
+
+ if (ic->rotation)
+ ic_conf |= ic->bit->ic_conf_rot_en;
+
+ if (ic->in_cs != ic->out_cs)
+ ic_conf |= ic->bit->ic_conf_csc1_en;
+
+ if (ic->graphics) {
+ ic_conf |= ic->bit->ic_conf_cmb_en;
+ ic_conf |= ic->bit->ic_conf_csc1_en;
+
+ if (ic->g_in_cs != ic->out_cs)
+ ic_conf |= ic->bit->ic_conf_csc2_en;
+ }
+
+ ipu_ic_write(ic, ic_conf, IC_CONF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_enable);
+
+void ipu_ic_task_disable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 ic_conf;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_conf = ipu_ic_read(ic, IC_CONF);
+
+ ic_conf &= ~(ic->bit->ic_conf_en |
+ ic->bit->ic_conf_csc1_en |
+ ic->bit->ic_conf_rot_en);
+ if (ic->bit->ic_conf_csc2_en)
+ ic_conf &= ~ic->bit->ic_conf_csc2_en;
+ if (ic->bit->ic_conf_cmb_en)
+ ic_conf &= ~ic->bit->ic_conf_cmb_en;
+
+ ipu_ic_write(ic, ic_conf, IC_CONF);
+
+ ic->rotation = ic->graphics = false;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
+
+int ipu_ic_task_graphics_init(struct ipu_ic *ic,
+ enum ipu_color_space in_g_cs,
+ bool galpha_en, u32 galpha,
+ bool colorkey_en, u32 colorkey)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 reg, ic_conf;
+ int ret = 0;
+
+ if (ic->task == IC_TASK_ENCODER)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_conf = ipu_ic_read(ic, IC_CONF);
+
+ if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
+ /* need transparent CSC1 conversion */
+ ret = init_csc(ic, IPUV3_COLORSPACE_RGB,
+ IPUV3_COLORSPACE_RGB, 0);
+ if (ret)
+ goto unlock;
+ }
+
+ ic->g_in_cs = in_g_cs;
+
+ if (ic->g_in_cs != ic->out_cs) {
+ ret = init_csc(ic, ic->g_in_cs, ic->out_cs, 1);
+ if (ret)
+ goto unlock;
+ }
+
+ if (galpha_en) {
+ ic_conf |= IC_CONF_IC_GLB_LOC_A;
+ reg = ipu_ic_read(ic, IC_CMBP_1);
+ reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
+ reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
+ ipu_ic_write(ic, reg, IC_CMBP_1);
+ } else
+ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
+
+ if (colorkey_en) {
+ ic_conf |= IC_CONF_KEY_COLOR_EN;
+ ipu_ic_write(ic, colorkey, IC_CMBP_2);
+ } else
+ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
+
+ ipu_ic_write(ic, ic_conf, IC_CONF);
+
+ ic->graphics = true;
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
+
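+/*
+ * The task's RSC register holds both axes: the vertical downsize and
+ * resize coefficients are packed into the upper 16 bits and the
+ * horizontal pair into the lower 16 bits, as assembled below.
+ */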
+int ipu_ic_task_init(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ u32 reg, downsize_coeff, resize_coeff;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Setup vertical resizing */
+ ret = calc_resize_coeffs(ic, in_height, out_height,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
+
+ reg = (downsize_coeff << 30) | (resize_coeff << 16);
+
+ /* Setup horizontal resizing */
+ ret = calc_resize_coeffs(ic, in_width, out_width,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
+
+ reg |= (downsize_coeff << 14) | resize_coeff;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ipu_ic_write(ic, reg, ic->reg->rsc);
+
+ /* Setup color space conversion */
+ ic->in_cs = in_cs;
+ ic->out_cs = out_cs;
+
+ if (ic->in_cs != ic->out_cs) {
+ ret = init_csc(ic, ic->in_cs, ic->out_cs, 0);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_init);
+
+int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
+ u32 width, u32 height, int burst_size,
+ enum ipu_rotate_mode rot)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ struct ipu_soc *ipu = priv->ipu;
+ u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
+ u32 temp_rot = bitrev8(rot) >> 5;
+ bool need_hor_flip = false;
+ unsigned long flags;
+ int ret = 0;
+
+ if ((burst_size != 8) && (burst_size != 16)) {
+ dev_err(ipu->dev, "Illegal burst length for IC\n");
+ return -EINVAL;
+ }
+
+ width--;
+ height--;
+
+ if (temp_rot & 0x2) /* Need horizontal flip */
+ need_hor_flip = true;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
+ ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
+ ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
+
+ switch (channel->num) {
+ case IPUV3_CHANNEL_IC_PP_MEM:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_IC_PP:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
+ break;
+ case IPUV3_CHANNEL_MEM_ROT_PP:
+ ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_IC_PRP_VF:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
+ break;
+ case IPUV3_CHANNEL_IC_PRP_ENC_MEM:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_ROT_ENC:
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
+ break;
+ case IPUV3_CHANNEL_IC_PRP_VF_MEM:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_ROT_VF:
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
+ break;
+ case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
+ break;
+ case IPUV3_CHANNEL_G_MEM_IC_PP:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
+ break;
+ case IPUV3_CHANNEL_VDI_MEM_IC_VF:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
+ ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
+ ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
+
+ if (rot >= IPU_ROTATE_90_RIGHT)
+ ic->rotation = true;
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
+
+int ipu_ic_enable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 module = IPU_CONF_IC_EN;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ic->rotation)
+ module |= IPU_CONF_ROT_EN;
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, module);
+
+ priv->use_count++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_enable);
+
+int ipu_ic_disable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->use_count--;
+
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, module);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_disable);
+
+struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
+{
+ struct ipu_ic_priv *priv = ipu->ic_priv;
+ unsigned long flags;
+ struct ipu_ic *ic, *ret;
+
+ if (task >= IC_NUM_TASKS)
+ return ERR_PTR(-EINVAL);
+
+ ic = &priv->task[task];
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ic->in_use) {
+ ret = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ ic->in_use = true;
+ ret = ic;
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_get);
+
+void ipu_ic_put(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ic->in_use = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_ic_put);
+
+int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, unsigned long tpmem_base)
+{
+ struct ipu_ic_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipu->ic_priv = priv;
+
+ spin_lock_init(&priv->lock);
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+ priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K);
+ if (!priv->tpmem_base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base);
+
+ priv->ipu = ipu;
+
+ for (i = 0; i < IC_NUM_TASKS; i++) {
+ priv->task[i].task = i;
+ priv->task[i].priv = priv;
+ priv->task[i].reg = &ic_task_reg[i];
+ priv->task[i].bit = &ic_task_bit[i];
+ }
+
+ return 0;
+}
+
+void ipu_ic_exit(struct ipu_soc *ipu)
+{
+}
+
+void ipu_ic_dump(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ struct ipu_soc *ipu = priv->ipu;
+
+ dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n",
+ ipu_ic_read(ic, IC_CONF));
+ dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n",
+ ipu_ic_read(ic, IC_PRP_ENC_RSC));
+ dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n",
+ ipu_ic_read(ic, IC_PRP_VF_RSC));
+ dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n",
+ ipu_ic_read(ic, IC_PP_RSC));
+ dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_CMBP_1));
+ dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_CMBP_2));
+ dev_dbg(ipu->dev, "IC_IDMAC_1 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_1));
+ dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_2));
+ dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_3));
+ dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_4));
+}
+EXPORT_SYMBOL_GPL(ipu_ic_dump);
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-prv.h b/kernel/drivers/gpu/ipu-v3/ipu-prv.h
new file mode 100644
index 000000000..bfb1e8a44
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-prv.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef __IPU_PRV_H__
+#define __IPU_PRV_H__
+
+struct ipu_soc;
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <video/imx-ipu-v3.h>
+
+#define IPU_MCU_T_DEFAULT 8
+#define IPU_CM_IDMAC_REG_OFS 0x00008000
+#define IPU_CM_IC_REG_OFS 0x00020000
+#define IPU_CM_IRT_REG_OFS 0x00028000
+#define IPU_CM_CSI0_REG_OFS 0x00030000
+#define IPU_CM_CSI1_REG_OFS 0x00038000
+#define IPU_CM_SMFC_REG_OFS 0x00050000
+#define IPU_CM_DC_REG_OFS 0x00058000
+#define IPU_CM_DMFC_REG_OFS 0x00060000
+
+/* Register addresses */
+/* IPU Common registers */
+#define IPU_CM_REG(offset) (offset)
+
+#define IPU_CONF IPU_CM_REG(0)
+
+#define IPU_SRM_PRI1 IPU_CM_REG(0x00a0)
+#define IPU_SRM_PRI2 IPU_CM_REG(0x00a4)
+#define IPU_FS_PROC_FLOW1 IPU_CM_REG(0x00a8)
+#define IPU_FS_PROC_FLOW2 IPU_CM_REG(0x00ac)
+#define IPU_FS_PROC_FLOW3 IPU_CM_REG(0x00b0)
+#define IPU_FS_DISP_FLOW1 IPU_CM_REG(0x00b4)
+#define IPU_FS_DISP_FLOW2 IPU_CM_REG(0x00b8)
+#define IPU_SKIP IPU_CM_REG(0x00bc)
+#define IPU_DISP_ALT_CONF IPU_CM_REG(0x00c0)
+#define IPU_DISP_GEN IPU_CM_REG(0x00c4)
+#define IPU_DISP_ALT1 IPU_CM_REG(0x00c8)
+#define IPU_DISP_ALT2 IPU_CM_REG(0x00cc)
+#define IPU_DISP_ALT3 IPU_CM_REG(0x00d0)
+#define IPU_DISP_ALT4 IPU_CM_REG(0x00d4)
+#define IPU_SNOOP IPU_CM_REG(0x00d8)
+#define IPU_MEM_RST IPU_CM_REG(0x00dc)
+#define IPU_PM IPU_CM_REG(0x00e0)
+#define IPU_GPR IPU_CM_REG(0x00e4)
+#define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
+#define IPU_CHA_CUR_BUF(ch) IPU_CM_REG(0x023C + 4 * ((ch) / 32))
+#define IPU_ALT_CUR_BUF0 IPU_CM_REG(0x0244)
+#define IPU_ALT_CUR_BUF1 IPU_CM_REG(0x0248)
+#define IPU_SRM_STAT IPU_CM_REG(0x024C)
+#define IPU_PROC_TASK_STAT IPU_CM_REG(0x0250)
+#define IPU_DISP_TASK_STAT IPU_CM_REG(0x0254)
+#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
+#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
+#define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG(0x0288 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
+
+#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n))
+#define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n))
+
+#define IPU_DI0_COUNTER_RELEASE (1 << 24)
+#define IPU_DI1_COUNTER_RELEASE (1 << 25)
+
+#define IPU_IDMAC_REG(offset) (offset)
+
+#define IDMAC_CONF IPU_IDMAC_REG(0x0000)
+#define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
+#define IDMAC_SEP_ALPHA IPU_IDMAC_REG(0x000c)
+#define IDMAC_ALT_SEP_ALPHA IPU_IDMAC_REG(0x0010)
+#define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
+#define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001c + 4 * ((ch) / 32))
+#define IDMAC_CH_LOCK_EN_1 IPU_IDMAC_REG(0x0024)
+#define IDMAC_CH_LOCK_EN_2 IPU_IDMAC_REG(0x0028)
+#define IDMAC_SUB_ADDR_0 IPU_IDMAC_REG(0x002c)
+#define IDMAC_SUB_ADDR_1 IPU_IDMAC_REG(0x0030)
+#define IDMAC_SUB_ADDR_2 IPU_IDMAC_REG(0x0034)
+#define IDMAC_BAND_EN(ch) IPU_IDMAC_REG(0x0040 + 4 * ((ch) / 32))
+#define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG(0x0100 + 4 * ((ch) / 32))
+
+#define IPU_NUM_IRQS (32 * 15)
+
+enum ipu_modules {
+ IPU_CONF_CSI0_EN = (1 << 0),
+ IPU_CONF_CSI1_EN = (1 << 1),
+ IPU_CONF_IC_EN = (1 << 2),
+ IPU_CONF_ROT_EN = (1 << 3),
+ IPU_CONF_ISP_EN = (1 << 4),
+ IPU_CONF_DP_EN = (1 << 5),
+ IPU_CONF_DI0_EN = (1 << 6),
+ IPU_CONF_DI1_EN = (1 << 7),
+ IPU_CONF_SMFC_EN = (1 << 8),
+ IPU_CONF_DC_EN = (1 << 9),
+ IPU_CONF_DMFC_EN = (1 << 10),
+
+ IPU_CONF_VDI_EN = (1 << 12),
+
+ IPU_CONF_IDMAC_DIS = (1 << 22),
+
+ IPU_CONF_IC_DMFC_SEL = (1 << 25),
+ IPU_CONF_IC_DMFC_SYNC = (1 << 26),
+ IPU_CONF_VDI_DMFC_SYNC = (1 << 27),
+
+ IPU_CONF_CSI0_DATA_SOURCE = (1 << 28),
+ IPU_CONF_CSI1_DATA_SOURCE = (1 << 29),
+ IPU_CONF_IC_INPUT = (1 << 30),
+ IPU_CONF_CSI_SEL = (1 << 31),
+};
+
+struct ipuv3_channel {
+ unsigned int num;
+
+ bool enabled;
+ bool busy;
+
+ struct ipu_soc *ipu;
+};
+
+struct ipu_cpmem;
+struct ipu_csi;
+struct ipu_dc_priv;
+struct ipu_dmfc_priv;
+struct ipu_di;
+struct ipu_ic_priv;
+struct ipu_smfc_priv;
+
+struct ipu_devtype;
+
+struct ipu_soc {
+ struct device *dev;
+ const struct ipu_devtype *devtype;
+ enum ipuv3_type ipu_type;
+ spinlock_t lock;
+ struct mutex channel_lock;
+
+ void __iomem *cm_reg;
+ void __iomem *idmac_reg;
+
+ int usecount;
+
+ struct clk *clk;
+
+ struct ipuv3_channel channel[64];
+
+ int irq_sync;
+ int irq_err;
+ struct irq_domain *domain;
+
+ struct ipu_cpmem *cpmem_priv;
+ struct ipu_dc_priv *dc_priv;
+ struct ipu_dp_priv *dp_priv;
+ struct ipu_dmfc_priv *dmfc_priv;
+ struct ipu_di *di_priv[2];
+ struct ipu_csi *csi_priv[2];
+ struct ipu_ic_priv *ic_priv;
+ struct ipu_smfc_priv *smfc_priv;
+};
+
+static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->idmac_reg + offset);
+}
+
+static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
+ unsigned offset)
+{
+ writel(value, ipu->idmac_reg + offset);
+}
+
+void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
+
+int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
+int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
+
+bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
+int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
+
+int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base, u32 module, struct clk *clk_ipu);
+void ipu_csi_exit(struct ipu_soc *ipu, int id);
+
+int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, unsigned long tpmem_base);
+void ipu_ic_exit(struct ipu_soc *ipu);
+
+int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base, u32 module, struct clk *ipu_clk);
+void ipu_di_exit(struct ipu_soc *ipu, int id);
+
+int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+ struct clk *ipu_clk);
+void ipu_dmfc_exit(struct ipu_soc *ipu);
+
+int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_dp_exit(struct ipu_soc *ipu);
+
+int ipu_dc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+ unsigned long template_base);
+void ipu_dc_exit(struct ipu_soc *ipu);
+
+int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_cpmem_exit(struct ipu_soc *ipu);
+
+int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_smfc_exit(struct ipu_soc *ipu);
+
+#endif /* __IPU_PRV_H__ */
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-smfc.c b/kernel/drivers/gpu/ipu-v3/ipu-smfc.c
new file mode 100644
index 000000000..4ef910991
--- /dev/null
+++ b/kernel/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <video/imx-ipu-v3.h>
+
+#include "ipu-prv.h"
+
+struct ipu_smfc {
+ struct ipu_smfc_priv *priv;
+ int chno;
+ bool inuse;
+};
+
+struct ipu_smfc_priv {
+ void __iomem *base;
+ spinlock_t lock;
+ struct ipu_soc *ipu;
+ struct ipu_smfc channel[4];
+ int use_count;
+};
+
+/* SMFC Registers */
+#define SMFC_MAP 0x0000
+#define SMFC_WMC 0x0004
+#define SMFC_BS 0x0008
+
+int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ shift = smfc->chno * 4;
+ val = readl(priv->base + SMFC_BS);
+ val &= ~(0xf << shift);
+ val |= burstsize << shift;
+ writel(val, priv->base + SMFC_BS);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
+
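+/*
+ * Each SMFC channel owns a 3-bit field in SMFC_MAP that selects its
+ * capture source: bit 2 carries csi_id and bits 1:0 carry mipi_id,
+ * hence the ((csi_id << 2) | mipi_id) encoding below.
+ */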
+int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ shift = smfc->chno * 3;
+ val = readl(priv->base + SMFC_MAP);
+ val &= ~(0x7 << shift);
+ val |= ((csi_id << 2) | mipi_id) << shift;
+ writel(val, priv->base + SMFC_MAP);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
+
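+/*
+ * Each channel has a 6-bit watermark field in SMFC_WMC holding the 3-bit
+ * clear level above the 3-bit set level; channels 0/1 start at bits 0/6
+ * and channels 2/3 at bits 16/22.
+ */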
+int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ shift = smfc->chno * 6 + (smfc->chno > 1 ? 4 : 0);
+ val = readl(priv->base + SMFC_WMC);
+ val &= ~(0x3f << shift);
+ val |= ((clr_level << 3) | set_level) << shift;
+ writel(val, priv->base + SMFC_WMC);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark);
+
+int ipu_smfc_enable(struct ipu_smfc *smfc)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN);
+
+ priv->use_count++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_enable);
+
+int ipu_smfc_disable(struct ipu_smfc *smfc)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->use_count--;
+
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_disable);
+
+struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno)
+{
+ struct ipu_smfc_priv *priv = ipu->smfc_priv;
+ struct ipu_smfc *smfc, *ret;
+ unsigned long flags;
+
+ if (chno >= 4)
+ return ERR_PTR(-EINVAL);
+
+ smfc = &priv->channel[chno];
+ ret = smfc;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (smfc->inuse) {
+ ret = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ smfc->inuse = true;
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_get);
+
+void ipu_smfc_put(struct ipu_smfc *smfc)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ smfc->inuse = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_put);
+
+int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base)
+{
+ struct ipu_smfc_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipu->smfc_priv = priv;
+ spin_lock_init(&priv->lock);
+ priv->ipu = ipu;
+
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+
+ for (i = 0; i < 4; i++) {
+ priv->channel[i].priv = priv;
+ priv->channel[i].chno = i;
+ }
+
+ pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base);
+
+ return 0;
+}
+
+void ipu_smfc_exit(struct ipu_soc *ipu)
+{
+}