author    | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 12:17:53 -0700
committer | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 15:44:42 -0700
commit    | 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      | 1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/gpu/drm/tegra
parent    | 98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import rt Linux 4.1.3-rt3 as the OPNFV KVM base.
It is taken from the linux-4.1.y-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git, and the base commit is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
This import loses all of the upstream git history, which is not ideal; we
should switch to a dedicated OPNFV project repository in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
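
For reference, a minimal sketch of how the stated base could be fetched and verified before a future re-import (assuming the linux-4.1.y-rt branch is still published at the URL quoted above):

    # Clone the upstream -rt development tree at the branch the import was taken from.
    git clone -b linux-4.1.y-rt \
        git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
    cd linux-rt-devel

    # Confirm the base commit referenced above exists and is "Prepare v4.1.3-rt3".
    git log -1 0917f823c59692d751951bf5ea699a2d1e2f26a2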
Diffstat (limited to 'kernel/drivers/gpu/drm/tegra')
25 files changed, 13003 insertions, 0 deletions
diff --git a/kernel/drivers/gpu/drm/tegra/Kconfig b/kernel/drivers/gpu/drm/tegra/Kconfig new file mode 100644 index 000000000..74d9d6214 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/Kconfig @@ -0,0 +1,44 @@ +config DRM_TEGRA + tristate "NVIDIA Tegra DRM" + depends on ARCH_TEGRA || (ARM && COMPILE_TEST) + depends on COMMON_CLK + depends on DRM + depends on RESET_CONTROLLER + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select DRM_PANEL + select TEGRA_HOST1X + help + Choose this option if you have an NVIDIA Tegra SoC. + + To compile this driver as a module, choose M here: the module + will be called tegra-drm. + +if DRM_TEGRA + +config DRM_TEGRA_FBDEV + bool "Enable legacy fbdev support" + select DRM_KMS_FB_HELPER + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + default y + help + Choose this option if you have a need for the legacy fbdev support. + Note that this support also provides the Linux console on top of + the Tegra modesetting driver. + +config DRM_TEGRA_DEBUG + bool "NVIDIA Tegra DRM debug support" + help + Say yes here to enable debugging support. + +config DRM_TEGRA_STAGING + bool "Enable HOST1X interface" + depends on STAGING + help + Say yes if HOST1X should be available for userspace DRM users. + + If unsure, choose N. + +endif diff --git a/kernel/drivers/gpu/drm/tegra/Makefile b/kernel/drivers/gpu/drm/tegra/Makefile new file mode 100644 index 000000000..2c66a8db9 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/Makefile @@ -0,0 +1,18 @@ +ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG + +tegra-drm-y := \ + drm.o \ + gem.o \ + fb.o \ + dc.o \ + output.o \ + rgb.o \ + hdmi.o \ + mipi-phy.o \ + dsi.o \ + sor.o \ + dpaux.o \ + gr2d.o \ + gr3d.o + +obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o diff --git a/kernel/drivers/gpu/drm/tegra/dc.c b/kernel/drivers/gpu/drm/tegra/dc.c new file mode 100644 index 000000000..a287e4fec --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/dc.c @@ -0,0 +1,2024 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/iommu.h> +#include <linux/reset.h> + +#include <soc/tegra/pmc.h> + +#include "dc.h" +#include "drm.h" +#include "gem.h" + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +struct tegra_dc_soc_info { + bool supports_border_color; + bool supports_interlacing; + bool supports_cursor; + bool supports_block_linear; + unsigned int pitch_align; + bool has_powergate; +}; + +struct tegra_plane { + struct drm_plane base; + unsigned int index; +}; + +static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane) +{ + return container_of(plane, struct tegra_plane, base); +} + +struct tegra_dc_state { + struct drm_crtc_state base; + + struct clk *clk; + unsigned long pclk; + unsigned int div; + + u32 planes; +}; + +static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state) +{ + if (state) + return container_of(state, struct tegra_dc_state, base); + + return NULL; +} + +struct tegra_plane_state { + struct drm_plane_state base; + + struct tegra_bo_tiling tiling; + u32 format; + u32 swap; +}; + +static inline struct tegra_plane_state * +to_tegra_plane_state(struct drm_plane_state *state) +{ + if (state) + return container_of(state, struct tegra_plane_state, base); + + return NULL; +} + +/* + * Reads the active copy of a register. This takes the dc->lock spinlock to + * prevent races with the VBLANK processing which also needs access to the + * active copy of some registers. + */ +static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset) +{ + unsigned long flags; + u32 value; + + spin_lock_irqsave(&dc->lock, flags); + + tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); + value = tegra_dc_readl(dc, offset); + tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); + + spin_unlock_irqrestore(&dc->lock, flags); + return value; +} + +/* + * Double-buffered registers have two copies: ASSEMBLY and ACTIVE. When the + * *_ACT_REQ bits are set the ASSEMBLY copy is latched into the ACTIVE copy. + * Latching happens mmediately if the display controller is in STOP mode or + * on the next frame boundary otherwise. + * + * Triple-buffered registers have three copies: ASSEMBLY, ARM and ACTIVE. The + * ASSEMBLY copy is latched into the ARM copy immediately after *_UPDATE bits + * are written. When the *_ACT_REQ bits are written, the ARM copy is latched + * into the ACTIVE copy, either immediately if the display controller is in + * STOP mode, or at the next frame boundary otherwise. 
+ */ +void tegra_dc_commit(struct tegra_dc *dc) +{ + tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); + tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); +} + +static int tegra_dc_format(u32 fourcc, u32 *format, u32 *swap) +{ + /* assume no swapping of fetched data */ + if (swap) + *swap = BYTE_SWAP_NOSWAP; + + switch (fourcc) { + case DRM_FORMAT_XBGR8888: + *format = WIN_COLOR_DEPTH_R8G8B8A8; + break; + + case DRM_FORMAT_XRGB8888: + *format = WIN_COLOR_DEPTH_B8G8R8A8; + break; + + case DRM_FORMAT_RGB565: + *format = WIN_COLOR_DEPTH_B5G6R5; + break; + + case DRM_FORMAT_UYVY: + *format = WIN_COLOR_DEPTH_YCbCr422; + break; + + case DRM_FORMAT_YUYV: + if (swap) + *swap = BYTE_SWAP_SWAP2; + + *format = WIN_COLOR_DEPTH_YCbCr422; + break; + + case DRM_FORMAT_YUV420: + *format = WIN_COLOR_DEPTH_YCbCr420P; + break; + + case DRM_FORMAT_YUV422: + *format = WIN_COLOR_DEPTH_YCbCr422P; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar) +{ + switch (format) { + case WIN_COLOR_DEPTH_YCbCr422: + case WIN_COLOR_DEPTH_YUV422: + if (planar) + *planar = false; + + return true; + + case WIN_COLOR_DEPTH_YCbCr420P: + case WIN_COLOR_DEPTH_YUV420P: + case WIN_COLOR_DEPTH_YCbCr422P: + case WIN_COLOR_DEPTH_YUV422P: + case WIN_COLOR_DEPTH_YCbCr422R: + case WIN_COLOR_DEPTH_YUV422R: + case WIN_COLOR_DEPTH_YCbCr422RA: + case WIN_COLOR_DEPTH_YUV422RA: + if (planar) + *planar = true; + + return true; + } + + if (planar) + *planar = false; + + return false; +} + +static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v, + unsigned int bpp) +{ + fixed20_12 outf = dfixed_init(out); + fixed20_12 inf = dfixed_init(in); + u32 dda_inc; + int max; + + if (v) + max = 15; + else { + switch (bpp) { + case 2: + max = 8; + break; + + default: + WARN_ON_ONCE(1); + /* fallthrough */ + case 4: + max = 4; + break; + } + } + + outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1)); + inf.full -= dfixed_const(1); + + dda_inc = dfixed_div(inf, outf); + dda_inc = min_t(u32, dda_inc, dfixed_const(max)); + + return dda_inc; +} + +static inline u32 compute_initial_dda(unsigned int in) +{ + fixed20_12 inf = dfixed_init(in); + return dfixed_frac(inf); +} + +static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, + const struct tegra_dc_window *window) +{ + unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp; + unsigned long value, flags; + bool yuv, planar; + + /* + * For YUV planar modes, the number of bytes per pixel takes into + * account only the luma component and therefore is 1. + */ + yuv = tegra_dc_format_is_yuv(window->format, &planar); + if (!yuv) + bpp = window->bits_per_pixel / 8; + else + bpp = planar ? 
1 : 2; + + spin_lock_irqsave(&dc->lock, flags); + + value = WINDOW_A_SELECT << index; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); + + tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH); + tegra_dc_writel(dc, window->swap, DC_WIN_BYTE_SWAP); + + value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x); + tegra_dc_writel(dc, value, DC_WIN_POSITION); + + value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w); + tegra_dc_writel(dc, value, DC_WIN_SIZE); + + h_offset = window->src.x * bpp; + v_offset = window->src.y; + h_size = window->src.w * bpp; + v_size = window->src.h; + + value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size); + tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE); + + /* + * For DDA computations the number of bytes per pixel for YUV planar + * modes needs to take into account all Y, U and V components. + */ + if (yuv && planar) + bpp = 2; + + h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp); + v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp); + + value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda); + tegra_dc_writel(dc, value, DC_WIN_DDA_INC); + + h_dda = compute_initial_dda(window->src.x); + v_dda = compute_initial_dda(window->src.y); + + tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA); + tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA); + + tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE); + tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE); + + tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR); + + if (yuv && planar) { + tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U); + tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V); + value = window->stride[1] << 16 | window->stride[0]; + tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE); + } else { + tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE); + } + + if (window->bottom_up) + v_offset += window->src.h - 1; + + tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); + tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); + + if (dc->soc->supports_block_linear) { + unsigned long height = window->tiling.value; + + switch (window->tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + value = DC_WINBUF_SURFACE_KIND_PITCH; + break; + + case TEGRA_BO_TILING_MODE_TILED: + value = DC_WINBUF_SURFACE_KIND_TILED; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) | + DC_WINBUF_SURFACE_KIND_BLOCK; + break; + } + + tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND); + } else { + switch (window->tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | + DC_WIN_BUFFER_ADDR_MODE_LINEAR; + break; + + case TEGRA_BO_TILING_MODE_TILED: + value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | + DC_WIN_BUFFER_ADDR_MODE_TILE; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + /* + * No need to handle this here because ->atomic_check + * will already have filtered it out. 
+ */ + break; + } + + tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); + } + + value = WIN_ENABLE; + + if (yuv) { + /* setup default colorspace conversion coefficients */ + tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF); + tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB); + tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR); + tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR); + tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG); + tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG); + tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB); + tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB); + + value |= CSC_ENABLE; + } else if (window->bits_per_pixel < 24) { + value |= COLOR_EXPAND; + } + + if (window->bottom_up) + value |= V_DIRECTION; + + tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); + + /* + * Disable blending and assume Window A is the bottom-most window, + * Window C is the top-most window and Window B is in the middle. + */ + tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY); + tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN); + + switch (index) { + case 0: + tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X); + tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y); + tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY); + break; + + case 1: + tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X); + tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y); + tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY); + break; + + case 2: + tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X); + tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y); + tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY); + break; + } + + spin_unlock_irqrestore(&dc->lock, flags); +} + +static void tegra_plane_destroy(struct drm_plane *plane) +{ + struct tegra_plane *p = to_tegra_plane(plane); + + drm_plane_cleanup(plane); + kfree(p); +} + +static const u32 tegra_primary_plane_formats[] = { + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGB565, +}; + +static void tegra_primary_plane_destroy(struct drm_plane *plane) +{ + tegra_plane_destroy(plane); +} + +static void tegra_plane_reset(struct drm_plane *plane) +{ + struct tegra_plane_state *state; + + if (plane->state) + __drm_atomic_helper_plane_destroy_state(plane, plane->state); + + kfree(plane->state); + plane->state = NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + plane->state = &state->base; + plane->state->plane = plane; + } +} + +static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); + struct tegra_plane_state *copy; + + copy = kmalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, ©->base); + copy->tiling = state->tiling; + copy->format = state->format; + copy->swap = state->swap; + + return ©->base; +} + +static void tegra_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(plane, state); + kfree(state); +} + +static const struct drm_plane_funcs tegra_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = tegra_primary_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, +}; + +static int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_framebuffer *fb, + const struct drm_plane_state *new_state) +{ + return 0; +} + +static 
void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_framebuffer *fb, + const struct drm_plane_state *old_fb) +{ +} + +static int tegra_plane_state_add(struct tegra_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + struct tegra_dc_state *tegra; + + /* Propagate errors from allocation or locking failures. */ + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + tegra = to_dc_state(crtc_state); + + tegra->planes |= WIN_A_ACT_REQ << plane->index; + + return 0; +} + +static int tegra_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_plane_state *plane_state = to_tegra_plane_state(state); + struct tegra_bo_tiling *tiling = &plane_state->tiling; + struct tegra_plane *tegra = to_tegra_plane(plane); + struct tegra_dc *dc = to_tegra_dc(state->crtc); + int err; + + /* no need for further checks if the plane is being disabled */ + if (!state->crtc) + return 0; + + err = tegra_dc_format(state->fb->pixel_format, &plane_state->format, + &plane_state->swap); + if (err < 0) + return err; + + err = tegra_fb_get_tiling(state->fb, tiling); + if (err < 0) + return err; + + if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK && + !dc->soc->supports_block_linear) { + DRM_ERROR("hardware doesn't support block linear mode\n"); + return -EINVAL; + } + + /* + * Tegra doesn't support different strides for U and V planes so we + * error out if the user tries to display a framebuffer with such a + * configuration. + */ + if (drm_format_num_planes(state->fb->pixel_format) > 2) { + if (state->fb->pitches[2] != state->fb->pitches[1]) { + DRM_ERROR("unsupported UV-plane configuration\n"); + return -EINVAL; + } + } + + err = tegra_plane_state_add(tegra, state); + if (err < 0) + return err; + + return 0; +} + +static void tegra_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); + struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_framebuffer *fb = plane->state->fb; + struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc_window window; + unsigned int i; + + /* rien ne va plus */ + if (!plane->state->crtc || !plane->state->fb) + return; + + memset(&window, 0, sizeof(window)); + window.src.x = plane->state->src_x >> 16; + window.src.y = plane->state->src_y >> 16; + window.src.w = plane->state->src_w >> 16; + window.src.h = plane->state->src_h >> 16; + window.dst.x = plane->state->crtc_x; + window.dst.y = plane->state->crtc_y; + window.dst.w = plane->state->crtc_w; + window.dst.h = plane->state->crtc_h; + window.bits_per_pixel = fb->bits_per_pixel; + window.bottom_up = tegra_fb_is_bottom_up(fb); + + /* copy from state */ + window.tiling = state->tiling; + window.format = state->format; + window.swap = state->swap; + + for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { + struct tegra_bo *bo = tegra_fb_get_plane(fb, i); + + window.base[i] = bo->paddr + fb->offsets[i]; + window.stride[i] = fb->pitches[i]; + } + + tegra_dc_setup_window(dc, p->index, &window); +} + +static void tegra_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc *dc; + unsigned long flags; + u32 value; + + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + + dc = to_tegra_dc(old_state->crtc); + + spin_lock_irqsave(&dc->lock, flags); 
+ + value = WINDOW_A_SELECT << p->index; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); + + value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); + value &= ~WIN_ENABLE; + tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); + + spin_unlock_irqrestore(&dc->lock, flags); +} + +static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_plane_atomic_check, + .atomic_update = tegra_plane_atomic_update, + .atomic_disable = tegra_plane_atomic_disable, +}; + +static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, + struct tegra_dc *dc) +{ + /* + * Ideally this would use drm_crtc_mask(), but that would require the + * CRTC to already be in the mode_config's list of CRTCs. However, it + * will only be added to that list in the drm_crtc_init_with_planes() + * (in tegra_dc_init()), which in turn requires registration of these + * planes. So we have ourselves a nice little chicken and egg problem + * here. + * + * We work around this by manually creating the mask from the number + * of CRTCs that have been registered, and should therefore always be + * the same as drm_crtc_index() after registration. + */ + unsigned long possible_crtcs = 1 << drm->mode_config.num_crtc; + struct tegra_plane *plane; + unsigned int num_formats; + const u32 *formats; + int err; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + num_formats = ARRAY_SIZE(tegra_primary_plane_formats); + formats = tegra_primary_plane_formats; + + err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, + &tegra_primary_plane_funcs, formats, + num_formats, DRM_PLANE_TYPE_PRIMARY); + if (err < 0) { + kfree(plane); + return ERR_PTR(err); + } + + drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs); + + return &plane->base; +} + +static const u32 tegra_cursor_plane_formats[] = { + DRM_FORMAT_RGBA8888, +}; + +static int tegra_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_plane *tegra = to_tegra_plane(plane); + int err; + + /* no need for further checks if the plane is being disabled */ + if (!state->crtc) + return 0; + + /* scaling not supported for cursor */ + if ((state->src_w >> 16 != state->crtc_w) || + (state->src_h >> 16 != state->crtc_h)) + return -EINVAL; + + /* only square cursors supported */ + if (state->src_w != state->src_h) + return -EINVAL; + + if (state->crtc_w != 32 && state->crtc_w != 64 && + state->crtc_w != 128 && state->crtc_w != 256) + return -EINVAL; + + err = tegra_plane_state_add(tegra, state); + if (err < 0) + return err; + + return 0; +} + +static void tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0); + struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_plane_state *state = plane->state; + u32 value = CURSOR_CLIP_DISPLAY; + + /* rien ne va plus */ + if (!plane->state->crtc || !plane->state->fb) + return; + + switch (state->crtc_w) { + case 32: + value |= CURSOR_SIZE_32x32; + break; + + case 64: + value |= CURSOR_SIZE_64x64; + break; + + case 128: + value |= CURSOR_SIZE_128x128; + break; + + case 256: + value |= CURSOR_SIZE_256x256; + break; + + default: + WARN(1, "cursor size %ux%u not supported\n", state->crtc_w, + state->crtc_h); + return; + } + + value |= (bo->paddr >> 10) & 0x3fffff; + tegra_dc_writel(dc, value, 
DC_DISP_CURSOR_START_ADDR); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + value = (bo->paddr >> 32) & 0x3; + tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); +#endif + + /* enable cursor and set blend mode */ + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= CURSOR_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL); + value &= ~CURSOR_DST_BLEND_MASK; + value &= ~CURSOR_SRC_BLEND_MASK; + value |= CURSOR_MODE_NORMAL; + value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC; + value |= CURSOR_SRC_BLEND_K1_TIMES_SRC; + value |= CURSOR_ALPHA; + tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); + + /* position the cursor */ + value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff); + tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); + +} + +static void tegra_cursor_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_dc *dc; + u32 value; + + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + + dc = to_tegra_dc(old_state->crtc); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~CURSOR_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); +} + +static const struct drm_plane_funcs tegra_cursor_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = tegra_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, +}; + +static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_cursor_atomic_check, + .atomic_update = tegra_cursor_atomic_update, + .atomic_disable = tegra_cursor_atomic_disable, +}; + +static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, + struct tegra_dc *dc) +{ + struct tegra_plane *plane; + unsigned int num_formats; + const u32 *formats; + int err; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + /* + * We'll treat the cursor as an overlay plane with index 6 here so + * that the update and activation request bits in DC_CMD_STATE_CONTROL + * match up. 
+ */ + plane->index = 6; + + num_formats = ARRAY_SIZE(tegra_cursor_plane_formats); + formats = tegra_cursor_plane_formats; + + err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, + &tegra_cursor_plane_funcs, formats, + num_formats, DRM_PLANE_TYPE_CURSOR); + if (err < 0) { + kfree(plane); + return ERR_PTR(err); + } + + drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs); + + return &plane->base; +} + +static void tegra_overlay_plane_destroy(struct drm_plane *plane) +{ + tegra_plane_destroy(plane); +} + +static const struct drm_plane_funcs tegra_overlay_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = tegra_overlay_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, +}; + +static const uint32_t tegra_overlay_plane_formats[] = { + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YUV420, + DRM_FORMAT_YUV422, +}; + +static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_plane_atomic_check, + .atomic_update = tegra_plane_atomic_update, + .atomic_disable = tegra_plane_atomic_disable, +}; + +static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, + struct tegra_dc *dc, + unsigned int index) +{ + struct tegra_plane *plane; + unsigned int num_formats; + const u32 *formats; + int err; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + plane->index = index; + + num_formats = ARRAY_SIZE(tegra_overlay_plane_formats); + formats = tegra_overlay_plane_formats; + + err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, + &tegra_overlay_plane_funcs, formats, + num_formats, DRM_PLANE_TYPE_OVERLAY); + if (err < 0) { + kfree(plane); + return ERR_PTR(err); + } + + drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs); + + return &plane->base; +} + +static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc) +{ + struct drm_plane *plane; + unsigned int i; + + for (i = 0; i < 2; i++) { + plane = tegra_dc_overlay_plane_create(drm, dc, 1 + i); + if (IS_ERR(plane)) + return PTR_ERR(plane); + } + + return 0; +} + +u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc) +{ + if (dc->syncpt) + return host1x_syncpt_read(dc->syncpt); + + /* fallback to software emulated VBLANK counter */ + return drm_crtc_vblank_count(&dc->base); +} + +void tegra_dc_enable_vblank(struct tegra_dc *dc) +{ + unsigned long value, flags; + + spin_lock_irqsave(&dc->lock, flags); + + value = tegra_dc_readl(dc, DC_CMD_INT_MASK); + value |= VBLANK_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_MASK); + + spin_unlock_irqrestore(&dc->lock, flags); +} + +void tegra_dc_disable_vblank(struct tegra_dc *dc) +{ + unsigned long value, flags; + + spin_lock_irqsave(&dc->lock, flags); + + value = tegra_dc_readl(dc, DC_CMD_INT_MASK); + value &= ~VBLANK_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_MASK); + + spin_unlock_irqrestore(&dc->lock, flags); +} + +static void tegra_dc_finish_page_flip(struct tegra_dc *dc) +{ + struct drm_device *drm = dc->base.dev; + struct drm_crtc *crtc = &dc->base; + unsigned long flags, base; + struct tegra_bo *bo; + + spin_lock_irqsave(&drm->event_lock, flags); + + if (!dc->event) { + 
spin_unlock_irqrestore(&drm->event_lock, flags); + return; + } + + bo = tegra_fb_get_plane(crtc->primary->fb, 0); + + spin_lock(&dc->lock); + + /* check if new start address has been latched */ + tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); + tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); + base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); + tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); + + spin_unlock(&dc->lock); + + if (base == bo->paddr + crtc->primary->fb->offsets[0]) { + drm_crtc_send_vblank_event(crtc, dc->event); + drm_crtc_vblank_put(crtc); + dc->event = NULL; + } + + spin_unlock_irqrestore(&drm->event_lock, flags); +} + +void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) +{ + struct tegra_dc *dc = to_tegra_dc(crtc); + struct drm_device *drm = crtc->dev; + unsigned long flags; + + spin_lock_irqsave(&drm->event_lock, flags); + + if (dc->event && dc->event->base.file_priv == file) { + dc->event->base.destroy(&dc->event->base); + drm_crtc_vblank_put(crtc); + dc->event = NULL; + } + + spin_unlock_irqrestore(&drm->event_lock, flags); +} + +static void tegra_dc_destroy(struct drm_crtc *crtc) +{ + drm_crtc_cleanup(crtc); +} + +static void tegra_crtc_reset(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state; + + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc, crtc->state); + + kfree(crtc->state); + crtc->state = NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + crtc->state = &state->base; + crtc->state->crtc = crtc; + } +} + +static struct drm_crtc_state * +tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc_state *copy; + + copy = kmalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, ©->base); + copy->clk = state->clk; + copy->pclk = state->pclk; + copy->div = state->div; + copy->planes = state->planes; + + return ©->base; +} + +static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + __drm_atomic_helper_crtc_destroy_state(crtc, state); + kfree(state); +} + +static const struct drm_crtc_funcs tegra_crtc_funcs = { + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, + .destroy = tegra_dc_destroy, + .reset = tegra_crtc_reset, + .atomic_duplicate_state = tegra_crtc_atomic_duplicate_state, + .atomic_destroy_state = tegra_crtc_atomic_destroy_state, +}; + +static void tegra_dc_stop(struct tegra_dc *dc) +{ + u32 value; + + /* stop the display controller */ + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= ~DISP_CTRL_MODE_MASK; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); + + tegra_dc_commit(dc); +} + +static bool tegra_dc_idle(struct tegra_dc *dc) +{ + u32 value; + + value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND); + + return (value & DISP_CTRL_MODE_MASK) == 0; +} + +static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) +{ + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + if (tegra_dc_idle(dc)) + return 0; + + usleep_range(1000, 2000); + } + + dev_dbg(dc->dev, "timeout waiting for DC to become idle\n"); + return -ETIMEDOUT; +} + +static void tegra_crtc_disable(struct drm_crtc *crtc) +{ + struct tegra_dc *dc = to_tegra_dc(crtc); + u32 value; + + if (!tegra_dc_idle(dc)) { + tegra_dc_stop(dc); + + /* + * Ignore the return value, there isn't anything useful to do + * in case this 
fails. + */ + tegra_dc_wait_idle(dc, 100); + } + + /* + * This should really be part of the RGB encoder driver, but clearing + * these bits has the side-effect of stopping the display controller. + * When that happens no VBLANK interrupts will be raised. At the same + * time the encoder is disabled before the display controller, so the + * above code is always going to timeout waiting for the controller + * to go idle. + * + * Given the close coupling between the RGB encoder and the display + * controller doing it here is still kind of okay. None of the other + * encoder drivers require these bits to be cleared. + * + * XXX: Perhaps given that the display controller is switched off at + * this point anyway maybe clearing these bits isn't even useful for + * the RGB encoder? + */ + if (dc->rgb) { + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); + } + + drm_crtc_vblank_off(crtc); +} + +static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + return true; +} + +static int tegra_dc_set_timings(struct tegra_dc *dc, + struct drm_display_mode *mode) +{ + unsigned int h_ref_to_sync = 1; + unsigned int v_ref_to_sync = 1; + unsigned long value; + + tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS); + + value = (v_ref_to_sync << 16) | h_ref_to_sync; + tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC); + + value = ((mode->vsync_end - mode->vsync_start) << 16) | + ((mode->hsync_end - mode->hsync_start) << 0); + tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH); + + value = ((mode->vtotal - mode->vsync_end) << 16) | + ((mode->htotal - mode->hsync_end) << 0); + tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH); + + value = ((mode->vsync_start - mode->vdisplay) << 16) | + ((mode->hsync_start - mode->hdisplay) << 0); + tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH); + + value = (mode->vdisplay << 16) | mode->hdisplay; + tegra_dc_writel(dc, value, DC_DISP_ACTIVE); + + return 0; +} + +/** + * tegra_dc_state_setup_clock - check clock settings and store them in atomic + * state + * @dc: display controller + * @crtc_state: CRTC atomic state + * @clk: parent clock for display controller + * @pclk: pixel clock + * @div: shift clock divider + * + * Returns: + * 0 on success or a negative error-code on failure. + */ +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div) +{ + struct tegra_dc_state *state = to_dc_state(crtc_state); + + if (!clk_has_parent(dc->clk, clk)) + return -EINVAL; + + state->clk = clk; + state->pclk = pclk; + state->div = div; + + return 0; +} + +static void tegra_dc_commit_state(struct tegra_dc *dc, + struct tegra_dc_state *state) +{ + u32 value; + int err; + + err = clk_set_parent(dc->clk, state->clk); + if (err < 0) + dev_err(dc->dev, "failed to set parent clock: %d\n", err); + + /* + * Outputs may not want to change the parent clock rate. This is only + * relevant to Tegra20 where only a single display PLL is available. + * Since that PLL would typically be used for HDMI, an internal LVDS + * panel would need to be driven by some other clock such as PLL_P + * which is shared with other peripherals. Changing the clock rate + * should therefore be avoided. 
+ */ + if (state->pclk > 0) { + err = clk_set_rate(state->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, + "failed to set clock rate to %lu Hz\n", + state->pclk); + } + + DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), + state->div); + DRM_DEBUG_KMS("pclk: %lu\n", state->pclk); + + value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; + tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); +} + +static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc *dc = to_tegra_dc(crtc); + u32 value; + + tegra_dc_commit_state(dc, state); + + /* program display mode */ + tegra_dc_set_timings(dc, mode); + + /* interlacing isn't supported yet, so disable it */ + if (dc->soc->supports_interlacing) { + value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); + value &= ~INTERLACE_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL); + } + + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= ~DISP_CTRL_MODE_MASK; + value |= DISP_CTRL_MODE_C_DISPLAY; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); + + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); + + tegra_dc_commit(dc); +} + +static void tegra_crtc_prepare(struct drm_crtc *crtc) +{ + drm_crtc_vblank_off(crtc); +} + +static void tegra_crtc_commit(struct drm_crtc *crtc) +{ + drm_crtc_vblank_on(crtc); +} + +static int tegra_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return 0; +} + +static void tegra_crtc_atomic_begin(struct drm_crtc *crtc) +{ + struct tegra_dc *dc = to_tegra_dc(crtc); + + if (crtc->state->event) { + crtc->state->event->pipe = drm_crtc_index(crtc); + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + dc->event = crtc->state->event; + crtc->state->event = NULL; + } +} + +static void tegra_crtc_atomic_flush(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc *dc = to_tegra_dc(crtc); + + tegra_dc_writel(dc, state->planes << 8, DC_CMD_STATE_CONTROL); + tegra_dc_writel(dc, state->planes, DC_CMD_STATE_CONTROL); +} + +static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { + .disable = tegra_crtc_disable, + .mode_fixup = tegra_crtc_mode_fixup, + .mode_set_nofb = tegra_crtc_mode_set_nofb, + .prepare = tegra_crtc_prepare, + .commit = tegra_crtc_commit, + .atomic_check = tegra_crtc_atomic_check, + .atomic_begin = tegra_crtc_atomic_begin, + .atomic_flush = tegra_crtc_atomic_flush, +}; + +static irqreturn_t tegra_dc_irq(int irq, void *data) +{ + struct tegra_dc *dc = data; + unsigned long status; + + status = tegra_dc_readl(dc, DC_CMD_INT_STATUS); + tegra_dc_writel(dc, status, DC_CMD_INT_STATUS); + + if (status & FRAME_END_INT) { + /* + dev_dbg(dc->dev, "%s(): frame end\n", __func__); + */ + } + + if (status & VBLANK_INT) { + /* + dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); + */ + drm_crtc_handle_vblank(&dc->base); + tegra_dc_finish_page_flip(dc); + } + + if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) { + /* + dev_dbg(dc->dev, "%s(): underflow\n", __func__); + */ + } + + return IRQ_HANDLED; +} + +static int tegra_dc_show_regs(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_dc *dc = node->info_ent->data; + +#define 
DUMP_REG(name) \ + seq_printf(s, "%-40s %#05x %08x\n", #name, name, \ + tegra_dc_readl(dc, name)) + + DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT); + DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); + DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR); + DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT); + DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL); + DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR); + DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT); + DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL); + DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR); + DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT); + DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL); + DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR); + DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC); + DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0); + DUMP_REG(DC_CMD_DISPLAY_COMMAND); + DUMP_REG(DC_CMD_SIGNAL_RAISE); + DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL); + DUMP_REG(DC_CMD_INT_STATUS); + DUMP_REG(DC_CMD_INT_MASK); + DUMP_REG(DC_CMD_INT_ENABLE); + DUMP_REG(DC_CMD_INT_TYPE); + DUMP_REG(DC_CMD_INT_POLARITY); + DUMP_REG(DC_CMD_SIGNAL_RAISE1); + DUMP_REG(DC_CMD_SIGNAL_RAISE2); + DUMP_REG(DC_CMD_SIGNAL_RAISE3); + DUMP_REG(DC_CMD_STATE_ACCESS); + DUMP_REG(DC_CMD_STATE_CONTROL); + DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER); + DUMP_REG(DC_CMD_REG_ACT_CONTROL); + DUMP_REG(DC_COM_CRC_CONTROL); + DUMP_REG(DC_COM_CRC_CHECKSUM); + DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0)); + DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1)); + DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2)); + DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3)); + DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0)); + DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1)); + DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2)); + DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3)); + DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0)); + DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1)); + DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2)); + DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3)); + DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0)); + DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1)); + DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2)); + DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3)); + DUMP_REG(DC_COM_PIN_INPUT_DATA(0)); + DUMP_REG(DC_COM_PIN_INPUT_DATA(1)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5)); + DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6)); + DUMP_REG(DC_COM_PIN_MISC_CONTROL); + DUMP_REG(DC_COM_PIN_PM0_CONTROL); + DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE); + DUMP_REG(DC_COM_PIN_PM1_CONTROL); + DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE); + DUMP_REG(DC_COM_SPI_CONTROL); + DUMP_REG(DC_COM_SPI_START_BYTE); + DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB); + DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD); + DUMP_REG(DC_COM_HSPI_CS_DC); + DUMP_REG(DC_COM_SCRATCH_REGISTER_A); + DUMP_REG(DC_COM_SCRATCH_REGISTER_B); + DUMP_REG(DC_COM_GPIO_CTRL); + DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER); + DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED); + DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0); + DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1); + DUMP_REG(DC_DISP_DISP_WIN_OPTIONS); + DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY); + DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); + DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS); + DUMP_REG(DC_DISP_REF_TO_SYNC); + DUMP_REG(DC_DISP_SYNC_WIDTH); + DUMP_REG(DC_DISP_BACK_PORCH); + DUMP_REG(DC_DISP_ACTIVE); + DUMP_REG(DC_DISP_FRONT_PORCH); + DUMP_REG(DC_DISP_H_PULSE0_CONTROL); + DUMP_REG(DC_DISP_H_PULSE0_POSITION_A); + DUMP_REG(DC_DISP_H_PULSE0_POSITION_B); + DUMP_REG(DC_DISP_H_PULSE0_POSITION_C); + DUMP_REG(DC_DISP_H_PULSE0_POSITION_D); + DUMP_REG(DC_DISP_H_PULSE1_CONTROL); + DUMP_REG(DC_DISP_H_PULSE1_POSITION_A); + 
DUMP_REG(DC_DISP_H_PULSE1_POSITION_B); + DUMP_REG(DC_DISP_H_PULSE1_POSITION_C); + DUMP_REG(DC_DISP_H_PULSE1_POSITION_D); + DUMP_REG(DC_DISP_H_PULSE2_CONTROL); + DUMP_REG(DC_DISP_H_PULSE2_POSITION_A); + DUMP_REG(DC_DISP_H_PULSE2_POSITION_B); + DUMP_REG(DC_DISP_H_PULSE2_POSITION_C); + DUMP_REG(DC_DISP_H_PULSE2_POSITION_D); + DUMP_REG(DC_DISP_V_PULSE0_CONTROL); + DUMP_REG(DC_DISP_V_PULSE0_POSITION_A); + DUMP_REG(DC_DISP_V_PULSE0_POSITION_B); + DUMP_REG(DC_DISP_V_PULSE0_POSITION_C); + DUMP_REG(DC_DISP_V_PULSE1_CONTROL); + DUMP_REG(DC_DISP_V_PULSE1_POSITION_A); + DUMP_REG(DC_DISP_V_PULSE1_POSITION_B); + DUMP_REG(DC_DISP_V_PULSE1_POSITION_C); + DUMP_REG(DC_DISP_V_PULSE2_CONTROL); + DUMP_REG(DC_DISP_V_PULSE2_POSITION_A); + DUMP_REG(DC_DISP_V_PULSE3_CONTROL); + DUMP_REG(DC_DISP_V_PULSE3_POSITION_A); + DUMP_REG(DC_DISP_M0_CONTROL); + DUMP_REG(DC_DISP_M1_CONTROL); + DUMP_REG(DC_DISP_DI_CONTROL); + DUMP_REG(DC_DISP_PP_CONTROL); + DUMP_REG(DC_DISP_PP_SELECT_A); + DUMP_REG(DC_DISP_PP_SELECT_B); + DUMP_REG(DC_DISP_PP_SELECT_C); + DUMP_REG(DC_DISP_PP_SELECT_D); + DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL); + DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL); + DUMP_REG(DC_DISP_DISP_COLOR_CONTROL); + DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS); + DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS); + DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS); + DUMP_REG(DC_DISP_LCD_SPI_OPTIONS); + DUMP_REG(DC_DISP_BORDER_COLOR); + DUMP_REG(DC_DISP_COLOR_KEY0_LOWER); + DUMP_REG(DC_DISP_COLOR_KEY0_UPPER); + DUMP_REG(DC_DISP_COLOR_KEY1_LOWER); + DUMP_REG(DC_DISP_COLOR_KEY1_UPPER); + DUMP_REG(DC_DISP_CURSOR_FOREGROUND); + DUMP_REG(DC_DISP_CURSOR_BACKGROUND); + DUMP_REG(DC_DISP_CURSOR_START_ADDR); + DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS); + DUMP_REG(DC_DISP_CURSOR_POSITION); + DUMP_REG(DC_DISP_CURSOR_POSITION_NS); + DUMP_REG(DC_DISP_INIT_SEQ_CONTROL); + DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A); + DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B); + DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C); + DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D); + DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL); + DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST); + DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST); + DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST); + DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST); + DUMP_REG(DC_DISP_DAC_CRT_CTRL); + DUMP_REG(DC_DISP_DISP_MISC_CONTROL); + DUMP_REG(DC_DISP_SD_CONTROL); + DUMP_REG(DC_DISP_SD_CSC_COEFF); + DUMP_REG(DC_DISP_SD_LUT(0)); + DUMP_REG(DC_DISP_SD_LUT(1)); + DUMP_REG(DC_DISP_SD_LUT(2)); + DUMP_REG(DC_DISP_SD_LUT(3)); + DUMP_REG(DC_DISP_SD_LUT(4)); + DUMP_REG(DC_DISP_SD_LUT(5)); + DUMP_REG(DC_DISP_SD_LUT(6)); + DUMP_REG(DC_DISP_SD_LUT(7)); + DUMP_REG(DC_DISP_SD_LUT(8)); + DUMP_REG(DC_DISP_SD_FLICKER_CONTROL); + DUMP_REG(DC_DISP_DC_PIXEL_COUNT); + DUMP_REG(DC_DISP_SD_HISTOGRAM(0)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(1)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(2)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(3)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(4)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(5)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(6)); + DUMP_REG(DC_DISP_SD_HISTOGRAM(7)); + DUMP_REG(DC_DISP_SD_BL_TF(0)); + DUMP_REG(DC_DISP_SD_BL_TF(1)); + DUMP_REG(DC_DISP_SD_BL_TF(2)); + DUMP_REG(DC_DISP_SD_BL_TF(3)); + DUMP_REG(DC_DISP_SD_BL_CONTROL); + DUMP_REG(DC_DISP_SD_HW_K_VALUES); + DUMP_REG(DC_DISP_SD_MAN_K_VALUES); + DUMP_REG(DC_DISP_CURSOR_START_ADDR_HI); + DUMP_REG(DC_DISP_BLEND_CURSOR_CONTROL); + DUMP_REG(DC_WIN_WIN_OPTIONS); + DUMP_REG(DC_WIN_BYTE_SWAP); + DUMP_REG(DC_WIN_BUFFER_CONTROL); + DUMP_REG(DC_WIN_COLOR_DEPTH); + DUMP_REG(DC_WIN_POSITION); + DUMP_REG(DC_WIN_SIZE); + DUMP_REG(DC_WIN_PRESCALED_SIZE); + DUMP_REG(DC_WIN_H_INITIAL_DDA); + 
DUMP_REG(DC_WIN_V_INITIAL_DDA); + DUMP_REG(DC_WIN_DDA_INC); + DUMP_REG(DC_WIN_LINE_STRIDE); + DUMP_REG(DC_WIN_BUF_STRIDE); + DUMP_REG(DC_WIN_UV_BUF_STRIDE); + DUMP_REG(DC_WIN_BUFFER_ADDR_MODE); + DUMP_REG(DC_WIN_DV_CONTROL); + DUMP_REG(DC_WIN_BLEND_NOKEY); + DUMP_REG(DC_WIN_BLEND_1WIN); + DUMP_REG(DC_WIN_BLEND_2WIN_X); + DUMP_REG(DC_WIN_BLEND_2WIN_Y); + DUMP_REG(DC_WIN_BLEND_3WIN_XY); + DUMP_REG(DC_WIN_HP_FETCH_CONTROL); + DUMP_REG(DC_WINBUF_START_ADDR); + DUMP_REG(DC_WINBUF_START_ADDR_NS); + DUMP_REG(DC_WINBUF_START_ADDR_U); + DUMP_REG(DC_WINBUF_START_ADDR_U_NS); + DUMP_REG(DC_WINBUF_START_ADDR_V); + DUMP_REG(DC_WINBUF_START_ADDR_V_NS); + DUMP_REG(DC_WINBUF_ADDR_H_OFFSET); + DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS); + DUMP_REG(DC_WINBUF_ADDR_V_OFFSET); + DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS); + DUMP_REG(DC_WINBUF_UFLOW_STATUS); + DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS); + DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS); + DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS); + +#undef DUMP_REG + + return 0; +} + +static struct drm_info_list debugfs_files[] = { + { "regs", tegra_dc_show_regs, 0, NULL }, +}; + +static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) +{ + unsigned int i; + char *name; + int err; + + name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe); + dc->debugfs = debugfs_create_dir(name, minor->debugfs_root); + kfree(name); + + if (!dc->debugfs) + return -ENOMEM; + + dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), + GFP_KERNEL); + if (!dc->debugfs_files) { + err = -ENOMEM; + goto remove; + } + + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + dc->debugfs_files[i].data = dc; + + err = drm_debugfs_create_files(dc->debugfs_files, + ARRAY_SIZE(debugfs_files), + dc->debugfs, minor); + if (err < 0) + goto free; + + dc->minor = minor; + + return 0; + +free: + kfree(dc->debugfs_files); + dc->debugfs_files = NULL; +remove: + debugfs_remove(dc->debugfs); + dc->debugfs = NULL; + + return err; +} + +static int tegra_dc_debugfs_exit(struct tegra_dc *dc) +{ + drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files), + dc->minor); + dc->minor = NULL; + + kfree(dc->debugfs_files); + dc->debugfs_files = NULL; + + debugfs_remove(dc->debugfs); + dc->debugfs = NULL; + + return 0; +} + +static int tegra_dc_init(struct host1x_client *client) +{ + struct drm_device *drm = dev_get_drvdata(client->parent); + struct tegra_dc *dc = host1x_client_to_dc(client); + struct tegra_drm *tegra = drm->dev_private; + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + u32 value; + int err; + + if (tegra->domain) { + err = iommu_attach_device(tegra->domain, dc->dev); + if (err < 0) { + dev_err(dc->dev, "failed to attach to domain: %d\n", + err); + return err; + } + + dc->domain = tegra->domain; + } + + primary = tegra_dc_primary_plane_create(drm, dc); + if (IS_ERR(primary)) { + err = PTR_ERR(primary); + goto cleanup; + } + + if (dc->soc->supports_cursor) { + cursor = tegra_dc_cursor_plane_create(drm, dc); + if (IS_ERR(cursor)) { + err = PTR_ERR(cursor); + goto cleanup; + } + } + + err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor, + &tegra_crtc_funcs); + if (err < 0) + goto cleanup; + + drm_mode_crtc_set_gamma_size(&dc->base, 256); + drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); + + /* + * Keep track of the minimum pitch alignment across all display + * controllers. 
+ */ + if (dc->soc->pitch_align > tegra->pitch_align) + tegra->pitch_align = dc->soc->pitch_align; + + err = tegra_dc_rgb_init(drm, dc); + if (err < 0 && err != -ENODEV) { + dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); + goto cleanup; + } + + err = tegra_dc_add_planes(drm, dc); + if (err < 0) + goto cleanup; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_dc_debugfs_init(dc, drm->primary); + if (err < 0) + dev_err(dc->dev, "debugfs setup failed: %d\n", err); + } + + err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0, + dev_name(dc->dev), dc); + if (err < 0) { + dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq, + err); + goto cleanup; + } + + /* initialize display controller */ + if (dc->syncpt) { + u32 syncpt = host1x_syncpt_id(dc->syncpt); + + value = SYNCPT_CNTRL_NO_STALL; + tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); + + value = SYNCPT_VSYNC_ENABLE | syncpt; + tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC); + } + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); + + /* initialize timer */ + value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | + WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); + tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); + + value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | + WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); + tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); + + value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_MASK); + + if (dc->soc->supports_border_color) + tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); + + return 0; + +cleanup: + if (cursor) + drm_plane_cleanup(cursor); + + if (primary) + drm_plane_cleanup(primary); + + if (tegra->domain) { + iommu_detach_device(tegra->domain, dc->dev); + dc->domain = NULL; + } + + return err; +} + +static int tegra_dc_exit(struct host1x_client *client) +{ + struct tegra_dc *dc = host1x_client_to_dc(client); + int err; + + devm_free_irq(dc->dev, dc->irq, dc); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_dc_debugfs_exit(dc); + if (err < 0) + dev_err(dc->dev, "debugfs cleanup failed: %d\n", err); + } + + err = tegra_dc_rgb_exit(dc); + if (err) { + dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err); + return err; + } + + if (dc->domain) { + iommu_detach_device(dc->domain, dc->dev); + dc->domain = NULL; + } + + return 0; +} + +static const struct host1x_client_ops dc_client_ops = { + .init = tegra_dc_init, + .exit = tegra_dc_exit, +}; + +static const struct tegra_dc_soc_info tegra20_dc_soc_info = { + .supports_border_color = true, + .supports_interlacing = false, + .supports_cursor = false, + .supports_block_linear = false, + .pitch_align = 8, + .has_powergate = false, +}; + +static const struct tegra_dc_soc_info tegra30_dc_soc_info = { + .supports_border_color = true, + .supports_interlacing = false, + .supports_cursor = false, + .supports_block_linear = false, + .pitch_align = 8, + .has_powergate = false, +}; + +static const struct tegra_dc_soc_info tegra114_dc_soc_info = { + .supports_border_color = true, + .supports_interlacing = false, + .supports_cursor = false, + .supports_block_linear = false, + .pitch_align = 64, + 
.has_powergate = true, +}; + +static const struct tegra_dc_soc_info tegra124_dc_soc_info = { + .supports_border_color = false, + .supports_interlacing = true, + .supports_cursor = true, + .supports_block_linear = true, + .pitch_align = 64, + .has_powergate = true, +}; + +static const struct of_device_id tegra_dc_of_match[] = { + { + .compatible = "nvidia,tegra124-dc", + .data = &tegra124_dc_soc_info, + }, { + .compatible = "nvidia,tegra114-dc", + .data = &tegra114_dc_soc_info, + }, { + .compatible = "nvidia,tegra30-dc", + .data = &tegra30_dc_soc_info, + }, { + .compatible = "nvidia,tegra20-dc", + .data = &tegra20_dc_soc_info, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, tegra_dc_of_match); + +static int tegra_dc_parse_dt(struct tegra_dc *dc) +{ + struct device_node *np; + u32 value = 0; + int err; + + err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value); + if (err < 0) { + dev_err(dc->dev, "missing \"nvidia,head\" property\n"); + + /* + * If the nvidia,head property isn't present, try to find the + * correct head number by looking up the position of this + * display controller's node within the device tree. Assuming + * that the nodes are ordered properly in the DTS file and + * that the translation into a flattened device tree blob + * preserves that ordering this will actually yield the right + * head number. + * + * If those assumptions don't hold, this will still work for + * cases where only a single display controller is used. + */ + for_each_matching_node(np, tegra_dc_of_match) { + if (np == dc->dev->of_node) + break; + + value++; + } + } + + dc->pipe = value; + + return 0; +} + +static int tegra_dc_probe(struct platform_device *pdev) +{ + unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED; + const struct of_device_id *id; + struct resource *regs; + struct tegra_dc *dc; + int err; + + dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); + if (!dc) + return -ENOMEM; + + id = of_match_node(tegra_dc_of_match, pdev->dev.of_node); + if (!id) + return -ENODEV; + + spin_lock_init(&dc->lock); + INIT_LIST_HEAD(&dc->list); + dc->dev = &pdev->dev; + dc->soc = id->data; + + err = tegra_dc_parse_dt(dc); + if (err < 0) + return err; + + dc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dc->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(dc->clk); + } + + dc->rst = devm_reset_control_get(&pdev->dev, "dc"); + if (IS_ERR(dc->rst)) { + dev_err(&pdev->dev, "failed to get reset\n"); + return PTR_ERR(dc->rst); + } + + if (dc->soc->has_powergate) { + if (dc->pipe == 0) + dc->powergate = TEGRA_POWERGATE_DIS; + else + dc->powergate = TEGRA_POWERGATE_DISB; + + err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, + dc->rst); + if (err < 0) { + dev_err(&pdev->dev, "failed to power partition: %d\n", + err); + return err; + } + } else { + err = clk_prepare_enable(dc->clk); + if (err < 0) { + dev_err(&pdev->dev, "failed to enable clock: %d\n", + err); + return err; + } + + err = reset_control_deassert(dc->rst); + if (err < 0) { + dev_err(&pdev->dev, "failed to deassert reset: %d\n", + err); + return err; + } + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dc->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(dc->regs)) + return PTR_ERR(dc->regs); + + dc->irq = platform_get_irq(pdev, 0); + if (dc->irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ\n"); + return -ENXIO; + } + + INIT_LIST_HEAD(&dc->client.list); + dc->client.ops = &dc_client_ops; + dc->client.dev = &pdev->dev; + + err = tegra_dc_rgb_probe(dc); + if 
(err < 0 && err != -ENODEV) { + dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err); + return err; + } + + err = host1x_client_register(&dc->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + return err; + } + + dc->syncpt = host1x_syncpt_request(&pdev->dev, flags); + if (!dc->syncpt) + dev_warn(&pdev->dev, "failed to allocate syncpoint\n"); + + platform_set_drvdata(pdev, dc); + + return 0; +} + +static int tegra_dc_remove(struct platform_device *pdev) +{ + struct tegra_dc *dc = platform_get_drvdata(pdev); + int err; + + host1x_syncpt_free(dc->syncpt); + + err = host1x_client_unregister(&dc->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + err = tegra_dc_rgb_remove(dc); + if (err < 0) { + dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err); + return err; + } + + reset_control_assert(dc->rst); + + if (dc->soc->has_powergate) + tegra_powergate_power_off(dc->powergate); + + clk_disable_unprepare(dc->clk); + + return 0; +} + +struct platform_driver tegra_dc_driver = { + .driver = { + .name = "tegra-dc", + .owner = THIS_MODULE, + .of_match_table = tegra_dc_of_match, + }, + .probe = tegra_dc_probe, + .remove = tegra_dc_remove, +}; diff --git a/kernel/drivers/gpu/drm/tegra/dc.h b/kernel/drivers/gpu/drm/tegra/dc.h new file mode 100644 index 000000000..55792daab --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/dc.h @@ -0,0 +1,444 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef TEGRA_DC_H +#define TEGRA_DC_H 1 + +#define DC_CMD_GENERAL_INCR_SYNCPT 0x000 +#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001 +#define SYNCPT_CNTRL_NO_STALL (1 << 8) +#define SYNCPT_CNTRL_SOFT_RESET (1 << 0) +#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002 +#define DC_CMD_WIN_A_INCR_SYNCPT 0x008 +#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009 +#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a +#define DC_CMD_WIN_B_INCR_SYNCPT 0x010 +#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011 +#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012 +#define DC_CMD_WIN_C_INCR_SYNCPT 0x018 +#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019 +#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a +#define DC_CMD_CONT_SYNCPT_VSYNC 0x028 +#define SYNCPT_VSYNC_ENABLE (1 << 8) +#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031 +#define DC_CMD_DISPLAY_COMMAND 0x032 +#define DISP_CTRL_MODE_STOP (0 << 5) +#define DISP_CTRL_MODE_C_DISPLAY (1 << 5) +#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5) +#define DISP_CTRL_MODE_MASK (3 << 5) +#define DC_CMD_SIGNAL_RAISE 0x033 +#define DC_CMD_DISPLAY_POWER_CONTROL 0x036 +#define PW0_ENABLE (1 << 0) +#define PW1_ENABLE (1 << 2) +#define PW2_ENABLE (1 << 4) +#define PW3_ENABLE (1 << 6) +#define PW4_ENABLE (1 << 8) +#define PM0_ENABLE (1 << 16) +#define PM1_ENABLE (1 << 18) + +#define DC_CMD_INT_STATUS 0x037 +#define DC_CMD_INT_MASK 0x038 +#define DC_CMD_INT_ENABLE 0x039 +#define DC_CMD_INT_TYPE 0x03a +#define DC_CMD_INT_POLARITY 0x03b +#define CTXSW_INT (1 << 0) +#define FRAME_END_INT (1 << 1) +#define VBLANK_INT (1 << 2) +#define WIN_A_UF_INT (1 << 8) +#define WIN_B_UF_INT (1 << 9) +#define WIN_C_UF_INT (1 << 10) +#define WIN_A_OF_INT (1 << 14) +#define WIN_B_OF_INT (1 << 15) +#define WIN_C_OF_INT (1 << 16) + +#define DC_CMD_SIGNAL_RAISE1 0x03c +#define DC_CMD_SIGNAL_RAISE2 0x03d +#define DC_CMD_SIGNAL_RAISE3 0x03e + +#define DC_CMD_STATE_ACCESS 0x040 +#define READ_MUX (1 << 0) +#define WRITE_MUX (1 << 2) + +#define DC_CMD_STATE_CONTROL 0x041 +#define GENERAL_ACT_REQ (1 << 0) +#define WIN_A_ACT_REQ (1 << 1) +#define WIN_B_ACT_REQ (1 << 2) +#define WIN_C_ACT_REQ (1 << 3) +#define CURSOR_ACT_REQ (1 << 7) +#define GENERAL_UPDATE (1 << 8) +#define WIN_A_UPDATE (1 << 9) +#define WIN_B_UPDATE (1 << 10) +#define WIN_C_UPDATE (1 << 11) +#define CURSOR_UPDATE (1 << 15) +#define NC_HOST_TRIG (1 << 24) + +#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042 +#define WINDOW_A_SELECT (1 << 4) +#define WINDOW_B_SELECT (1 << 5) +#define WINDOW_C_SELECT (1 << 6) + +#define DC_CMD_REG_ACT_CONTROL 0x043 + +#define DC_COM_CRC_CONTROL 0x300 +#define DC_COM_CRC_CHECKSUM 0x301 +#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x)) +#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x)) +#define LVS_OUTPUT_POLARITY_LOW (1 << 28) +#define LHS_OUTPUT_POLARITY_LOW (1 << 30) +#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x)) +#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x)) +#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x)) +#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x)) + +#define DC_COM_PIN_MISC_CONTROL 0x31b +#define DC_COM_PIN_PM0_CONTROL 0x31c +#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d +#define DC_COM_PIN_PM1_CONTROL 0x31e +#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f + +#define DC_COM_SPI_CONTROL 0x320 +#define DC_COM_SPI_START_BYTE 0x321 +#define DC_COM_HSPI_WRITE_DATA_AB 0x322 +#define DC_COM_HSPI_WRITE_DATA_CD 0x323 +#define DC_COM_HSPI_CS_DC 0x324 +#define DC_COM_SCRATCH_REGISTER_A 0x325 +#define DC_COM_SCRATCH_REGISTER_B 0x326 +#define DC_COM_GPIO_CTRL 0x327 +#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328 +#define DC_COM_CRC_CHECKSUM_LATCHED 0x329 + 
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400 +#define H_PULSE_0_ENABLE (1 << 8) +#define H_PULSE_1_ENABLE (1 << 10) +#define H_PULSE_2_ENABLE (1 << 12) + +#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401 + +#define DC_DISP_DISP_WIN_OPTIONS 0x402 +#define HDMI_ENABLE (1 << 30) +#define DSI_ENABLE (1 << 29) +#define SOR_ENABLE (1 << 25) +#define CURSOR_ENABLE (1 << 16) + +#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403 +#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24) +#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16) +#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8) +#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0) + +#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404 +#define CURSOR_DELAY(x) (((x) & 0x3f) << 24) +#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16) +#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8) +#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0) + +#define DC_DISP_DISP_TIMING_OPTIONS 0x405 +#define VSYNC_H_POSITION(x) ((x) & 0xfff) + +#define DC_DISP_REF_TO_SYNC 0x406 +#define DC_DISP_SYNC_WIDTH 0x407 +#define DC_DISP_BACK_PORCH 0x408 +#define DC_DISP_ACTIVE 0x409 +#define DC_DISP_FRONT_PORCH 0x40a +#define DC_DISP_H_PULSE0_CONTROL 0x40b +#define DC_DISP_H_PULSE0_POSITION_A 0x40c +#define DC_DISP_H_PULSE0_POSITION_B 0x40d +#define DC_DISP_H_PULSE0_POSITION_C 0x40e +#define DC_DISP_H_PULSE0_POSITION_D 0x40f +#define DC_DISP_H_PULSE1_CONTROL 0x410 +#define DC_DISP_H_PULSE1_POSITION_A 0x411 +#define DC_DISP_H_PULSE1_POSITION_B 0x412 +#define DC_DISP_H_PULSE1_POSITION_C 0x413 +#define DC_DISP_H_PULSE1_POSITION_D 0x414 +#define DC_DISP_H_PULSE2_CONTROL 0x415 +#define DC_DISP_H_PULSE2_POSITION_A 0x416 +#define DC_DISP_H_PULSE2_POSITION_B 0x417 +#define DC_DISP_H_PULSE2_POSITION_C 0x418 +#define DC_DISP_H_PULSE2_POSITION_D 0x419 +#define DC_DISP_V_PULSE0_CONTROL 0x41a +#define DC_DISP_V_PULSE0_POSITION_A 0x41b +#define DC_DISP_V_PULSE0_POSITION_B 0x41c +#define DC_DISP_V_PULSE0_POSITION_C 0x41d +#define DC_DISP_V_PULSE1_CONTROL 0x41e +#define DC_DISP_V_PULSE1_POSITION_A 0x41f +#define DC_DISP_V_PULSE1_POSITION_B 0x420 +#define DC_DISP_V_PULSE1_POSITION_C 0x421 +#define DC_DISP_V_PULSE2_CONTROL 0x422 +#define DC_DISP_V_PULSE2_POSITION_A 0x423 +#define DC_DISP_V_PULSE3_CONTROL 0x424 +#define DC_DISP_V_PULSE3_POSITION_A 0x425 +#define DC_DISP_M0_CONTROL 0x426 +#define DC_DISP_M1_CONTROL 0x427 +#define DC_DISP_DI_CONTROL 0x428 +#define DC_DISP_PP_CONTROL 0x429 +#define DC_DISP_PP_SELECT_A 0x42a +#define DC_DISP_PP_SELECT_B 0x42b +#define DC_DISP_PP_SELECT_C 0x42c +#define DC_DISP_PP_SELECT_D 0x42d + +#define PULSE_MODE_NORMAL (0 << 3) +#define PULSE_MODE_ONE_CLOCK (1 << 3) +#define PULSE_POLARITY_HIGH (0 << 4) +#define PULSE_POLARITY_LOW (1 << 4) +#define PULSE_QUAL_ALWAYS (0 << 6) +#define PULSE_QUAL_VACTIVE (2 << 6) +#define PULSE_QUAL_VACTIVE1 (3 << 6) +#define PULSE_LAST_START_A (0 << 8) +#define PULSE_LAST_END_A (1 << 8) +#define PULSE_LAST_START_B (2 << 8) +#define PULSE_LAST_END_B (3 << 8) +#define PULSE_LAST_START_C (4 << 8) +#define PULSE_LAST_END_C (5 << 8) +#define PULSE_LAST_START_D (6 << 8) +#define PULSE_LAST_END_D (7 << 8) + +#define PULSE_START(x) (((x) & 0xfff) << 0) +#define PULSE_END(x) (((x) & 0xfff) << 16) + +#define DC_DISP_DISP_CLOCK_CONTROL 0x42e +#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8) +#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8) +#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8) +#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8) +#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8) +#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8) +#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8) +#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8) +#define 
PIXEL_CLK_DIVIDER_PCD12 (8 << 8) +#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8) +#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8) +#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8) +#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8) +#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff) + +#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f +#define DISP_DATA_FORMAT_DF1P1C (0 << 0) +#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0) +#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0) +#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0) +#define DISP_DATA_FORMAT_DF2S (4 << 0) +#define DISP_DATA_FORMAT_DF3S (5 << 0) +#define DISP_DATA_FORMAT_DFSPI (6 << 0) +#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0) +#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0) +#define DISP_ALIGNMENT_MSB (0 << 8) +#define DISP_ALIGNMENT_LSB (1 << 8) +#define DISP_ORDER_RED_BLUE (0 << 9) +#define DISP_ORDER_BLUE_RED (1 << 9) + +#define DC_DISP_DISP_COLOR_CONTROL 0x430 +#define BASE_COLOR_SIZE666 (0 << 0) +#define BASE_COLOR_SIZE111 (1 << 0) +#define BASE_COLOR_SIZE222 (2 << 0) +#define BASE_COLOR_SIZE333 (3 << 0) +#define BASE_COLOR_SIZE444 (4 << 0) +#define BASE_COLOR_SIZE555 (5 << 0) +#define BASE_COLOR_SIZE565 (6 << 0) +#define BASE_COLOR_SIZE332 (7 << 0) +#define BASE_COLOR_SIZE888 (8 << 0) +#define DITHER_CONTROL_DISABLE (0 << 8) +#define DITHER_CONTROL_ORDERED (2 << 8) +#define DITHER_CONTROL_ERRDIFF (3 << 8) + +#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 +#define SC1_H_QUALIFIER_NONE (1 << 16) +#define SC0_H_QUALIFIER_NONE (1 << 0) + +#define DC_DISP_DATA_ENABLE_OPTIONS 0x432 +#define DE_SELECT_ACTIVE_BLANK (0 << 0) +#define DE_SELECT_ACTIVE (1 << 0) +#define DE_SELECT_ACTIVE_IS (2 << 0) +#define DE_CONTROL_ONECLK (0 << 2) +#define DE_CONTROL_NORMAL (1 << 2) +#define DE_CONTROL_EARLY_EXT (2 << 2) +#define DE_CONTROL_EARLY (3 << 2) +#define DE_CONTROL_ACTIVE_BLANK (4 << 2) + +#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433 +#define DC_DISP_LCD_SPI_OPTIONS 0x434 +#define DC_DISP_BORDER_COLOR 0x435 +#define DC_DISP_COLOR_KEY0_LOWER 0x436 +#define DC_DISP_COLOR_KEY0_UPPER 0x437 +#define DC_DISP_COLOR_KEY1_LOWER 0x438 +#define DC_DISP_COLOR_KEY1_UPPER 0x439 + +#define DC_DISP_CURSOR_FOREGROUND 0x43c +#define DC_DISP_CURSOR_BACKGROUND 0x43d + +#define DC_DISP_CURSOR_START_ADDR 0x43e +#define CURSOR_CLIP_DISPLAY (0 << 28) +#define CURSOR_CLIP_WIN_A (1 << 28) +#define CURSOR_CLIP_WIN_B (2 << 28) +#define CURSOR_CLIP_WIN_C (3 << 28) +#define CURSOR_SIZE_32x32 (0 << 24) +#define CURSOR_SIZE_64x64 (1 << 24) +#define CURSOR_SIZE_128x128 (2 << 24) +#define CURSOR_SIZE_256x256 (3 << 24) +#define DC_DISP_CURSOR_START_ADDR_NS 0x43f + +#define DC_DISP_CURSOR_POSITION 0x440 +#define DC_DISP_CURSOR_POSITION_NS 0x441 + +#define DC_DISP_INIT_SEQ_CONTROL 0x442 +#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443 +#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444 +#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445 +#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446 + +#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480 +#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481 +#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482 +#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483 +#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484 + +#define DC_DISP_DAC_CRT_CTRL 0x4c0 +#define DC_DISP_DISP_MISC_CONTROL 0x4c1 +#define DC_DISP_SD_CONTROL 0x4c2 +#define DC_DISP_SD_CSC_COEFF 0x4c3 +#define DC_DISP_SD_LUT(x) (0x4c4 + (x)) +#define DC_DISP_SD_FLICKER_CONTROL 0x4cd +#define DC_DISP_DC_PIXEL_COUNT 0x4ce +#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x)) +#define DC_DISP_SD_BL_PARAMETERS 0x4d7 +#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x)) +#define DC_DISP_SD_BL_CONTROL 0x4dc +#define DC_DISP_SD_HW_K_VALUES 0x4dd 
+#define DC_DISP_SD_MAN_K_VALUES 0x4de + +#define DC_DISP_INTERLACE_CONTROL 0x4e5 +#define INTERLACE_STATUS (1 << 2) +#define INTERLACE_START (1 << 1) +#define INTERLACE_ENABLE (1 << 0) + +#define DC_DISP_CURSOR_START_ADDR_HI 0x4ec +#define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1 +#define CURSOR_MODE_LEGACY (0 << 24) +#define CURSOR_MODE_NORMAL (1 << 24) +#define CURSOR_DST_BLEND_ZERO (0 << 16) +#define CURSOR_DST_BLEND_K1 (1 << 16) +#define CURSOR_DST_BLEND_NEG_K1_TIMES_SRC (2 << 16) +#define CURSOR_DST_BLEND_MASK (3 << 16) +#define CURSOR_SRC_BLEND_K1 (0 << 8) +#define CURSOR_SRC_BLEND_K1_TIMES_SRC (1 << 8) +#define CURSOR_SRC_BLEND_MASK (3 << 8) +#define CURSOR_ALPHA 0xff + +#define DC_WIN_CSC_YOF 0x611 +#define DC_WIN_CSC_KYRGB 0x612 +#define DC_WIN_CSC_KUR 0x613 +#define DC_WIN_CSC_KVR 0x614 +#define DC_WIN_CSC_KUG 0x615 +#define DC_WIN_CSC_KVG 0x616 +#define DC_WIN_CSC_KUB 0x617 +#define DC_WIN_CSC_KVB 0x618 + +#define DC_WIN_WIN_OPTIONS 0x700 +#define H_DIRECTION (1 << 0) +#define V_DIRECTION (1 << 2) +#define COLOR_EXPAND (1 << 6) +#define CSC_ENABLE (1 << 18) +#define WIN_ENABLE (1 << 30) + +#define DC_WIN_BYTE_SWAP 0x701 +#define BYTE_SWAP_NOSWAP (0 << 0) +#define BYTE_SWAP_SWAP2 (1 << 0) +#define BYTE_SWAP_SWAP4 (2 << 0) +#define BYTE_SWAP_SWAP4HW (3 << 0) + +#define DC_WIN_BUFFER_CONTROL 0x702 +#define BUFFER_CONTROL_HOST (0 << 0) +#define BUFFER_CONTROL_VI (1 << 0) +#define BUFFER_CONTROL_EPP (2 << 0) +#define BUFFER_CONTROL_MPEGE (3 << 0) +#define BUFFER_CONTROL_SB2D (4 << 0) + +#define DC_WIN_COLOR_DEPTH 0x703 +#define WIN_COLOR_DEPTH_P1 0 +#define WIN_COLOR_DEPTH_P2 1 +#define WIN_COLOR_DEPTH_P4 2 +#define WIN_COLOR_DEPTH_P8 3 +#define WIN_COLOR_DEPTH_B4G4R4A4 4 +#define WIN_COLOR_DEPTH_B5G5R5A 5 +#define WIN_COLOR_DEPTH_B5G6R5 6 +#define WIN_COLOR_DEPTH_AB5G5R5 7 +#define WIN_COLOR_DEPTH_B8G8R8A8 12 +#define WIN_COLOR_DEPTH_R8G8B8A8 13 +#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14 +#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15 +#define WIN_COLOR_DEPTH_YCbCr422 16 +#define WIN_COLOR_DEPTH_YUV422 17 +#define WIN_COLOR_DEPTH_YCbCr420P 18 +#define WIN_COLOR_DEPTH_YUV420P 19 +#define WIN_COLOR_DEPTH_YCbCr422P 20 +#define WIN_COLOR_DEPTH_YUV422P 21 +#define WIN_COLOR_DEPTH_YCbCr422R 22 +#define WIN_COLOR_DEPTH_YUV422R 23 +#define WIN_COLOR_DEPTH_YCbCr422RA 24 +#define WIN_COLOR_DEPTH_YUV422RA 25 + +#define DC_WIN_POSITION 0x704 +#define H_POSITION(x) (((x) & 0x1fff) << 0) +#define V_POSITION(x) (((x) & 0x1fff) << 16) + +#define DC_WIN_SIZE 0x705 +#define H_SIZE(x) (((x) & 0x1fff) << 0) +#define V_SIZE(x) (((x) & 0x1fff) << 16) + +#define DC_WIN_PRESCALED_SIZE 0x706 +#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0) +#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16) + +#define DC_WIN_H_INITIAL_DDA 0x707 +#define DC_WIN_V_INITIAL_DDA 0x708 +#define DC_WIN_DDA_INC 0x709 +#define H_DDA_INC(x) (((x) & 0xffff) << 0) +#define V_DDA_INC(x) (((x) & 0xffff) << 16) + +#define DC_WIN_LINE_STRIDE 0x70a +#define DC_WIN_BUF_STRIDE 0x70b +#define DC_WIN_UV_BUF_STRIDE 0x70c +#define DC_WIN_BUFFER_ADDR_MODE 0x70d +#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0) +#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0) +#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16) +#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16) +#define DC_WIN_DV_CONTROL 0x70e + +#define DC_WIN_BLEND_NOKEY 0x70f +#define DC_WIN_BLEND_1WIN 0x710 +#define DC_WIN_BLEND_2WIN_X 0x711 +#define DC_WIN_BLEND_2WIN_Y 0x712 +#define DC_WIN_BLEND_3WIN_XY 0x713 + +#define DC_WIN_HP_FETCH_CONTROL 0x714 + +#define DC_WINBUF_START_ADDR 0x800 +#define 
DC_WINBUF_START_ADDR_NS 0x801 +#define DC_WINBUF_START_ADDR_U 0x802 +#define DC_WINBUF_START_ADDR_U_NS 0x803 +#define DC_WINBUF_START_ADDR_V 0x804 +#define DC_WINBUF_START_ADDR_V_NS 0x805 + +#define DC_WINBUF_ADDR_H_OFFSET 0x806 +#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807 +#define DC_WINBUF_ADDR_V_OFFSET 0x808 +#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809 + +#define DC_WINBUF_UFLOW_STATUS 0x80a +#define DC_WINBUF_SURFACE_KIND 0x80b +#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0) +#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0) +#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0) +#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4) + +#define DC_WINBUF_AD_UFLOW_STATUS 0xbca +#define DC_WINBUF_BD_UFLOW_STATUS 0xdca +#define DC_WINBUF_CD_UFLOW_STATUS 0xfca + +#endif /* TEGRA_DC_H */ diff --git a/kernel/drivers/gpu/drm/tegra/dpaux.c b/kernel/drivers/gpu/drm/tegra/dpaux.c new file mode 100644 index 000000000..d6b55e3e3 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/dpaux.c @@ -0,0 +1,574 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/regulator/consumer.h> +#include <linux/workqueue.h> + +#include <drm/drm_dp_helper.h> +#include <drm/drm_panel.h> + +#include "dpaux.h" +#include "drm.h" + +static DEFINE_MUTEX(dpaux_lock); +static LIST_HEAD(dpaux_list); + +struct tegra_dpaux { + struct drm_dp_aux aux; + struct device *dev; + + void __iomem *regs; + int irq; + + struct tegra_output *output; + + struct reset_control *rst; + struct clk *clk_parent; + struct clk *clk; + + struct regulator *vdd; + + struct completion complete; + struct work_struct work; + struct list_head list; +}; + +static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux) +{ + return container_of(aux, struct tegra_dpaux, aux); +} + +static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work) +{ + return container_of(work, struct tegra_dpaux, work); +} + +static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux, + unsigned long offset) +{ + return readl(dpaux->regs + (offset << 2)); +} + +static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux, + unsigned long value, + unsigned long offset) +{ + writel(value, dpaux->regs + (offset << 2)); +} + +static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer, + size_t size) +{ + unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0); + size_t i, j; + + for (i = 0; i < size; i += 4) { + size_t num = min_t(size_t, size - i, 4); + unsigned long value = 0; + + for (j = 0; j < num; j++) + value |= buffer[i + j] << (j * 8); + + tegra_dpaux_writel(dpaux, value, offset++); + } +} + +static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer, + size_t size) +{ + unsigned long offset = DPAUX_DP_AUXDATA_READ(0); + size_t i, j; + + for (i = 0; i < size; i += 4) { + size_t num = min_t(size_t, size - i, 4); + unsigned long value; + + value = tegra_dpaux_readl(dpaux, offset++); + + for (j = 0; j < num; j++) + buffer[i + j] = value >> (j * 8); + } +} + +static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + unsigned long timeout = msecs_to_jiffies(250); + struct 
tegra_dpaux *dpaux = to_dpaux(aux); + unsigned long status; + ssize_t ret = 0; + u32 value; + + /* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */ + if (msg->size > 16) + return -EINVAL; + + /* + * Allow zero-sized messages only for I2C, in which case they specify + * address-only transactions. + */ + if (msg->size < 1) { + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_READ: + value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY; + break; + + default: + return -EINVAL; + } + } else { + /* For non-zero-sized messages, set the CMDLEN field. */ + value = DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1); + } + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_I2C_WRITE: + if (msg->request & DP_AUX_I2C_MOT) + value |= DPAUX_DP_AUXCTL_CMD_MOT_WR; + else + value |= DPAUX_DP_AUXCTL_CMD_I2C_WR; + + break; + + case DP_AUX_I2C_READ: + if (msg->request & DP_AUX_I2C_MOT) + value |= DPAUX_DP_AUXCTL_CMD_MOT_RD; + else + value |= DPAUX_DP_AUXCTL_CMD_I2C_RD; + + break; + + case DP_AUX_I2C_STATUS: + if (msg->request & DP_AUX_I2C_MOT) + value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ; + else + value |= DPAUX_DP_AUXCTL_CMD_I2C_RQ; + + break; + + case DP_AUX_NATIVE_WRITE: + value |= DPAUX_DP_AUXCTL_CMD_AUX_WR; + break; + + case DP_AUX_NATIVE_READ: + value |= DPAUX_DP_AUXCTL_CMD_AUX_RD; + break; + + default: + return -EINVAL; + } + + tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR); + tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL); + + if ((msg->request & DP_AUX_I2C_READ) == 0) { + tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size); + ret = msg->size; + } + + /* start transaction */ + value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXCTL); + value |= DPAUX_DP_AUXCTL_TRANSACTREQ; + tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL); + + status = wait_for_completion_timeout(&dpaux->complete, timeout); + if (!status) + return -ETIMEDOUT; + + /* read status and clear errors */ + value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT); + tegra_dpaux_writel(dpaux, 0xf00, DPAUX_DP_AUXSTAT); + + if (value & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR) + return -ETIMEDOUT; + + if ((value & DPAUX_DP_AUXSTAT_RX_ERROR) || + (value & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR) || + (value & DPAUX_DP_AUXSTAT_NO_STOP_ERROR)) + return -EIO; + + switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) { + case 0x00: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + break; + + case 0x01: + msg->reply = DP_AUX_NATIVE_REPLY_NACK; + break; + + case 0x02: + msg->reply = DP_AUX_NATIVE_REPLY_DEFER; + break; + + case 0x04: + msg->reply = DP_AUX_I2C_REPLY_NACK; + break; + + case 0x08: + msg->reply = DP_AUX_I2C_REPLY_DEFER; + break; + } + + if ((msg->size > 0) && (msg->reply == DP_AUX_NATIVE_REPLY_ACK)) { + if (msg->request & DP_AUX_I2C_READ) { + size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK; + + if (WARN_ON(count != msg->size)) + count = min_t(size_t, count, msg->size); + + tegra_dpaux_read_fifo(dpaux, msg->buffer, count); + ret = count; + } + } + + return ret; +} + +static void tegra_dpaux_hotplug(struct work_struct *work) +{ + struct tegra_dpaux *dpaux = work_to_dpaux(work); + + if (dpaux->output) + drm_helper_hpd_irq_event(dpaux->output->connector.dev); +} + +static irqreturn_t tegra_dpaux_irq(int irq, void *data) +{ + struct tegra_dpaux *dpaux = data; + irqreturn_t ret = IRQ_HANDLED; + unsigned long value; + + /* clear interrupts */ + value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX); + tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX); + + if (value & (DPAUX_INTR_PLUG_EVENT | DPAUX_INTR_UNPLUG_EVENT)) + schedule_work(&dpaux->work); + 
+ if (value & DPAUX_INTR_IRQ_EVENT) { + /* TODO: handle this */ + } + + if (value & DPAUX_INTR_AUX_DONE) + complete(&dpaux->complete); + + return ret; +} + +static int tegra_dpaux_probe(struct platform_device *pdev) +{ + struct tegra_dpaux *dpaux; + struct resource *regs; + unsigned long value; + int err; + + dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL); + if (!dpaux) + return -ENOMEM; + + INIT_WORK(&dpaux->work, tegra_dpaux_hotplug); + init_completion(&dpaux->complete); + INIT_LIST_HEAD(&dpaux->list); + dpaux->dev = &pdev->dev; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dpaux->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(dpaux->regs)) + return PTR_ERR(dpaux->regs); + + dpaux->irq = platform_get_irq(pdev, 0); + if (dpaux->irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ\n"); + return -ENXIO; + } + + dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux"); + if (IS_ERR(dpaux->rst)) + return PTR_ERR(dpaux->rst); + + dpaux->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dpaux->clk)) + return PTR_ERR(dpaux->clk); + + err = clk_prepare_enable(dpaux->clk); + if (err < 0) + return err; + + reset_control_deassert(dpaux->rst); + + dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent"); + if (IS_ERR(dpaux->clk_parent)) + return PTR_ERR(dpaux->clk_parent); + + err = clk_prepare_enable(dpaux->clk_parent); + if (err < 0) + return err; + + err = clk_set_rate(dpaux->clk_parent, 270000000); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n", + err); + return err; + } + + dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(dpaux->vdd)) + return PTR_ERR(dpaux->vdd); + + err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0, + dev_name(dpaux->dev), dpaux); + if (err < 0) { + dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n", + dpaux->irq, err); + return err; + } + + dpaux->aux.transfer = tegra_dpaux_transfer; + dpaux->aux.dev = &pdev->dev; + + err = drm_dp_aux_register(&dpaux->aux); + if (err < 0) + return err; + + /* enable and clear all interrupts */ + value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT | + DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT; + tegra_dpaux_writel(dpaux, value, DPAUX_INTR_EN_AUX); + tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX); + + mutex_lock(&dpaux_lock); + list_add_tail(&dpaux->list, &dpaux_list); + mutex_unlock(&dpaux_lock); + + platform_set_drvdata(pdev, dpaux); + + return 0; +} + +static int tegra_dpaux_remove(struct platform_device *pdev) +{ + struct tegra_dpaux *dpaux = platform_get_drvdata(pdev); + + drm_dp_aux_unregister(&dpaux->aux); + + mutex_lock(&dpaux_lock); + list_del(&dpaux->list); + mutex_unlock(&dpaux_lock); + + cancel_work_sync(&dpaux->work); + + clk_disable_unprepare(dpaux->clk_parent); + reset_control_assert(dpaux->rst); + clk_disable_unprepare(dpaux->clk); + + return 0; +} + +static const struct of_device_id tegra_dpaux_of_match[] = { + { .compatible = "nvidia,tegra124-dpaux", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match); + +struct platform_driver tegra_dpaux_driver = { + .driver = { + .name = "tegra-dpaux", + .of_match_table = tegra_dpaux_of_match, + }, + .probe = tegra_dpaux_probe, + .remove = tegra_dpaux_remove, +}; + +struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np) +{ + struct tegra_dpaux *dpaux; + + mutex_lock(&dpaux_lock); + + list_for_each_entry(dpaux, &dpaux_list, list) + if (np == dpaux->dev->of_node) { + mutex_unlock(&dpaux_lock); + return dpaux; + } + + mutex_unlock(&dpaux_lock); + + return 
NULL; +} + +int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output) +{ + unsigned long timeout; + int err; + + output->connector.polled = DRM_CONNECTOR_POLL_HPD; + dpaux->output = output; + + err = regulator_enable(dpaux->vdd); + if (err < 0) + return err; + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + enum drm_connector_status status; + + status = tegra_dpaux_detect(dpaux); + if (status == connector_status_connected) + return 0; + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +int tegra_dpaux_detach(struct tegra_dpaux *dpaux) +{ + unsigned long timeout; + int err; + + err = regulator_disable(dpaux->vdd); + if (err < 0) + return err; + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + enum drm_connector_status status; + + status = tegra_dpaux_detect(dpaux); + if (status == connector_status_disconnected) { + dpaux->output = NULL; + return 0; + } + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux) +{ + unsigned long value; + + value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT); + + if (value & DPAUX_DP_AUXSTAT_HPD_STATUS) + return connector_status_connected; + + return connector_status_disconnected; +} + +int tegra_dpaux_enable(struct tegra_dpaux *dpaux) +{ + unsigned long value; + + value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV | + DPAUX_HYBRID_PADCTL_MODE_AUX; + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL); + + value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); + value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); + + return 0; +} + +int tegra_dpaux_disable(struct tegra_dpaux *dpaux) +{ + unsigned long value; + + value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); + value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); + + return 0; +} + +int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding) +{ + int err; + + err = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, + encoding); + if (err < 0) + return err; + + return 0; +} + +int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link, + u8 pattern) +{ + u8 tp = pattern & DP_TRAINING_PATTERN_MASK; + u8 status[DP_LINK_STATUS_SIZE], values[4]; + unsigned int i; + int err; + + err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern); + if (err < 0) + return err; + + if (tp == DP_TRAINING_PATTERN_DISABLE) + return 0; + + for (i = 0; i < link->num_lanes; i++) + values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | + DP_TRAIN_PRE_EMPH_LEVEL_0 | + DP_TRAIN_MAX_SWING_REACHED | + DP_TRAIN_VOLTAGE_SWING_LEVEL_0; + + err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values, + link->num_lanes); + if (err < 0) + return err; + + usleep_range(500, 1000); + + err = drm_dp_dpcd_read_link_status(&dpaux->aux, status); + if (err < 0) + return err; + + switch (tp) { + case DP_TRAINING_PATTERN_1: + if (!drm_dp_clock_recovery_ok(status, link->num_lanes)) + return -EAGAIN; + + break; + + case DP_TRAINING_PATTERN_2: + if (!drm_dp_channel_eq_ok(status, link->num_lanes)) + return -EAGAIN; + + break; + + default: + dev_err(dpaux->dev, "unsupported training pattern %u\n", tp); + return -EINVAL; + } + + err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0); + if (err < 0) + return err; 
+ + return 0; +} diff --git a/kernel/drivers/gpu/drm/tegra/dpaux.h b/kernel/drivers/gpu/drm/tegra/dpaux.h new file mode 100644 index 000000000..806e245ca --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/dpaux.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef DRM_TEGRA_DPAUX_H +#define DRM_TEGRA_DPAUX_H + +#define DPAUX_CTXSW 0x00 + +#define DPAUX_INTR_EN_AUX 0x01 +#define DPAUX_INTR_AUX 0x05 +#define DPAUX_INTR_AUX_DONE (1 << 3) +#define DPAUX_INTR_IRQ_EVENT (1 << 2) +#define DPAUX_INTR_UNPLUG_EVENT (1 << 1) +#define DPAUX_INTR_PLUG_EVENT (1 << 0) + +#define DPAUX_DP_AUXDATA_WRITE(x) (0x09 + ((x) << 2)) +#define DPAUX_DP_AUXDATA_READ(x) (0x19 + ((x) << 2)) +#define DPAUX_DP_AUXADDR 0x29 + +#define DPAUX_DP_AUXCTL 0x2d +#define DPAUX_DP_AUXCTL_TRANSACTREQ (1 << 16) +#define DPAUX_DP_AUXCTL_CMD_AUX_RD (9 << 12) +#define DPAUX_DP_AUXCTL_CMD_AUX_WR (8 << 12) +#define DPAUX_DP_AUXCTL_CMD_MOT_RQ (6 << 12) +#define DPAUX_DP_AUXCTL_CMD_MOT_RD (5 << 12) +#define DPAUX_DP_AUXCTL_CMD_MOT_WR (4 << 12) +#define DPAUX_DP_AUXCTL_CMD_I2C_RQ (2 << 12) +#define DPAUX_DP_AUXCTL_CMD_I2C_RD (1 << 12) +#define DPAUX_DP_AUXCTL_CMD_I2C_WR (0 << 12) +#define DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY (1 << 8) +#define DPAUX_DP_AUXCTL_CMDLEN(x) ((x) & 0xff) + +#define DPAUX_DP_AUXSTAT 0x31 +#define DPAUX_DP_AUXSTAT_HPD_STATUS (1 << 28) +#define DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK (0xf0000) +#define DPAUX_DP_AUXSTAT_NO_STOP_ERROR (1 << 11) +#define DPAUX_DP_AUXSTAT_SINKSTAT_ERROR (1 << 10) +#define DPAUX_DP_AUXSTAT_RX_ERROR (1 << 9) +#define DPAUX_DP_AUXSTAT_TIMEOUT_ERROR (1 << 8) +#define DPAUX_DP_AUXSTAT_REPLY_MASK (0xff) + +#define DPAUX_DP_AUX_SINKSTAT_LO 0x35 +#define DPAUX_DP_AUX_SINKSTAT_HI 0x39 + +#define DPAUX_HPD_CONFIG 0x3d +#define DPAUX_HPD_CONFIG_UNPLUG_MIN_TIME(x) (((x) & 0xffff) << 16) +#define DPAUX_HPD_CONFIG_PLUG_MIN_TIME(x) ((x) & 0xffff) + +#define DPAUX_HPD_IRQ_CONFIG 0x41 +#define DPAUX_HPD_IRQ_CONFIG_MIN_LOW_TIME(x) ((x) & 0xffff) + +#define DPAUX_DP_AUX_CONFIG 0x45 + +#define DPAUX_HYBRID_PADCTL 0x49 +#define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12) +#define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8) +#define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2) +#define DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV (1 << 1) +#define DPAUX_HYBRID_PADCTL_MODE_I2C (1 << 0) +#define DPAUX_HYBRID_PADCTL_MODE_AUX (0 << 0) + +#define DPAUX_HYBRID_SPARE 0x4d +#define DPAUX_HYBRID_SPARE_PAD_POWER_DOWN (1 << 0) + +#define DPAUX_SCRATCH_REG0 0x51 +#define DPAUX_SCRATCH_REG1 0x55 +#define DPAUX_SCRATCH_REG2 0x59 + +#endif diff --git a/kernel/drivers/gpu/drm/tegra/drm.c b/kernel/drivers/gpu/drm/tegra/drm.c new file mode 100644 index 000000000..bfad15a91 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/drm.c @@ -0,0 +1,1135 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/host1x.h> +#include <linux/iommu.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> + +#include "drm.h" +#include "gem.h" + +#define DRIVER_NAME "tegra" +#define DRIVER_DESC "NVIDIA Tegra graphics" +#define DRIVER_DATE "20120330" +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +struct tegra_drm_file { + struct list_head contexts; +}; + +static void tegra_atomic_schedule(struct tegra_drm *tegra, + struct drm_atomic_state *state) +{ + tegra->commit.state = state; + schedule_work(&tegra->commit.work); +} + +static void tegra_atomic_complete(struct tegra_drm *tegra, + struct drm_atomic_state *state) +{ + struct drm_device *drm = tegra->drm; + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). + * + * This scheme allows new atomic state updates to be prepared and + * checked in parallel to the asynchronous completion of the previous + * update. Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout. + */ + + drm_atomic_helper_commit_modeset_disables(drm, state); + drm_atomic_helper_commit_planes(drm, state); + drm_atomic_helper_commit_modeset_enables(drm, state); + + drm_atomic_helper_wait_for_vblanks(drm, state); + + drm_atomic_helper_cleanup_planes(drm, state); + drm_atomic_state_free(state); +} + +static void tegra_atomic_work(struct work_struct *work) +{ + struct tegra_drm *tegra = container_of(work, struct tegra_drm, + commit.work); + + tegra_atomic_complete(tegra, tegra->commit.state); +} + +static int tegra_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *state, bool async) +{ + struct tegra_drm *tegra = drm->dev_private; + int err; + + err = drm_atomic_helper_prepare_planes(drm, state); + if (err) + return err; + + /* serialize outstanding asynchronous commits */ + mutex_lock(&tegra->commit.lock); + flush_work(&tegra->commit.work); + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. 
+ */ + + drm_atomic_helper_swap_state(drm, state); + + if (async) + tegra_atomic_schedule(tegra, state); + else + tegra_atomic_complete(tegra, state); + + mutex_unlock(&tegra->commit.lock); + return 0; +} + +static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { + .fb_create = tegra_fb_create, +#ifdef CONFIG_DRM_TEGRA_FBDEV + .output_poll_changed = tegra_fb_output_poll_changed, +#endif + .atomic_check = drm_atomic_helper_check, + .atomic_commit = tegra_atomic_commit, +}; + +static int tegra_drm_load(struct drm_device *drm, unsigned long flags) +{ + struct host1x_device *device = to_host1x_device(drm->dev); + struct tegra_drm *tegra; + int err; + + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); + if (!tegra) + return -ENOMEM; + + if (iommu_present(&platform_bus_type)) { + tegra->domain = iommu_domain_alloc(&platform_bus_type); + if (!tegra->domain) { + err = -ENOMEM; + goto free; + } + + DRM_DEBUG("IOMMU context initialized\n"); + drm_mm_init(&tegra->mm, 0, SZ_2G); + } + + mutex_init(&tegra->clients_lock); + INIT_LIST_HEAD(&tegra->clients); + + mutex_init(&tegra->commit.lock); + INIT_WORK(&tegra->commit.work, tegra_atomic_work); + + drm->dev_private = tegra; + tegra->drm = drm; + + drm_mode_config_init(drm); + + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + + drm->mode_config.funcs = &tegra_drm_mode_funcs; + + err = tegra_drm_fb_prepare(drm); + if (err < 0) + goto config; + + drm_kms_helper_poll_init(drm); + + err = host1x_device_init(device); + if (err < 0) + goto fbdev; + + drm_mode_config_reset(drm); + + /* + * We don't use the drm_irq_install() helpers provided by the DRM + * core, so we need to set this manually in order to allow the + * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
+ */ + drm->irq_enabled = true; + + /* syncpoints are used for full 32-bit hardware VBLANK counters */ + drm->max_vblank_count = 0xffffffff; + + err = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (err < 0) + goto device; + + err = tegra_drm_fb_init(drm); + if (err < 0) + goto vblank; + + return 0; + +vblank: + drm_vblank_cleanup(drm); +device: + host1x_device_exit(device); +fbdev: + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_free(drm); +config: + drm_mode_config_cleanup(drm); + + if (tegra->domain) { + iommu_domain_free(tegra->domain); + drm_mm_takedown(&tegra->mm); + } +free: + kfree(tegra); + return err; +} + +static int tegra_drm_unload(struct drm_device *drm) +{ + struct host1x_device *device = to_host1x_device(drm->dev); + struct tegra_drm *tegra = drm->dev_private; + int err; + + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_exit(drm); + drm_mode_config_cleanup(drm); + drm_vblank_cleanup(drm); + + err = host1x_device_exit(device); + if (err < 0) + return err; + + if (tegra->domain) { + iommu_domain_free(tegra->domain); + drm_mm_takedown(&tegra->mm); + } + + kfree(tegra); + + return 0; +} + +static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) +{ + struct tegra_drm_file *fpriv; + + fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); + if (!fpriv) + return -ENOMEM; + + INIT_LIST_HEAD(&fpriv->contexts); + filp->driver_priv = fpriv; + + return 0; +} + +static void tegra_drm_context_free(struct tegra_drm_context *context) +{ + context->client->ops->close_channel(context); + kfree(context); +} + +static void tegra_drm_lastclose(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + + tegra_fbdev_restore_mode(tegra->fbdev); +#endif +} + +static struct host1x_bo * +host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle) +{ + struct drm_gem_object *gem; + struct tegra_bo *bo; + + gem = drm_gem_object_lookup(drm, file, handle); + if (!gem) + return NULL; + + mutex_lock(&drm->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&drm->struct_mutex); + + bo = to_tegra_bo(gem); + return &bo->base; +} + +static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, + struct drm_tegra_reloc __user *src, + struct drm_device *drm, + struct drm_file *file) +{ + u32 cmdbuf, target; + int err; + + err = get_user(cmdbuf, &src->cmdbuf.handle); + if (err < 0) + return err; + + err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset); + if (err < 0) + return err; + + err = get_user(target, &src->target.handle); + if (err < 0) + return err; + + err = get_user(dest->target.offset, &src->target.offset); + if (err < 0) + return err; + + err = get_user(dest->shift, &src->shift); + if (err < 0) + return err; + + dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf); + if (!dest->cmdbuf.bo) + return -ENOENT; + + dest->target.bo = host1x_bo_lookup(drm, file, target); + if (!dest->target.bo) + return -ENOENT; + + return 0; +} + +int tegra_drm_submit(struct tegra_drm_context *context, + struct drm_tegra_submit *args, struct drm_device *drm, + struct drm_file *file) +{ + unsigned int num_cmdbufs = args->num_cmdbufs; + unsigned int num_relocs = args->num_relocs; + unsigned int num_waitchks = args->num_waitchks; + struct drm_tegra_cmdbuf __user *cmdbufs = + (void __user *)(uintptr_t)args->cmdbufs; + struct drm_tegra_reloc __user *relocs = + (void __user *)(uintptr_t)args->relocs; + struct drm_tegra_waitchk __user *waitchks = + (void __user *)(uintptr_t)args->waitchks; + struct drm_tegra_syncpt syncpt; + struct 
host1x_job *job; + int err; + + /* We don't yet support other than one syncpt_incr struct per submit */ + if (args->num_syncpts != 1) + return -EINVAL; + + job = host1x_job_alloc(context->channel, args->num_cmdbufs, + args->num_relocs, args->num_waitchks); + if (!job) + return -ENOMEM; + + job->num_relocs = args->num_relocs; + job->num_waitchk = args->num_waitchks; + job->client = (u32)args->context; + job->class = context->client->base.class; + job->serialize = true; + + while (num_cmdbufs) { + struct drm_tegra_cmdbuf cmdbuf; + struct host1x_bo *bo; + + if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) { + err = -EFAULT; + goto fail; + } + + bo = host1x_bo_lookup(drm, file, cmdbuf.handle); + if (!bo) { + err = -ENOENT; + goto fail; + } + + host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset); + num_cmdbufs--; + cmdbufs++; + } + + /* copy and resolve relocations from submit */ + while (num_relocs--) { + err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs], + &relocs[num_relocs], drm, + file); + if (err < 0) + goto fail; + } + + if (copy_from_user(job->waitchk, waitchks, + sizeof(*waitchks) * num_waitchks)) { + err = -EFAULT; + goto fail; + } + + if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts, + sizeof(syncpt))) { + err = -EFAULT; + goto fail; + } + + job->is_addr_reg = context->client->ops->is_addr_reg; + job->syncpt_incrs = syncpt.incrs; + job->syncpt_id = syncpt.id; + job->timeout = 10000; + + if (args->timeout && args->timeout < 10000) + job->timeout = args->timeout; + + err = host1x_job_pin(job, context->client->base.dev); + if (err) + goto fail; + + err = host1x_job_submit(job); + if (err) + goto fail_submit; + + args->fence = job->syncpt_end; + + host1x_job_put(job); + return 0; + +fail_submit: + host1x_job_unpin(job); +fail: + host1x_job_put(job); + return err; +} + + +#ifdef CONFIG_DRM_TEGRA_STAGING +static struct tegra_drm_context *tegra_drm_get_context(__u64 context) +{ + return (struct tegra_drm_context *)(uintptr_t)context; +} + +static bool tegra_drm_file_owns_context(struct tegra_drm_file *file, + struct tegra_drm_context *context) +{ + struct tegra_drm_context *ctx; + + list_for_each_entry(ctx, &file->contexts, list) + if (ctx == context) + return true; + + return false; +} + +static int tegra_gem_create(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_create *args = data; + struct tegra_bo *bo; + + bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags, + &args->handle); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + return 0; +} + +static int tegra_gem_mmap(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_mmap *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -EINVAL; + + bo = to_tegra_bo(gem); + + args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node); + + drm_gem_object_unreference(gem); + + return 0; +} + +static int tegra_syncpt_read(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct host1x *host = dev_get_drvdata(drm->dev->parent); + struct drm_tegra_syncpt_read *args = data; + struct host1x_syncpt *sp; + + sp = host1x_syncpt_get(host, args->id); + if (!sp) + return -EINVAL; + + args->value = host1x_syncpt_read_min(sp); + return 0; +} + +static int tegra_syncpt_incr(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct host1x *host1x = dev_get_drvdata(drm->dev->parent); + struct drm_tegra_syncpt_incr 
*args = data; + struct host1x_syncpt *sp; + + sp = host1x_syncpt_get(host1x, args->id); + if (!sp) + return -EINVAL; + + return host1x_syncpt_incr(sp); +} + +static int tegra_syncpt_wait(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct host1x *host1x = dev_get_drvdata(drm->dev->parent); + struct drm_tegra_syncpt_wait *args = data; + struct host1x_syncpt *sp; + + sp = host1x_syncpt_get(host1x, args->id); + if (!sp) + return -EINVAL; + + return host1x_syncpt_wait(sp, args->thresh, args->timeout, + &args->value); +} + +static int tegra_open_channel(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct tegra_drm_file *fpriv = file->driver_priv; + struct tegra_drm *tegra = drm->dev_private; + struct drm_tegra_open_channel *args = data; + struct tegra_drm_context *context; + struct tegra_drm_client *client; + int err = -ENODEV; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + list_for_each_entry(client, &tegra->clients, list) + if (client->base.class == args->client) { + err = client->ops->open_channel(client, context); + if (err) + break; + + list_add(&context->list, &fpriv->contexts); + args->context = (uintptr_t)context; + context->client = client; + return 0; + } + + kfree(context); + return err; +} + +static int tegra_close_channel(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct tegra_drm_file *fpriv = file->driver_priv; + struct drm_tegra_close_channel *args = data; + struct tegra_drm_context *context; + + context = tegra_drm_get_context(args->context); + + if (!tegra_drm_file_owns_context(fpriv, context)) + return -EINVAL; + + list_del(&context->list); + tegra_drm_context_free(context); + + return 0; +} + +static int tegra_get_syncpt(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct tegra_drm_file *fpriv = file->driver_priv; + struct drm_tegra_get_syncpt *args = data; + struct tegra_drm_context *context; + struct host1x_syncpt *syncpt; + + context = tegra_drm_get_context(args->context); + + if (!tegra_drm_file_owns_context(fpriv, context)) + return -ENODEV; + + if (args->index >= context->client->base.num_syncpts) + return -EINVAL; + + syncpt = context->client->base.syncpts[args->index]; + args->id = host1x_syncpt_id(syncpt); + + return 0; +} + +static int tegra_submit(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct tegra_drm_file *fpriv = file->driver_priv; + struct drm_tegra_submit *args = data; + struct tegra_drm_context *context; + + context = tegra_drm_get_context(args->context); + + if (!tegra_drm_file_owns_context(fpriv, context)) + return -ENODEV; + + return context->client->ops->submit(context, args, drm, file); +} + +static int tegra_get_syncpt_base(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct tegra_drm_file *fpriv = file->driver_priv; + struct drm_tegra_get_syncpt_base *args = data; + struct tegra_drm_context *context; + struct host1x_syncpt_base *base; + struct host1x_syncpt *syncpt; + + context = tegra_drm_get_context(args->context); + + if (!tegra_drm_file_owns_context(fpriv, context)) + return -ENODEV; + + if (args->syncpt >= context->client->base.num_syncpts) + return -EINVAL; + + syncpt = context->client->base.syncpts[args->syncpt]; + + base = host1x_syncpt_get_base(syncpt); + if (!base) + return -ENXIO; + + args->id = host1x_syncpt_base_id(base); + + return 0; +} + +static int tegra_gem_set_tiling(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct 
drm_tegra_gem_set_tiling *args = data; + enum tegra_bo_tiling_mode mode; + struct drm_gem_object *gem; + unsigned long value = 0; + struct tegra_bo *bo; + + switch (args->mode) { + case DRM_TEGRA_GEM_TILING_MODE_PITCH: + mode = TEGRA_BO_TILING_MODE_PITCH; + + if (args->value != 0) + return -EINVAL; + + break; + + case DRM_TEGRA_GEM_TILING_MODE_TILED: + mode = TEGRA_BO_TILING_MODE_TILED; + + if (args->value != 0) + return -EINVAL; + + break; + + case DRM_TEGRA_GEM_TILING_MODE_BLOCK: + mode = TEGRA_BO_TILING_MODE_BLOCK; + + if (args->value > 5) + return -EINVAL; + + value = args->value; + break; + + default: + return -EINVAL; + } + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + + bo->tiling.mode = mode; + bo->tiling.value = value; + + drm_gem_object_unreference(gem); + + return 0; +} + +static int tegra_gem_get_tiling(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_get_tiling *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + int err = 0; + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + + switch (bo->tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH; + args->value = 0; + break; + + case TEGRA_BO_TILING_MODE_TILED: + args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED; + args->value = 0; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK; + args->value = bo->tiling.value; + break; + + default: + err = -EINVAL; + break; + } + + drm_gem_object_unreference(gem); + + return err; +} + +static int tegra_gem_set_flags(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_set_flags *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + + if (args->flags & ~DRM_TEGRA_GEM_FLAGS) + return -EINVAL; + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + bo->flags = 0; + + if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP) + bo->flags |= TEGRA_BO_BOTTOM_UP; + + drm_gem_object_unreference(gem); + + return 0; +} + +static int tegra_gem_get_flags(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_get_flags *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + args->flags = 0; + + if (bo->flags & TEGRA_BO_BOTTOM_UP) + args->flags |= DRM_TEGRA_GEM_BOTTOM_UP; + + drm_gem_object_unreference(gem); + + return 0; +} +#endif + +static const struct drm_ioctl_desc tegra_drm_ioctls[] = { +#ifdef CONFIG_DRM_TEGRA_STAGING + DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, 
tegra_gem_set_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED), +#endif +}; + +static const struct file_operations tegra_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = tegra_drm_mmap, + .poll = drm_poll, + .read = drm_read, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .llseek = noop_llseek, +}; + +static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, + unsigned int pipe) +{ + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) { + if (pipe == drm_crtc_index(crtc)) + return crtc; + } + + return NULL; +} + +static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe) +{ + struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct tegra_dc *dc = to_tegra_dc(crtc); + + if (!crtc) + return 0; + + return tegra_dc_get_vblank_counter(dc); +} + +static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe) +{ + struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct tegra_dc *dc = to_tegra_dc(crtc); + + if (!crtc) + return -ENODEV; + + tegra_dc_enable_vblank(dc); + + return 0; +} + +static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe) +{ + struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct tegra_dc *dc = to_tegra_dc(crtc); + + if (crtc) + tegra_dc_disable_vblank(dc); +} + +static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file) +{ + struct tegra_drm_file *fpriv = file->driver_priv; + struct tegra_drm_context *context, *tmp; + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) + tegra_dc_cancel_page_flip(crtc, file); + + list_for_each_entry_safe(context, tmp, &fpriv->contexts, list) + tegra_drm_context_free(context); + + kfree(fpriv); +} + +#ifdef CONFIG_DEBUG_FS +static int tegra_debugfs_framebuffers(struct seq_file *s, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct drm_device *drm = node->minor->dev; + struct drm_framebuffer *fb; + + mutex_lock(&drm->mode_config.fb_lock); + + list_for_each_entry(fb, &drm->mode_config.fb_list, head) { + seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n", + fb->base.id, fb->width, fb->height, fb->depth, + fb->bits_per_pixel, + atomic_read(&fb->refcount.refcount)); + } + + mutex_unlock(&drm->mode_config.fb_lock); + + return 0; +} + +static int tegra_debugfs_iova(struct seq_file *s, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct drm_device *drm = node->minor->dev; + struct tegra_drm *tegra = drm->dev_private; + + return drm_mm_dump_table(s, &tegra->mm); +} + +static struct drm_info_list tegra_debugfs_list[] = { + { "framebuffers", tegra_debugfs_framebuffers, 0 }, + { "iova", tegra_debugfs_iova, 0 }, +}; + +static int tegra_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(tegra_debugfs_list, + ARRAY_SIZE(tegra_debugfs_list), + minor->debugfs_root, minor); +} + +static void tegra_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(tegra_debugfs_list, + ARRAY_SIZE(tegra_debugfs_list), minor); +} +#endif + +static struct drm_driver tegra_drm_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, + .load = tegra_drm_load, + 
.unload = tegra_drm_unload, + .open = tegra_drm_open, + .preclose = tegra_drm_preclose, + .lastclose = tegra_drm_lastclose, + + .get_vblank_counter = tegra_drm_get_vblank_counter, + .enable_vblank = tegra_drm_enable_vblank, + .disable_vblank = tegra_drm_disable_vblank, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = tegra_debugfs_init, + .debugfs_cleanup = tegra_debugfs_cleanup, +#endif + + .gem_free_object = tegra_bo_free_object, + .gem_vm_ops = &tegra_bo_vm_ops, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = tegra_gem_prime_export, + .gem_prime_import = tegra_gem_prime_import, + + .dumb_create = tegra_bo_dumb_create, + .dumb_map_offset = tegra_bo_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + + .ioctls = tegra_drm_ioctls, + .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), + .fops = &tegra_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +int tegra_drm_register_client(struct tegra_drm *tegra, + struct tegra_drm_client *client) +{ + mutex_lock(&tegra->clients_lock); + list_add_tail(&client->list, &tegra->clients); + mutex_unlock(&tegra->clients_lock); + + return 0; +} + +int tegra_drm_unregister_client(struct tegra_drm *tegra, + struct tegra_drm_client *client) +{ + mutex_lock(&tegra->clients_lock); + list_del_init(&client->list); + mutex_unlock(&tegra->clients_lock); + + return 0; +} + +static int host1x_drm_probe(struct host1x_device *dev) +{ + struct drm_driver *driver = &tegra_drm_driver; + struct drm_device *drm; + int err; + + drm = drm_dev_alloc(driver, &dev->dev); + if (!drm) + return -ENOMEM; + + drm_dev_set_unique(drm, dev_name(&dev->dev)); + dev_set_drvdata(&dev->dev, drm); + + err = drm_dev_register(drm, 0); + if (err < 0) + goto unref; + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, + driver->major, driver->minor, driver->patchlevel, + driver->date, drm->primary->index); + + return 0; + +unref: + drm_dev_unref(drm); + return err; +} + +static int host1x_drm_remove(struct host1x_device *dev) +{ + struct drm_device *drm = dev_get_drvdata(&dev->dev); + + drm_dev_unregister(drm); + drm_dev_unref(drm); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int host1x_drm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_kms_helper_poll_disable(drm); + + return 0; +} + +static int host1x_drm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_kms_helper_poll_enable(drm); + + return 0; +} +#endif + +static const struct dev_pm_ops host1x_drm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume) +}; + +static const struct of_device_id host1x_drm_subdevs[] = { + { .compatible = "nvidia,tegra20-dc", }, + { .compatible = "nvidia,tegra20-hdmi", }, + { .compatible = "nvidia,tegra20-gr2d", }, + { .compatible = "nvidia,tegra20-gr3d", }, + { .compatible = "nvidia,tegra30-dc", }, + { .compatible = "nvidia,tegra30-hdmi", }, + { .compatible = "nvidia,tegra30-gr2d", }, + { .compatible = "nvidia,tegra30-gr3d", }, + { .compatible = "nvidia,tegra114-dsi", }, + { .compatible = "nvidia,tegra114-hdmi", }, + { .compatible = "nvidia,tegra114-gr3d", }, + { .compatible = "nvidia,tegra124-dc", }, + { .compatible = "nvidia,tegra124-sor", }, + { .compatible = "nvidia,tegra124-hdmi", }, + { /* sentinel */ } +}; + +static struct host1x_driver host1x_drm_driver = { + .driver = { + .name = 
"drm", + .pm = &host1x_drm_pm_ops, + }, + .probe = host1x_drm_probe, + .remove = host1x_drm_remove, + .subdevs = host1x_drm_subdevs, +}; + +static int __init host1x_drm_init(void) +{ + int err; + + err = host1x_driver_register(&host1x_drm_driver); + if (err < 0) + return err; + + err = platform_driver_register(&tegra_dc_driver); + if (err < 0) + goto unregister_host1x; + + err = platform_driver_register(&tegra_dsi_driver); + if (err < 0) + goto unregister_dc; + + err = platform_driver_register(&tegra_sor_driver); + if (err < 0) + goto unregister_dsi; + + err = platform_driver_register(&tegra_hdmi_driver); + if (err < 0) + goto unregister_sor; + + err = platform_driver_register(&tegra_dpaux_driver); + if (err < 0) + goto unregister_hdmi; + + err = platform_driver_register(&tegra_gr2d_driver); + if (err < 0) + goto unregister_dpaux; + + err = platform_driver_register(&tegra_gr3d_driver); + if (err < 0) + goto unregister_gr2d; + + return 0; + +unregister_gr2d: + platform_driver_unregister(&tegra_gr2d_driver); +unregister_dpaux: + platform_driver_unregister(&tegra_dpaux_driver); +unregister_hdmi: + platform_driver_unregister(&tegra_hdmi_driver); +unregister_sor: + platform_driver_unregister(&tegra_sor_driver); +unregister_dsi: + platform_driver_unregister(&tegra_dsi_driver); +unregister_dc: + platform_driver_unregister(&tegra_dc_driver); +unregister_host1x: + host1x_driver_unregister(&host1x_drm_driver); + return err; +} +module_init(host1x_drm_init); + +static void __exit host1x_drm_exit(void) +{ + platform_driver_unregister(&tegra_gr3d_driver); + platform_driver_unregister(&tegra_gr2d_driver); + platform_driver_unregister(&tegra_dpaux_driver); + platform_driver_unregister(&tegra_hdmi_driver); + platform_driver_unregister(&tegra_sor_driver); + platform_driver_unregister(&tegra_dsi_driver); + platform_driver_unregister(&tegra_dc_driver); + host1x_driver_unregister(&host1x_drm_driver); +} +module_exit(host1x_drm_exit); + +MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); +MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/gpu/drm/tegra/drm.h b/kernel/drivers/gpu/drm/tegra/drm.h new file mode 100644 index 000000000..659b2fcc9 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/drm.h @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef HOST1X_DRM_H +#define HOST1X_DRM_H 1 + +#include <uapi/drm/tegra_drm.h> +#include <linux/host1x.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fixed.h> + +#include "gem.h" + +struct reset_control; + +struct tegra_fb { + struct drm_framebuffer base; + struct tegra_bo **planes; + unsigned int num_planes; +}; + +#ifdef CONFIG_DRM_TEGRA_FBDEV +struct tegra_fbdev { + struct drm_fb_helper base; + struct tegra_fb *fb; +}; +#endif + +struct tegra_drm { + struct drm_device *drm; + + struct iommu_domain *domain; + struct drm_mm mm; + + struct mutex clients_lock; + struct list_head clients; + +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_fbdev *fbdev; +#endif + + unsigned int pitch_align; + + struct { + struct drm_atomic_state *state; + struct work_struct work; + struct mutex lock; + } commit; +}; + +struct tegra_drm_client; + +struct tegra_drm_context { + struct tegra_drm_client *client; + struct host1x_channel *channel; + struct list_head list; +}; + +struct tegra_drm_client_ops { + int (*open_channel)(struct tegra_drm_client *client, + struct tegra_drm_context *context); + void (*close_channel)(struct tegra_drm_context *context); + int (*is_addr_reg)(struct device *dev, u32 class, u32 offset); + int (*submit)(struct tegra_drm_context *context, + struct drm_tegra_submit *args, struct drm_device *drm, + struct drm_file *file); +}; + +int tegra_drm_submit(struct tegra_drm_context *context, + struct drm_tegra_submit *args, struct drm_device *drm, + struct drm_file *file); + +struct tegra_drm_client { + struct host1x_client base; + struct list_head list; + + const struct tegra_drm_client_ops *ops; +}; + +static inline struct tegra_drm_client * +host1x_to_drm_client(struct host1x_client *client) +{ + return container_of(client, struct tegra_drm_client, base); +} + +int tegra_drm_register_client(struct tegra_drm *tegra, + struct tegra_drm_client *client); +int tegra_drm_unregister_client(struct tegra_drm *tegra, + struct tegra_drm_client *client); + +int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); +int tegra_drm_exit(struct tegra_drm *tegra); + +struct tegra_dc_soc_info; +struct tegra_output; + +struct tegra_dc { + struct host1x_client client; + struct host1x_syncpt *syncpt; + struct device *dev; + spinlock_t lock; + + struct drm_crtc base; + int powergate; + int pipe; + + struct clk *clk; + struct reset_control *rst; + void __iomem *regs; + int irq; + + struct tegra_output *rgb; + + struct list_head list; + + struct drm_info_list *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; + + /* page-flip handling */ + struct drm_pending_vblank_event *event; + + const struct tegra_dc_soc_info *soc; + + struct iommu_domain *domain; +}; + +static inline struct tegra_dc * +host1x_client_to_dc(struct host1x_client *client) +{ + return container_of(client, struct tegra_dc, client); +} + +static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) +{ + return crtc ? 
container_of(crtc, struct tegra_dc, base) : NULL; +} + +static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value, + unsigned long offset) +{ + writel(value, dc->regs + (offset << 2)); +} + +static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset) +{ + return readl(dc->regs + (offset << 2)); +} + +struct tegra_dc_window { + struct { + unsigned int x; + unsigned int y; + unsigned int w; + unsigned int h; + } src; + struct { + unsigned int x; + unsigned int y; + unsigned int w; + unsigned int h; + } dst; + unsigned int bits_per_pixel; + unsigned int stride[2]; + unsigned long base[3]; + bool bottom_up; + + struct tegra_bo_tiling tiling; + u32 format; + u32 swap; +}; + +/* from dc.c */ +u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc); +void tegra_dc_enable_vblank(struct tegra_dc *dc); +void tegra_dc_disable_vblank(struct tegra_dc *dc); +void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); +void tegra_dc_commit(struct tegra_dc *dc); +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div); + +struct tegra_output { + struct device_node *of_node; + struct device *dev; + + struct drm_panel *panel; + struct i2c_adapter *ddc; + const struct edid *edid; + unsigned int hpd_irq; + int hpd_gpio; + + struct drm_encoder encoder; + struct drm_connector connector; +}; + +static inline struct tegra_output *encoder_to_output(struct drm_encoder *e) +{ + return container_of(e, struct tegra_output, encoder); +} + +static inline struct tegra_output *connector_to_output(struct drm_connector *c) +{ + return container_of(c, struct tegra_output, connector); +} + +/* from rgb.c */ +int tegra_dc_rgb_probe(struct tegra_dc *dc); +int tegra_dc_rgb_remove(struct tegra_dc *dc); +int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); +int tegra_dc_rgb_exit(struct tegra_dc *dc); + +/* from output.c */ +int tegra_output_probe(struct tegra_output *output); +void tegra_output_remove(struct tegra_output *output); +int tegra_output_init(struct drm_device *drm, struct tegra_output *output); +void tegra_output_exit(struct tegra_output *output); + +int tegra_output_connector_get_modes(struct drm_connector *connector); +struct drm_encoder * +tegra_output_connector_best_encoder(struct drm_connector *connector); +enum drm_connector_status +tegra_output_connector_detect(struct drm_connector *connector, bool force); +void tegra_output_connector_destroy(struct drm_connector *connector); + +void tegra_output_encoder_destroy(struct drm_encoder *encoder); + +/* from dpaux.c */ +struct tegra_dpaux; +struct drm_dp_link; + +struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np); +enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux); +int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output); +int tegra_dpaux_detach(struct tegra_dpaux *dpaux); +int tegra_dpaux_enable(struct tegra_dpaux *dpaux); +int tegra_dpaux_disable(struct tegra_dpaux *dpaux); +int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding); +int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link, + u8 pattern); + +/* from fb.c */ +struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, + unsigned int index); +bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); +int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, + struct tegra_bo_tiling *tiling); +struct drm_framebuffer *tegra_fb_create(struct 
drm_device *drm, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd); +int tegra_drm_fb_prepare(struct drm_device *drm); +void tegra_drm_fb_free(struct drm_device *drm); +int tegra_drm_fb_init(struct drm_device *drm); +void tegra_drm_fb_exit(struct drm_device *drm); +#ifdef CONFIG_DRM_TEGRA_FBDEV +void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); +void tegra_fb_output_poll_changed(struct drm_device *drm); +#endif + +extern struct platform_driver tegra_dc_driver; +extern struct platform_driver tegra_dsi_driver; +extern struct platform_driver tegra_sor_driver; +extern struct platform_driver tegra_hdmi_driver; +extern struct platform_driver tegra_dpaux_driver; +extern struct platform_driver tegra_gr2d_driver; +extern struct platform_driver tegra_gr3d_driver; + +#endif /* HOST1X_DRM_H */ diff --git a/kernel/drivers/gpu/drm/tegra/dsi.c b/kernel/drivers/gpu/drm/tegra/dsi.c new file mode 100644 index 000000000..ed970f622 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/dsi.c @@ -0,0 +1,1636 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/host1x.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <linux/regulator/consumer.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_panel.h> + +#include <video/mipi_display.h> + +#include "dc.h" +#include "drm.h" +#include "dsi.h" +#include "mipi-phy.h" + +struct tegra_dsi_state { + struct drm_connector_state base; + + struct mipi_dphy_timing timing; + unsigned long period; + + unsigned int vrefresh; + unsigned int lanes; + unsigned long pclk; + unsigned long bclk; + + enum tegra_dsi_format format; + unsigned int mul; + unsigned int div; +}; + +static inline struct tegra_dsi_state * +to_dsi_state(struct drm_connector_state *state) +{ + return container_of(state, struct tegra_dsi_state, base); +} + +struct tegra_dsi { + struct host1x_client client; + struct tegra_output output; + struct device *dev; + + void __iomem *regs; + + struct reset_control *rst; + struct clk *clk_parent; + struct clk *clk_lp; + struct clk *clk; + + struct drm_info_list *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; + + unsigned long flags; + enum mipi_dsi_pixel_format format; + unsigned int lanes; + + struct tegra_mipi_device *mipi; + struct mipi_dsi_host host; + + struct regulator *vdd; + + unsigned int video_fifo_depth; + unsigned int host_fifo_depth; + + /* for ganged-mode support */ + struct tegra_dsi *master; + struct tegra_dsi *slave; +}; + +static inline struct tegra_dsi * +host1x_client_to_dsi(struct host1x_client *client) +{ + return container_of(client, struct tegra_dsi, client); +} + +static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host) +{ + return container_of(host, struct tegra_dsi, host); +} + +static inline struct tegra_dsi *to_dsi(struct tegra_output *output) +{ + return container_of(output, struct tegra_dsi, output); +} + +static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi) +{ + return to_dsi_state(dsi->output.connector.state); +} + +static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned long reg) +{ + return readl(dsi->regs + (reg << 2)); +} + +static inline void 
tegra_dsi_writel(struct tegra_dsi *dsi, u32 value, + unsigned long reg) +{ + writel(value, dsi->regs + (reg << 2)); +} + +static int tegra_dsi_show_regs(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_dsi *dsi = node->info_ent->data; + +#define DUMP_REG(name) \ + seq_printf(s, "%-32s %#05x %08x\n", #name, name, \ + tegra_dsi_readl(dsi, name)) + + DUMP_REG(DSI_INCR_SYNCPT); + DUMP_REG(DSI_INCR_SYNCPT_CONTROL); + DUMP_REG(DSI_INCR_SYNCPT_ERROR); + DUMP_REG(DSI_CTXSW); + DUMP_REG(DSI_RD_DATA); + DUMP_REG(DSI_WR_DATA); + DUMP_REG(DSI_POWER_CONTROL); + DUMP_REG(DSI_INT_ENABLE); + DUMP_REG(DSI_INT_STATUS); + DUMP_REG(DSI_INT_MASK); + DUMP_REG(DSI_HOST_CONTROL); + DUMP_REG(DSI_CONTROL); + DUMP_REG(DSI_SOL_DELAY); + DUMP_REG(DSI_MAX_THRESHOLD); + DUMP_REG(DSI_TRIGGER); + DUMP_REG(DSI_TX_CRC); + DUMP_REG(DSI_STATUS); + + DUMP_REG(DSI_INIT_SEQ_CONTROL); + DUMP_REG(DSI_INIT_SEQ_DATA_0); + DUMP_REG(DSI_INIT_SEQ_DATA_1); + DUMP_REG(DSI_INIT_SEQ_DATA_2); + DUMP_REG(DSI_INIT_SEQ_DATA_3); + DUMP_REG(DSI_INIT_SEQ_DATA_4); + DUMP_REG(DSI_INIT_SEQ_DATA_5); + DUMP_REG(DSI_INIT_SEQ_DATA_6); + DUMP_REG(DSI_INIT_SEQ_DATA_7); + + DUMP_REG(DSI_PKT_SEQ_0_LO); + DUMP_REG(DSI_PKT_SEQ_0_HI); + DUMP_REG(DSI_PKT_SEQ_1_LO); + DUMP_REG(DSI_PKT_SEQ_1_HI); + DUMP_REG(DSI_PKT_SEQ_2_LO); + DUMP_REG(DSI_PKT_SEQ_2_HI); + DUMP_REG(DSI_PKT_SEQ_3_LO); + DUMP_REG(DSI_PKT_SEQ_3_HI); + DUMP_REG(DSI_PKT_SEQ_4_LO); + DUMP_REG(DSI_PKT_SEQ_4_HI); + DUMP_REG(DSI_PKT_SEQ_5_LO); + DUMP_REG(DSI_PKT_SEQ_5_HI); + + DUMP_REG(DSI_DCS_CMDS); + + DUMP_REG(DSI_PKT_LEN_0_1); + DUMP_REG(DSI_PKT_LEN_2_3); + DUMP_REG(DSI_PKT_LEN_4_5); + DUMP_REG(DSI_PKT_LEN_6_7); + + DUMP_REG(DSI_PHY_TIMING_0); + DUMP_REG(DSI_PHY_TIMING_1); + DUMP_REG(DSI_PHY_TIMING_2); + DUMP_REG(DSI_BTA_TIMING); + + DUMP_REG(DSI_TIMEOUT_0); + DUMP_REG(DSI_TIMEOUT_1); + DUMP_REG(DSI_TO_TALLY); + + DUMP_REG(DSI_PAD_CONTROL_0); + DUMP_REG(DSI_PAD_CONTROL_CD); + DUMP_REG(DSI_PAD_CD_STATUS); + DUMP_REG(DSI_VIDEO_MODE_CONTROL); + DUMP_REG(DSI_PAD_CONTROL_1); + DUMP_REG(DSI_PAD_CONTROL_2); + DUMP_REG(DSI_PAD_CONTROL_3); + DUMP_REG(DSI_PAD_CONTROL_4); + + DUMP_REG(DSI_GANGED_MODE_CONTROL); + DUMP_REG(DSI_GANGED_MODE_START); + DUMP_REG(DSI_GANGED_MODE_SIZE); + + DUMP_REG(DSI_RAW_DATA_BYTE_COUNT); + DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL); + + DUMP_REG(DSI_INIT_SEQ_DATA_8); + DUMP_REG(DSI_INIT_SEQ_DATA_9); + DUMP_REG(DSI_INIT_SEQ_DATA_10); + DUMP_REG(DSI_INIT_SEQ_DATA_11); + DUMP_REG(DSI_INIT_SEQ_DATA_12); + DUMP_REG(DSI_INIT_SEQ_DATA_13); + DUMP_REG(DSI_INIT_SEQ_DATA_14); + DUMP_REG(DSI_INIT_SEQ_DATA_15); + +#undef DUMP_REG + + return 0; +} + +static struct drm_info_list debugfs_files[] = { + { "regs", tegra_dsi_show_regs, 0, NULL }, +}; + +static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi, + struct drm_minor *minor) +{ + const char *name = dev_name(dsi->dev); + unsigned int i; + int err; + + dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root); + if (!dsi->debugfs) + return -ENOMEM; + + dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), + GFP_KERNEL); + if (!dsi->debugfs_files) { + err = -ENOMEM; + goto remove; + } + + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + dsi->debugfs_files[i].data = dsi; + + err = drm_debugfs_create_files(dsi->debugfs_files, + ARRAY_SIZE(debugfs_files), + dsi->debugfs, minor); + if (err < 0) + goto free; + + dsi->minor = minor; + + return 0; + +free: + kfree(dsi->debugfs_files); + dsi->debugfs_files = NULL; +remove: + debugfs_remove(dsi->debugfs); + dsi->debugfs = NULL; + + return err; +} 
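
As a reading aid for the register dump above: DUMP_REG() stringizes its argument, so every line of the debugfs "regs" file shows the register's name, its word offset and the value just read back through tegra_dsi_readl(). For example, DUMP_REG(DSI_POWER_CONTROL) expands to

    seq_printf(s, "%-32s %#05x %08x\n", "DSI_POWER_CONTROL",
               DSI_POWER_CONTROL, tegra_dsi_readl(dsi, DSI_POWER_CONTROL));

where s and dsi are the locals of tegra_dsi_show_regs(); DSI_POWER_CONTROL is the word offset 0x0b defined in dsi.h further down, i.e. byte offset 0x2c once tegra_dsi_readl() has shifted it left by two.
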
+ +static void tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) +{ + drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files), + dsi->minor); + dsi->minor = NULL; + + kfree(dsi->debugfs_files); + dsi->debugfs_files = NULL; + + debugfs_remove(dsi->debugfs); + dsi->debugfs = NULL; +} + +#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9)) +#define PKT_LEN0(len) (((len) & 0x07) << 0) +#define PKT_ID1(id) ((((id) & 0x3f) << 13) | (1 << 19)) +#define PKT_LEN1(len) (((len) & 0x07) << 10) +#define PKT_ID2(id) ((((id) & 0x3f) << 23) | (1 << 29)) +#define PKT_LEN2(len) (((len) & 0x07) << 20) + +#define PKT_LP (1 << 30) +#define NUM_PKT_SEQ 12 + +/* + * non-burst mode with sync pulses + */ +static const u32 pkt_seq_video_non_burst_sync_pulses[NUM_PKT_SEQ] = { + [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) | + PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) | + PKT_LP, + [ 1] = 0, + [ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) | + PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) | + PKT_LP, + [ 3] = 0, + [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) | + PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) | + PKT_LP, + [ 5] = 0, + [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) | + PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0), + [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) | + PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) | + PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4), + [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) | + PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) | + PKT_LP, + [ 9] = 0, + [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) | + PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0), + [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) | + PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) | + PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4), +}; + +/* + * non-burst mode with sync events + */ +static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = { + [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) | + PKT_LP, + [ 1] = 0, + [ 2] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) | + PKT_LP, + [ 3] = 0, + [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) | + PKT_LP, + [ 5] = 0, + [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) | + PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3), + [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4), + [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) | + PKT_LP, + [ 9] = 0, + [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) | + PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) | + PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3), + [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4), +}; + +static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = { + [ 0] = 0, + [ 1] = 0, + [ 2] = 0, + [ 3] = 0, + [ 4] = 0, + [ 5] = 0, + [ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP, + [ 7] = 0, + [ 8] = 0, + [ 9] = 0, + [10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP, + [11] = 0, +}; + 
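
As a reading aid for the packet-sequence tables above: each entry packs up to three packet slots (a 6-bit data type plus a 3-bit length field each) and the PKT_LP flag into one 32-bit word, with the 1 << 9 / 1 << 19 / 1 << 29 bits set whenever the corresponding slot is populated. The stand-alone sketch below re-creates the packing macros in user space and prints entry [0] of the sync-pulse sequence; the three MIPI data-type codes are hard-coded here on the assumption that they match the kernel's <video/mipi_display.h> definitions.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* packing macros copied from the driver above */
    #define PKT_ID0(id)   ((((id) & 0x3f) <<  3) | (1 <<  9))
    #define PKT_LEN0(len) (((len) & 0x07) <<  0)
    #define PKT_ID1(id)   ((((id) & 0x3f) << 13) | (1 << 19))
    #define PKT_LEN1(len) (((len) & 0x07) << 10)
    #define PKT_ID2(id)   ((((id) & 0x3f) << 23) | (1 << 29))
    #define PKT_LEN2(len) (((len) & 0x07) << 20)
    #define PKT_LP        (1 << 30)

    /* assumed MIPI DSI data-type codes (normally from <video/mipi_display.h>) */
    #define MIPI_DSI_V_SYNC_START    0x01
    #define MIPI_DSI_BLANKING_PACKET 0x19
    #define MIPI_DSI_H_SYNC_END      0x31

    int main(void)
    {
            /* entry [0] of pkt_seq_video_non_burst_sync_pulses */
            uint32_t word = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
                            PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
                            PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
                            PKT_LP;

            /* with the codes assumed above this prints 788b2608 */
            printf("%08" PRIx32 "\n", word);
            return 0;
    }
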
+static void tegra_dsi_set_phy_timing(struct tegra_dsi *dsi, + unsigned long period, + const struct mipi_dphy_timing *timing) +{ + u32 value; + + value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 | + DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 | + DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 | + DSI_TIMING_FIELD(timing->hsprepare, period, 1); + tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0); + + value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 | + DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 | + DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 | + DSI_TIMING_FIELD(timing->lpx, period, 1); + tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1); + + value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 | + DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 | + DSI_TIMING_FIELD(0xff * period, period, 0) << 0; + tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2); + + value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 | + DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 | + DSI_TIMING_FIELD(timing->tago, period, 1); + tegra_dsi_writel(dsi, value, DSI_BTA_TIMING); + + if (dsi->slave) + tegra_dsi_set_phy_timing(dsi->slave, period, timing); +} + +static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format, + unsigned int *mulp, unsigned int *divp) +{ + switch (format) { + case MIPI_DSI_FMT_RGB666_PACKED: + case MIPI_DSI_FMT_RGB888: + *mulp = 3; + *divp = 1; + break; + + case MIPI_DSI_FMT_RGB565: + *mulp = 2; + *divp = 1; + break; + + case MIPI_DSI_FMT_RGB666: + *mulp = 9; + *divp = 4; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format, + enum tegra_dsi_format *fmt) +{ + switch (format) { + case MIPI_DSI_FMT_RGB888: + *fmt = TEGRA_DSI_FORMAT_24P; + break; + + case MIPI_DSI_FMT_RGB666: + *fmt = TEGRA_DSI_FORMAT_18NP; + break; + + case MIPI_DSI_FMT_RGB666_PACKED: + *fmt = TEGRA_DSI_FORMAT_18P; + break; + + case MIPI_DSI_FMT_RGB565: + *fmt = TEGRA_DSI_FORMAT_16P; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start, + unsigned int size) +{ + u32 value; + + tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START); + tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE); + + value = DSI_GANGED_MODE_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL); +} + +static void tegra_dsi_enable(struct tegra_dsi *dsi) +{ + u32 value; + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value |= DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + if (dsi->slave) + tegra_dsi_enable(dsi->slave); +} + +static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi) +{ + if (dsi->master) + return dsi->master->lanes + dsi->lanes; + + if (dsi->slave) + return dsi->lanes + dsi->slave->lanes; + + return dsi->lanes; +} + +static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, + const struct drm_display_mode *mode) +{ + unsigned int hact, hsw, hbp, hfp, i, mul, div; + struct tegra_dsi_state *state; + const u32 *pkt_seq; + u32 value; + + /* XXX: pass in state into this function? 
*/ + if (dsi->master) + state = tegra_dsi_get_state(dsi->master); + else + state = tegra_dsi_get_state(dsi); + + mul = state->mul; + div = state->div; + + if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { + DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n"); + pkt_seq = pkt_seq_video_non_burst_sync_pulses; + } else if (dsi->flags & MIPI_DSI_MODE_VIDEO) { + DRM_DEBUG_KMS("Non-burst video mode with sync events\n"); + pkt_seq = pkt_seq_video_non_burst_sync_events; + } else { + DRM_DEBUG_KMS("Command mode\n"); + pkt_seq = pkt_seq_command_mode; + } + + value = DSI_CONTROL_CHANNEL(0) | + DSI_CONTROL_FORMAT(state->format) | + DSI_CONTROL_LANES(dsi->lanes - 1) | + DSI_CONTROL_SOURCE(pipe); + tegra_dsi_writel(dsi, value, DSI_CONTROL); + + tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD); + + value = DSI_HOST_CONTROL_HS; + tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); + + value = tegra_dsi_readl(dsi, DSI_CONTROL); + + if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + value |= DSI_CONTROL_HS_CLK_CTRL; + + value &= ~DSI_CONTROL_TX_TRIG(3); + + /* enable DCS commands for command mode */ + if (dsi->flags & MIPI_DSI_MODE_VIDEO) + value &= ~DSI_CONTROL_DCS_ENABLE; + else + value |= DSI_CONTROL_DCS_ENABLE; + + value |= DSI_CONTROL_VIDEO_ENABLE; + value &= ~DSI_CONTROL_HOST_ENABLE; + tegra_dsi_writel(dsi, value, DSI_CONTROL); + + for (i = 0; i < NUM_PKT_SEQ; i++) + tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i); + + if (dsi->flags & MIPI_DSI_MODE_VIDEO) { + /* horizontal active pixels */ + hact = mode->hdisplay * mul / div; + + /* horizontal sync width */ + hsw = (mode->hsync_end - mode->hsync_start) * mul / div; + hsw -= 10; + + /* horizontal back porch */ + hbp = (mode->htotal - mode->hsync_end) * mul / div; + hbp -= 14; + + /* horizontal front porch */ + hfp = (mode->hsync_start - mode->hdisplay) * mul / div; + hfp -= 8; + + tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1); + tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3); + tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5); + tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7); + + /* set SOL delay (for non-burst mode only) */ + tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY); + + /* TODO: implement ganged mode */ + } else { + u16 bytes; + + if (dsi->master || dsi->slave) { + /* + * For ganged mode, assume symmetric left-right mode. 
+ */ + bytes = 1 + (mode->hdisplay / 2) * mul / div; + } else { + /* 1 byte (DCS command) + pixel data */ + bytes = 1 + mode->hdisplay * mul / div; + } + + tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1); + tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3); + tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5); + tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7); + + value = MIPI_DCS_WRITE_MEMORY_START << 8 | + MIPI_DCS_WRITE_MEMORY_CONTINUE; + tegra_dsi_writel(dsi, value, DSI_DCS_CMDS); + + /* set SOL delay */ + if (dsi->master || dsi->slave) { + unsigned long delay, bclk, bclk_ganged; + unsigned int lanes = state->lanes; + + /* SOL to valid, valid to FIFO and FIFO write delay */ + delay = 4 + 4 + 2; + delay = DIV_ROUND_UP(delay * mul, div * lanes); + /* FIFO read delay */ + delay = delay + 6; + + bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); + bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); + value = bclk - bclk_ganged + delay + 20; + } else { + /* TODO: revisit for non-ganged mode */ + value = 8 * mul / div; + } + + tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); + } + + if (dsi->slave) { + tegra_dsi_configure(dsi->slave, pipe, mode); + + /* + * TODO: Support modes other than symmetrical left-right + * split. + */ + tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2); + tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2, + mode->hdisplay / 2); + } +} + +static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout) +{ + u32 value; + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + value = tegra_dsi_readl(dsi, DSI_STATUS); + if (value & DSI_STATUS_IDLE) + return 0; + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static void tegra_dsi_video_disable(struct tegra_dsi *dsi) +{ + u32 value; + + value = tegra_dsi_readl(dsi, DSI_CONTROL); + value &= ~DSI_CONTROL_VIDEO_ENABLE; + tegra_dsi_writel(dsi, value, DSI_CONTROL); + + if (dsi->slave) + tegra_dsi_video_disable(dsi->slave); +} + +static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi) +{ + tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START); + tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE); + tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL); +} + +static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk, + unsigned int vrefresh) +{ + unsigned int timeout; + u32 value; + + /* one frame high-speed transmission timeout */ + timeout = (bclk / vrefresh) / 512; + value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout); + tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0); + + /* 2 ms peripheral timeout for panel */ + timeout = 2 * bclk / 512 * 1000; + value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000); + tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1); + + value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0); + tegra_dsi_writel(dsi, value, DSI_TO_TALLY); + + if (dsi->slave) + tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh); +} + +static void tegra_dsi_disable(struct tegra_dsi *dsi) +{ + u32 value; + + if (dsi->slave) { + tegra_dsi_ganged_disable(dsi->slave); + tegra_dsi_ganged_disable(dsi); + } + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value &= ~DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + if (dsi->slave) + tegra_dsi_disable(dsi->slave); + + usleep_range(5000, 10000); +} + +static void tegra_dsi_soft_reset(struct tegra_dsi *dsi) +{ + u32 value; + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value &= ~DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + 
usleep_range(300, 1000);
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(300, 1000);
+
+ value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+ if (value)
+ tegra_dsi_writel(dsi, 0, DSI_TRIGGER);
+
+ if (dsi->slave)
+ tegra_dsi_soft_reset(dsi->slave);
+}
+
+static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static void tegra_dsi_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_dsi_state *state;
+
+ kfree(connector->state);
+ connector->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ connector->state = &state->base;
+}
+
+static struct drm_connector_state *
+tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_dsi_state *state = to_dsi_state(connector->state);
+ struct tegra_dsi_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ return &copy->base;
+}
+
+static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
+ .dpms = tegra_dsi_connector_dpms,
+ .reset = tegra_dsi_connector_reset,
+ .detect = tegra_output_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = tegra_dsi_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static enum drm_mode_status
+tegra_dsi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
+ .get_modes = tegra_output_connector_get_modes,
+ .mode_valid = tegra_dsi_connector_mode_valid,
+ .best_encoder = tegra_output_connector_best_encoder,
+};
+
+static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
+ .destroy = tegra_output_encoder_destroy,
+};
+
+static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ struct tegra_dsi_state *state;
+ u32 value;
+
+ state = tegra_dsi_get_state(dsi);
+
+ tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
+
+ /*
+ * The D-PHY timing fields are expressed in byte-clock cycles, so
+ * multiply the period by 8.
+ */ + tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing); + + if (output->panel) + drm_panel_prepare(output->panel); + + tegra_dsi_configure(dsi, dc->pipe, mode); + + /* enable display controller */ + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= DSI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + /* enable DSI controller */ + tegra_dsi_enable(dsi); + + if (output->panel) + drm_panel_enable(output->panel); + + return; +} + +static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_dsi *dsi = to_dsi(output); + u32 value; + int err; + + if (output->panel) + drm_panel_disable(output->panel); + + tegra_dsi_video_disable(dsi); + + /* + * The following accesses registers of the display controller, so make + * sure it's only executed when the output is attached to one. + */ + if (dc) { + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~DSI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + } + + err = tegra_dsi_wait_idle(dsi, 100); + if (err < 0) + dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err); + + tegra_dsi_soft_reset(dsi); + + if (output->panel) + drm_panel_unprepare(output->panel); + + tegra_dsi_disable(dsi); + + return; +} + +static int +tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dsi_state *state = to_dsi_state(conn_state); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + struct tegra_dsi *dsi = to_dsi(output); + unsigned int scdiv; + unsigned long plld; + int err; + + state->pclk = crtc_state->mode.clock * 1000; + + err = tegra_dsi_get_muldiv(dsi->format, &state->mul, &state->div); + if (err < 0) + return err; + + state->lanes = tegra_dsi_get_lanes(dsi); + + err = tegra_dsi_get_format(dsi->format, &state->format); + if (err < 0) + return err; + + state->vrefresh = drm_mode_vrefresh(&crtc_state->mode); + + /* compute byte clock */ + state->bclk = (state->pclk * state->mul) / (state->div * state->lanes); + + DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", state->mul, state->div, + state->lanes); + DRM_DEBUG_KMS("format: %u, vrefresh: %u\n", state->format, + state->vrefresh); + DRM_DEBUG_KMS("bclk: %lu\n", state->bclk); + + /* + * Compute bit clock and round up to the next MHz. + */ + plld = DIV_ROUND_UP(state->bclk * 8, USEC_PER_SEC) * USEC_PER_SEC; + state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld); + + err = mipi_dphy_timing_get_default(&state->timing, state->period); + if (err < 0) + return err; + + err = mipi_dphy_timing_validate(&state->timing, state->period); + if (err < 0) { + dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err); + return err; + } + + /* + * We divide the frequency by two here, but we make up for that by + * setting the shift clock divider (further below) to half of the + * correct value. + */ + plld /= 2; + + /* + * Derive pixel clock from bit clock using the shift clock divider. + * Note that this is only half of what we would expect, but we need + * that to make up for the fact that we divided the bit clock by a + * factor of two above. + * + * It's not clear exactly why this is necessary, but the display is + * not working properly otherwise. 
Perhaps the PLLs cannot generate + * frequencies sufficiently high. + */ + scdiv = ((8 * state->mul) / (state->div * state->lanes)) - 2; + + err = tegra_dc_state_setup_clock(dc, crtc_state, dsi->clk_parent, + plld, scdiv); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } + + return err; +} + +static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { + .dpms = tegra_dsi_encoder_dpms, + .prepare = tegra_dsi_encoder_prepare, + .commit = tegra_dsi_encoder_commit, + .mode_set = tegra_dsi_encoder_mode_set, + .disable = tegra_dsi_encoder_disable, + .atomic_check = tegra_dsi_encoder_atomic_check, +}; + +static int tegra_dsi_pad_enable(struct tegra_dsi *dsi) +{ + u32 value; + + value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0); + tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0); + + return 0; +} + +static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi) +{ + u32 value; + + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4); + + /* start calibration */ + tegra_dsi_pad_enable(dsi); + + value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) | + DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) | + DSI_PAD_OUT_CLK(0x0); + tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2); + + return tegra_mipi_calibrate(dsi->mipi); +} + +static int tegra_dsi_init(struct host1x_client *client) +{ + struct drm_device *drm = dev_get_drvdata(client->parent); + struct tegra_dsi *dsi = host1x_client_to_dsi(client); + int err; + + reset_control_deassert(dsi->rst); + + err = tegra_dsi_pad_calibrate(dsi); + if (err < 0) { + dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); + goto reset; + } + + /* Gangsters must not register their own outputs. 
*/ + if (!dsi->master) { + dsi->output.dev = client->dev; + + drm_connector_init(drm, &dsi->output.connector, + &tegra_dsi_connector_funcs, + DRM_MODE_CONNECTOR_DSI); + drm_connector_helper_add(&dsi->output.connector, + &tegra_dsi_connector_helper_funcs); + dsi->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &dsi->output.encoder, + &tegra_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI); + drm_encoder_helper_add(&dsi->output.encoder, + &tegra_dsi_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&dsi->output.connector, + &dsi->output.encoder); + drm_connector_register(&dsi->output.connector); + + err = tegra_output_init(drm, &dsi->output); + if (err < 0) { + dev_err(client->dev, + "failed to initialize output: %d\n", + err); + goto reset; + } + + dsi->output.encoder.possible_crtcs = 0x3; + } + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_dsi_debugfs_init(dsi, drm->primary); + if (err < 0) + dev_err(dsi->dev, "debugfs setup failed: %d\n", err); + } + + return 0; + +reset: + reset_control_assert(dsi->rst); + return err; +} + +static int tegra_dsi_exit(struct host1x_client *client) +{ + struct tegra_dsi *dsi = host1x_client_to_dsi(client); + + tegra_output_exit(&dsi->output); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_dsi_debugfs_exit(dsi); + + reset_control_assert(dsi->rst); + + return 0; +} + +static const struct host1x_client_ops dsi_client_ops = { + .init = tegra_dsi_init, + .exit = tegra_dsi_exit, +}; + +static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi) +{ + struct clk *parent; + int err; + + parent = clk_get_parent(dsi->clk); + if (!parent) + return -EINVAL; + + err = clk_set_parent(parent, dsi->clk_parent); + if (err < 0) + return err; + + return 0; +} + +static const char * const error_report[16] = { + "SoT Error", + "SoT Sync Error", + "EoT Sync Error", + "Escape Mode Entry Command Error", + "Low-Power Transmit Sync Error", + "Peripheral Timeout Error", + "False Control Error", + "Contention Detected", + "ECC Error, single-bit", + "ECC Error, multi-bit", + "Checksum Error", + "DSI Data Type Not Recognized", + "DSI VC ID Invalid", + "Invalid Transmission Length", + "Reserved", + "DSI Protocol Violation", +}; + +static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi, + const struct mipi_dsi_msg *msg, + size_t count) +{ + u8 *rx = msg->rx_buf; + unsigned int i, j, k; + size_t size = 0; + u16 errors; + u32 value; + + /* read and parse packet header */ + value = tegra_dsi_readl(dsi, DSI_RD_DATA); + + switch (value & 0x3f) { + case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: + errors = (value >> 8) & 0xffff; + dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n", + errors); + for (i = 0; i < ARRAY_SIZE(error_report); i++) + if (errors & BIT(i)) + dev_dbg(dsi->dev, " %2u: %s\n", i, + error_report[i]); + break; + + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: + rx[0] = (value >> 8) & 0xff; + size = 1; + break; + + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: + rx[0] = (value >> 8) & 0xff; + rx[1] = (value >> 16) & 0xff; + size = 2; + break; + + case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE: + size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff); + break; + + case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE: + size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff); + break; + + default: + dev_err(dsi->dev, "unhandled response type: %02x\n", + value & 0x3f); + return -EPROTO; + } + + size = min(size, msg->rx_len); + + if (msg->rx_buf && size > 0) { + for (i = 0, j = 0; i < count - 1; i++, j += 4) { + u8 *rx = msg->rx_buf + j; + + value = 
tegra_dsi_readl(dsi, DSI_RD_DATA); + + for (k = 0; k < 4 && (j + k) < msg->rx_len; k++) + rx[j + k] = (value >> (k << 3)) & 0xff; + } + } + + return size; +} + +static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout) +{ + tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER); + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER); + if ((value & DSI_TRIGGER_HOST) == 0) + return 0; + + usleep_range(1000, 2000); + } + + DRM_DEBUG_KMS("timeout waiting for transmission to complete\n"); + return -ETIMEDOUT; +} + +static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi, + unsigned long timeout) +{ + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + u32 value = tegra_dsi_readl(dsi, DSI_STATUS); + u8 count = value & 0x1f; + + if (count > 0) + return count; + + usleep_range(1000, 2000); + } + + DRM_DEBUG_KMS("peripheral returned no data\n"); + return -ETIMEDOUT; +} + +static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset, + const void *buffer, size_t size) +{ + const u8 *buf = buffer; + size_t i, j; + u32 value; + + for (j = 0; j < size; j += 4) { + value = 0; + + for (i = 0; i < 4 && j + i < size; i++) + value |= buf[j + i] << (i << 3); + + tegra_dsi_writel(dsi, value, DSI_WR_DATA); + } +} + +static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct tegra_dsi *dsi = host_to_tegra(host); + struct mipi_dsi_packet packet; + const u8 *header; + size_t count; + ssize_t err; + u32 value; + + err = mipi_dsi_create_packet(&packet, msg); + if (err < 0) + return err; + + header = packet.header; + + /* maximum FIFO depth is 1920 words */ + if (packet.size > dsi->video_fifo_depth * 4) + return -ENOSPC; + + /* reset underflow/overflow flags */ + value = tegra_dsi_readl(dsi, DSI_STATUS); + if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) { + value = DSI_HOST_CONTROL_FIFO_RESET; + tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); + usleep_range(10, 20); + } + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value |= DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + usleep_range(5000, 10000); + + value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST | + DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC; + + if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0) + value |= DSI_HOST_CONTROL_HS; + + /* + * The host FIFO has a maximum of 64 words, so larger transmissions + * need to use the video FIFO. + */ + if (packet.size > dsi->host_fifo_depth * 4) + value |= DSI_HOST_CONTROL_FIFO_SEL; + + tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); + + /* + * For reads and messages with explicitly requested ACK, generate a + * BTA sequence after the transmission of the packet. 
+ */ + if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) || + (msg->rx_buf && msg->rx_len > 0)) { + value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL); + value |= DSI_HOST_CONTROL_PKT_BTA; + tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); + } + + value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE; + tegra_dsi_writel(dsi, value, DSI_CONTROL); + + /* write packet header, ECC is generated by hardware */ + value = header[2] << 16 | header[1] << 8 | header[0]; + tegra_dsi_writel(dsi, value, DSI_WR_DATA); + + /* write payload (if any) */ + if (packet.payload_length > 0) + tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload, + packet.payload_length); + + err = tegra_dsi_transmit(dsi, 250); + if (err < 0) + return err; + + if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) || + (msg->rx_buf && msg->rx_len > 0)) { + err = tegra_dsi_wait_for_response(dsi, 250); + if (err < 0) + return err; + + count = err; + + value = tegra_dsi_readl(dsi, DSI_RD_DATA); + switch (value) { + case 0x84: + /* + dev_dbg(dsi->dev, "ACK\n"); + */ + break; + + case 0x87: + /* + dev_dbg(dsi->dev, "ESCAPE\n"); + */ + break; + + default: + dev_err(dsi->dev, "unknown status: %08x\n", value); + break; + } + + if (count > 1) { + err = tegra_dsi_read_response(dsi, msg, count); + if (err < 0) + dev_err(dsi->dev, + "failed to parse response: %zd\n", + err); + else { + /* + * For read commands, return the number of + * bytes returned by the peripheral. + */ + count = err; + } + } + } else { + /* + * For write commands, we have transmitted the 4-byte header + * plus the variable-length payload. + */ + count = 4 + packet.payload_length; + } + + return count; +} + +static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi) +{ + struct clk *parent; + int err; + + /* make sure both DSI controllers share the same PLL */ + parent = clk_get_parent(dsi->slave->clk); + if (!parent) + return -EINVAL; + + err = clk_set_parent(parent, dsi->clk_parent); + if (err < 0) + return err; + + return 0; +} + +static int tegra_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct tegra_dsi *dsi = host_to_tegra(host); + + dsi->flags = device->mode_flags; + dsi->format = device->format; + dsi->lanes = device->lanes; + + if (dsi->slave) { + int err; + + dev_dbg(dsi->dev, "attaching dual-channel device %s\n", + dev_name(&device->dev)); + + err = tegra_dsi_ganged_setup(dsi); + if (err < 0) { + dev_err(dsi->dev, "failed to set up ganged mode: %d\n", + err); + return err; + } + } + + /* + * Slaves don't have a panel associated with them, so they provide + * merely the second channel. 
+ */ + if (!dsi->master) { + struct tegra_output *output = &dsi->output; + + output->panel = of_drm_find_panel(device->dev.of_node); + if (output->panel && output->connector.dev) { + drm_panel_attach(output->panel, &output->connector); + drm_helper_hpd_irq_event(output->connector.dev); + } + } + + return 0; +} + +static int tegra_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct tegra_dsi *dsi = host_to_tegra(host); + struct tegra_output *output = &dsi->output; + + if (output->panel && &device->dev == output->panel->dev) { + output->panel = NULL; + + if (output->connector.dev) + drm_helper_hpd_irq_event(output->connector.dev); + } + + return 0; +} + +static const struct mipi_dsi_host_ops tegra_dsi_host_ops = { + .attach = tegra_dsi_host_attach, + .detach = tegra_dsi_host_detach, + .transfer = tegra_dsi_host_transfer, +}; + +static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) +{ + struct device_node *np; + + np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0); + if (np) { + struct platform_device *gangster = of_find_device_by_node(np); + + dsi->slave = platform_get_drvdata(gangster); + of_node_put(np); + + if (!dsi->slave) + return -EPROBE_DEFER; + + dsi->slave->master = dsi; + } + + return 0; +} + +static int tegra_dsi_probe(struct platform_device *pdev) +{ + struct tegra_dsi *dsi; + struct resource *regs; + int err; + + dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + dsi->output.dev = dsi->dev = &pdev->dev; + dsi->video_fifo_depth = 1920; + dsi->host_fifo_depth = 64; + + err = tegra_dsi_ganged_probe(dsi); + if (err < 0) + return err; + + err = tegra_output_probe(&dsi->output); + if (err < 0) + return err; + + dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD; + + /* + * Assume these values by default. When a DSI peripheral driver + * attaches to the DSI host, the parameters will be taken from + * the attached device. 
+ */ + dsi->flags = MIPI_DSI_MODE_VIDEO; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->lanes = 4; + + dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); + if (IS_ERR(dsi->rst)) + return PTR_ERR(dsi->rst); + + dsi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dsi->clk)) { + dev_err(&pdev->dev, "cannot get DSI clock\n"); + err = PTR_ERR(dsi->clk); + goto reset; + } + + err = clk_prepare_enable(dsi->clk); + if (err < 0) { + dev_err(&pdev->dev, "cannot enable DSI clock\n"); + goto reset; + } + + dsi->clk_lp = devm_clk_get(&pdev->dev, "lp"); + if (IS_ERR(dsi->clk_lp)) { + dev_err(&pdev->dev, "cannot get low-power clock\n"); + err = PTR_ERR(dsi->clk_lp); + goto disable_clk; + } + + err = clk_prepare_enable(dsi->clk_lp); + if (err < 0) { + dev_err(&pdev->dev, "cannot enable low-power clock\n"); + goto disable_clk; + } + + dsi->clk_parent = devm_clk_get(&pdev->dev, "parent"); + if (IS_ERR(dsi->clk_parent)) { + dev_err(&pdev->dev, "cannot get parent clock\n"); + err = PTR_ERR(dsi->clk_parent); + goto disable_clk_lp; + } + + dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi"); + if (IS_ERR(dsi->vdd)) { + dev_err(&pdev->dev, "cannot get VDD supply\n"); + err = PTR_ERR(dsi->vdd); + goto disable_clk_lp; + } + + err = regulator_enable(dsi->vdd); + if (err < 0) { + dev_err(&pdev->dev, "cannot enable VDD supply\n"); + goto disable_clk_lp; + } + + err = tegra_dsi_setup_clocks(dsi); + if (err < 0) { + dev_err(&pdev->dev, "cannot setup clocks\n"); + goto disable_vdd; + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dsi->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(dsi->regs)) { + err = PTR_ERR(dsi->regs); + goto disable_vdd; + } + + dsi->mipi = tegra_mipi_request(&pdev->dev); + if (IS_ERR(dsi->mipi)) { + err = PTR_ERR(dsi->mipi); + goto disable_vdd; + } + + dsi->host.ops = &tegra_dsi_host_ops; + dsi->host.dev = &pdev->dev; + + err = mipi_dsi_host_register(&dsi->host); + if (err < 0) { + dev_err(&pdev->dev, "failed to register DSI host: %d\n", err); + goto mipi_free; + } + + INIT_LIST_HEAD(&dsi->client.list); + dsi->client.ops = &dsi_client_ops; + dsi->client.dev = &pdev->dev; + + err = host1x_client_register(&dsi->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + goto unregister; + } + + platform_set_drvdata(pdev, dsi); + + return 0; + +unregister: + mipi_dsi_host_unregister(&dsi->host); +mipi_free: + tegra_mipi_free(dsi->mipi); +disable_vdd: + regulator_disable(dsi->vdd); +disable_clk_lp: + clk_disable_unprepare(dsi->clk_lp); +disable_clk: + clk_disable_unprepare(dsi->clk); +reset: + reset_control_assert(dsi->rst); + return err; +} + +static int tegra_dsi_remove(struct platform_device *pdev) +{ + struct tegra_dsi *dsi = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&dsi->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + tegra_output_remove(&dsi->output); + + mipi_dsi_host_unregister(&dsi->host); + tegra_mipi_free(dsi->mipi); + + regulator_disable(dsi->vdd); + clk_disable_unprepare(dsi->clk_lp); + clk_disable_unprepare(dsi->clk); + reset_control_assert(dsi->rst); + + return 0; +} + +static const struct of_device_id tegra_dsi_of_match[] = { + { .compatible = "nvidia,tegra114-dsi", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_dsi_of_match); + +struct platform_driver tegra_dsi_driver = { + .driver = { + .name = "tegra-dsi", + .of_match_table = tegra_dsi_of_match, + }, + .probe = tegra_dsi_probe, + .remove = 
tegra_dsi_remove, +}; diff --git a/kernel/drivers/gpu/drm/tegra/dsi.h b/kernel/drivers/gpu/drm/tegra/dsi.h new file mode 100644 index 000000000..bad1006a5 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/dsi.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef DRM_TEGRA_DSI_H +#define DRM_TEGRA_DSI_H + +#define DSI_INCR_SYNCPT 0x00 +#define DSI_INCR_SYNCPT_CONTROL 0x01 +#define DSI_INCR_SYNCPT_ERROR 0x02 +#define DSI_CTXSW 0x08 +#define DSI_RD_DATA 0x09 +#define DSI_WR_DATA 0x0a +#define DSI_POWER_CONTROL 0x0b +#define DSI_POWER_CONTROL_ENABLE (1 << 0) +#define DSI_INT_ENABLE 0x0c +#define DSI_INT_STATUS 0x0d +#define DSI_INT_MASK 0x0e +#define DSI_HOST_CONTROL 0x0f +#define DSI_HOST_CONTROL_FIFO_RESET (1 << 21) +#define DSI_HOST_CONTROL_CRC_RESET (1 << 20) +#define DSI_HOST_CONTROL_TX_TRIG_SOL (0 << 12) +#define DSI_HOST_CONTROL_TX_TRIG_FIFO (1 << 12) +#define DSI_HOST_CONTROL_TX_TRIG_HOST (2 << 12) +#define DSI_HOST_CONTROL_RAW (1 << 6) +#define DSI_HOST_CONTROL_HS (1 << 5) +#define DSI_HOST_CONTROL_FIFO_SEL (1 << 4) +#define DSI_HOST_CONTROL_IMM_BTA (1 << 3) +#define DSI_HOST_CONTROL_PKT_BTA (1 << 2) +#define DSI_HOST_CONTROL_CS (1 << 1) +#define DSI_HOST_CONTROL_ECC (1 << 0) +#define DSI_CONTROL 0x10 +#define DSI_CONTROL_HS_CLK_CTRL (1 << 20) +#define DSI_CONTROL_CHANNEL(c) (((c) & 0x3) << 16) +#define DSI_CONTROL_FORMAT(f) (((f) & 0x3) << 12) +#define DSI_CONTROL_TX_TRIG(x) (((x) & 0x3) << 8) +#define DSI_CONTROL_LANES(n) (((n) & 0x3) << 4) +#define DSI_CONTROL_DCS_ENABLE (1 << 3) +#define DSI_CONTROL_SOURCE(s) (((s) & 0x1) << 2) +#define DSI_CONTROL_VIDEO_ENABLE (1 << 1) +#define DSI_CONTROL_HOST_ENABLE (1 << 0) +#define DSI_SOL_DELAY 0x11 +#define DSI_MAX_THRESHOLD 0x12 +#define DSI_TRIGGER 0x13 +#define DSI_TRIGGER_HOST (1 << 1) +#define DSI_TRIGGER_VIDEO (1 << 0) +#define DSI_TX_CRC 0x14 +#define DSI_STATUS 0x15 +#define DSI_STATUS_IDLE (1 << 10) +#define DSI_STATUS_UNDERFLOW (1 << 9) +#define DSI_STATUS_OVERFLOW (1 << 8) +#define DSI_INIT_SEQ_CONTROL 0x1a +#define DSI_INIT_SEQ_DATA_0 0x1b +#define DSI_INIT_SEQ_DATA_1 0x1c +#define DSI_INIT_SEQ_DATA_2 0x1d +#define DSI_INIT_SEQ_DATA_3 0x1e +#define DSI_INIT_SEQ_DATA_4 0x1f +#define DSI_INIT_SEQ_DATA_5 0x20 +#define DSI_INIT_SEQ_DATA_6 0x21 +#define DSI_INIT_SEQ_DATA_7 0x22 +#define DSI_PKT_SEQ_0_LO 0x23 +#define DSI_PKT_SEQ_0_HI 0x24 +#define DSI_PKT_SEQ_1_LO 0x25 +#define DSI_PKT_SEQ_1_HI 0x26 +#define DSI_PKT_SEQ_2_LO 0x27 +#define DSI_PKT_SEQ_2_HI 0x28 +#define DSI_PKT_SEQ_3_LO 0x29 +#define DSI_PKT_SEQ_3_HI 0x2a +#define DSI_PKT_SEQ_4_LO 0x2b +#define DSI_PKT_SEQ_4_HI 0x2c +#define DSI_PKT_SEQ_5_LO 0x2d +#define DSI_PKT_SEQ_5_HI 0x2e +#define DSI_DCS_CMDS 0x33 +#define DSI_PKT_LEN_0_1 0x34 +#define DSI_PKT_LEN_2_3 0x35 +#define DSI_PKT_LEN_4_5 0x36 +#define DSI_PKT_LEN_6_7 0x37 +#define DSI_PHY_TIMING_0 0x3c +#define DSI_PHY_TIMING_1 0x3d +#define DSI_PHY_TIMING_2 0x3e +#define DSI_BTA_TIMING 0x3f + +#define DSI_TIMING_FIELD(value, period, hwinc) \ + ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff) + +#define DSI_TIMEOUT_0 0x44 +#define DSI_TIMEOUT_LRX(x) (((x) & 0xffff) << 16) +#define DSI_TIMEOUT_HTX(x) (((x) & 0xffff) << 0) +#define DSI_TIMEOUT_1 0x45 +#define DSI_TIMEOUT_PR(x) (((x) & 0xffff) << 16) +#define DSI_TIMEOUT_TA(x) (((x) & 0xffff) << 0) +#define DSI_TO_TALLY 0x46 +#define DSI_TALLY_TA(x) 
(((x) & 0xff) << 16) +#define DSI_TALLY_LRX(x) (((x) & 0xff) << 8) +#define DSI_TALLY_HTX(x) (((x) & 0xff) << 0) +#define DSI_PAD_CONTROL_0 0x4b +#define DSI_PAD_CONTROL_VS1_PDIO(x) (((x) & 0xf) << 0) +#define DSI_PAD_CONTROL_VS1_PDIO_CLK (1 << 8) +#define DSI_PAD_CONTROL_VS1_PULLDN(x) (((x) & 0xf) << 16) +#define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24) +#define DSI_PAD_CONTROL_CD 0x4c +#define DSI_PAD_CD_STATUS 0x4d +#define DSI_VIDEO_MODE_CONTROL 0x4e +#define DSI_PAD_CONTROL_1 0x4f +#define DSI_PAD_CONTROL_2 0x50 +#define DSI_PAD_OUT_CLK(x) (((x) & 0x7) << 0) +#define DSI_PAD_LP_DN(x) (((x) & 0x7) << 4) +#define DSI_PAD_LP_UP(x) (((x) & 0x7) << 8) +#define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12) +#define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16) +#define DSI_PAD_CONTROL_3 0x51 +#define DSI_PAD_CONTROL_4 0x52 +#define DSI_GANGED_MODE_CONTROL 0x53 +#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0) +#define DSI_GANGED_MODE_START 0x54 +#define DSI_GANGED_MODE_SIZE 0x55 +#define DSI_RAW_DATA_BYTE_COUNT 0x56 +#define DSI_ULTRA_LOW_POWER_CONTROL 0x57 +#define DSI_INIT_SEQ_DATA_8 0x58 +#define DSI_INIT_SEQ_DATA_9 0x59 +#define DSI_INIT_SEQ_DATA_10 0x5a +#define DSI_INIT_SEQ_DATA_11 0x5b +#define DSI_INIT_SEQ_DATA_12 0x5c +#define DSI_INIT_SEQ_DATA_13 0x5d +#define DSI_INIT_SEQ_DATA_14 0x5e +#define DSI_INIT_SEQ_DATA_15 0x5f + +/* + * pixel format as used in the DSI_CONTROL_FORMAT field + */ +enum tegra_dsi_format { + TEGRA_DSI_FORMAT_16P, + TEGRA_DSI_FORMAT_18NP, + TEGRA_DSI_FORMAT_18P, + TEGRA_DSI_FORMAT_24P, +}; + +#endif diff --git a/kernel/drivers/gpu/drm/tegra/fb.c b/kernel/drivers/gpu/drm/tegra/fb.c new file mode 100644 index 000000000..397fb34d5 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/fb.c @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2012-2013 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * Based on the KMS/FB CMA helpers + * Copyright (C) 2012 Analog Device Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "drm.h" +#include "gem.h" + +static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb) +{ + return container_of(fb, struct tegra_fb, base); +} + +#ifdef CONFIG_DRM_TEGRA_FBDEV +static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper) +{ + return container_of(helper, struct tegra_fbdev, base); +} +#endif + +struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, + unsigned int index) +{ + struct tegra_fb *fb = to_tegra_fb(framebuffer); + + if (index >= drm_format_num_planes(framebuffer->pixel_format)) + return NULL; + + return fb->planes[index]; +} + +bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer) +{ + struct tegra_fb *fb = to_tegra_fb(framebuffer); + + if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP) + return true; + + return false; +} + +int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, + struct tegra_bo_tiling *tiling) +{ + struct tegra_fb *fb = to_tegra_fb(framebuffer); + + /* TODO: handle YUV formats? 
*/ + *tiling = fb->planes[0]->tiling; + + return 0; +} + +static void tegra_fb_destroy(struct drm_framebuffer *framebuffer) +{ + struct tegra_fb *fb = to_tegra_fb(framebuffer); + unsigned int i; + + for (i = 0; i < fb->num_planes; i++) { + struct tegra_bo *bo = fb->planes[i]; + + if (bo) { + if (bo->pages && bo->vaddr) + vunmap(bo->vaddr); + + drm_gem_object_unreference_unlocked(&bo->gem); + } + } + + drm_framebuffer_cleanup(framebuffer); + kfree(fb->planes); + kfree(fb); +} + +static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer, + struct drm_file *file, unsigned int *handle) +{ + struct tegra_fb *fb = to_tegra_fb(framebuffer); + + return drm_gem_handle_create(file, &fb->planes[0]->gem, handle); +} + +static struct drm_framebuffer_funcs tegra_fb_funcs = { + .destroy = tegra_fb_destroy, + .create_handle = tegra_fb_create_handle, +}; + +static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, + struct drm_mode_fb_cmd2 *mode_cmd, + struct tegra_bo **planes, + unsigned int num_planes) +{ + struct tegra_fb *fb; + unsigned int i; + int err; + + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) + return ERR_PTR(-ENOMEM); + + fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL); + if (!fb->planes) { + kfree(fb); + return ERR_PTR(-ENOMEM); + } + + fb->num_planes = num_planes; + + drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); + + for (i = 0; i < fb->num_planes; i++) + fb->planes[i] = planes[i]; + + err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs); + if (err < 0) { + dev_err(drm->dev, "failed to initialize framebuffer: %d\n", + err); + kfree(fb->planes); + kfree(fb); + return ERR_PTR(err); + } + + return fb; +} + +struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd) +{ + unsigned int hsub, vsub, i; + struct tegra_bo *planes[4]; + struct drm_gem_object *gem; + struct tegra_fb *fb; + int err; + + hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format); + + for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) { + unsigned int width = cmd->width / (i ? hsub : 1); + unsigned int height = cmd->height / (i ? 
vsub : 1); + unsigned int size, bpp; + + gem = drm_gem_object_lookup(drm, file, cmd->handles[i]); + if (!gem) { + err = -ENXIO; + goto unreference; + } + + bpp = drm_format_plane_cpp(cmd->pixel_format, i); + + size = (height - 1) * cmd->pitches[i] + + width * bpp + cmd->offsets[i]; + + if (gem->size < size) { + err = -EINVAL; + goto unreference; + } + + planes[i] = to_tegra_bo(gem); + } + + fb = tegra_fb_alloc(drm, cmd, planes, i); + if (IS_ERR(fb)) { + err = PTR_ERR(fb); + goto unreference; + } + + return &fb->base; + +unreference: + while (i--) + drm_gem_object_unreference_unlocked(&planes[i]->gem); + + return ERR_PTR(err); +} + +#ifdef CONFIG_DRM_TEGRA_FBDEV +static struct fb_ops tegra_fb_ops = { + .owner = THIS_MODULE, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_blank = drm_fb_helper_blank, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static int tegra_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct tegra_fbdev *fbdev = to_tegra_fbdev(helper); + struct tegra_drm *tegra = helper->dev->dev_private; + struct drm_device *drm = helper->dev; + struct drm_mode_fb_cmd2 cmd = { 0 }; + unsigned int bytes_per_pixel; + struct drm_framebuffer *fb; + unsigned long offset; + struct fb_info *info; + struct tegra_bo *bo; + size_t size; + int err; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + + cmd.width = sizes->surface_width; + cmd.height = sizes->surface_height; + cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel, + tegra->pitch_align); + cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = cmd.pitches[0] * cmd.height; + + bo = tegra_bo_create(drm, size, 0); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + info = framebuffer_alloc(0, drm->dev); + if (!info) { + dev_err(drm->dev, "failed to allocate framebuffer info\n"); + drm_gem_object_unreference_unlocked(&bo->gem); + return -ENOMEM; + } + + fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1); + if (IS_ERR(fbdev->fb)) { + err = PTR_ERR(fbdev->fb); + dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n", + err); + drm_gem_object_unreference_unlocked(&bo->gem); + goto release; + } + + fb = &fbdev->fb->base; + helper->fb = fb; + helper->fbdev = info; + + info->par = helper; + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &tegra_fb_ops; + + err = fb_alloc_cmap(&info->cmap, 256, 0); + if (err < 0) { + dev_err(drm->dev, "failed to allocate color map: %d\n", err); + goto destroy; + } + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(info, helper, fb->width, fb->height); + + offset = info->var.xoffset * bytes_per_pixel + + info->var.yoffset * fb->pitches[0]; + + if (bo->pages) { + bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!bo->vaddr) { + dev_err(drm->dev, "failed to vmap() framebuffer\n"); + err = -ENOMEM; + goto destroy; + } + } + + drm->mode_config.fb_base = (resource_size_t)bo->paddr; + info->screen_base = (void __iomem *)bo->vaddr + offset; + info->screen_size = size; + info->fix.smem_start = (unsigned long)(bo->paddr + offset); + info->fix.smem_len = size; + + return 0; + +destroy: + drm_framebuffer_unregister_private(fb); + tegra_fb_destroy(fb); +release: + framebuffer_release(info); + return err; +} + +static const struct drm_fb_helper_funcs 
tegra_fb_helper_funcs = { + .fb_probe = tegra_fbdev_probe, +}; + +static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm) +{ + struct tegra_fbdev *fbdev; + + fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); + if (!fbdev) { + dev_err(drm->dev, "failed to allocate DRM fbdev\n"); + return ERR_PTR(-ENOMEM); + } + + drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs); + + return fbdev; +} + +static void tegra_fbdev_free(struct tegra_fbdev *fbdev) +{ + kfree(fbdev); +} + +static int tegra_fbdev_init(struct tegra_fbdev *fbdev, + unsigned int preferred_bpp, + unsigned int num_crtc, + unsigned int max_connectors) +{ + struct drm_device *drm = fbdev->base.dev; + int err; + + err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors); + if (err < 0) { + dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n", + err); + return err; + } + + err = drm_fb_helper_single_add_all_connectors(&fbdev->base); + if (err < 0) { + dev_err(drm->dev, "failed to add connectors: %d\n", err); + goto fini; + } + + err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp); + if (err < 0) { + dev_err(drm->dev, "failed to set initial configuration: %d\n", + err); + goto fini; + } + + return 0; + +fini: + drm_fb_helper_fini(&fbdev->base); + return err; +} + +static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) +{ + struct fb_info *info = fbdev->base.fbdev; + + if (info) { + int err; + + err = unregister_framebuffer(info); + if (err < 0) + DRM_DEBUG_KMS("failed to unregister framebuffer\n"); + + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + + framebuffer_release(info); + } + + if (fbdev->fb) { + drm_framebuffer_unregister_private(&fbdev->fb->base); + drm_framebuffer_remove(&fbdev->fb->base); + } + + drm_fb_helper_fini(&fbdev->base); + tegra_fbdev_free(fbdev); +} + +void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) +{ + if (fbdev) + drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base); +} + +void tegra_fb_output_poll_changed(struct drm_device *drm) +{ + struct tegra_drm *tegra = drm->dev_private; + + if (tegra->fbdev) + drm_fb_helper_hotplug_event(&tegra->fbdev->base); +} +#endif + +int tegra_drm_fb_prepare(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + + tegra->fbdev = tegra_fbdev_create(drm); + if (IS_ERR(tegra->fbdev)) + return PTR_ERR(tegra->fbdev); +#endif + + return 0; +} + +void tegra_drm_fb_free(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + + tegra_fbdev_free(tegra->fbdev); +#endif +} + +int tegra_drm_fb_init(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + int err; + + err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc, + drm->mode_config.num_connector); + if (err < 0) + return err; +#endif + + return 0; +} + +void tegra_drm_fb_exit(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + + tegra_fbdev_exit(tegra->fbdev); +#endif +} diff --git a/kernel/drivers/gpu/drm/tegra/gem.c b/kernel/drivers/gpu/drm/tegra/gem.c new file mode 100644 index 000000000..1217272a5 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/gem.c @@ -0,0 +1,659 @@ +/* + * NVIDIA Tegra DRM GEM helper functions + * + * Copyright (C) 2012 Sascha Hauer, Pengutronix + * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved. + * + * Based on the GEM/CMA helpers + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/dma-buf.h> +#include <linux/iommu.h> +#include <drm/tegra_drm.h> + +#include "drm.h" +#include "gem.h" + +static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo) +{ + return container_of(bo, struct tegra_bo, base); +} + +static void tegra_bo_put(struct host1x_bo *bo) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct drm_device *drm = obj->gem.dev; + + mutex_lock(&drm->struct_mutex); + drm_gem_object_unreference(&obj->gem); + mutex_unlock(&drm->struct_mutex); +} + +static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); + + return obj->paddr; +} + +static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) +{ +} + +static void *tegra_bo_mmap(struct host1x_bo *bo) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); + + return obj->vaddr; +} + +static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) +{ +} + +static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); + + return obj->vaddr + page * PAGE_SIZE; +} + +static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page, + void *addr) +{ +} + +static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct drm_device *drm = obj->gem.dev; + + mutex_lock(&drm->struct_mutex); + drm_gem_object_reference(&obj->gem); + mutex_unlock(&drm->struct_mutex); + + return bo; +} + +static const struct host1x_bo_ops tegra_bo_ops = { + .get = tegra_bo_get, + .put = tegra_bo_put, + .pin = tegra_bo_pin, + .unpin = tegra_bo_unpin, + .mmap = tegra_bo_mmap, + .munmap = tegra_bo_munmap, + .kmap = tegra_bo_kmap, + .kunmap = tegra_bo_kunmap, +}; + +static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) +{ + int prot = IOMMU_READ | IOMMU_WRITE; + ssize_t err; + + if (bo->mm) + return -EBUSY; + + bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL); + if (!bo->mm) + return -ENOMEM; + + err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size, + PAGE_SIZE, 0, 0, 0); + if (err < 0) { + dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n", + err); + goto free; + } + + bo->paddr = bo->mm->start; + + err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, + bo->sgt->nents, prot); + if (err < 0) { + dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err); + goto remove; + } + + bo->size = err; + + return 0; + +remove: + drm_mm_remove_node(bo->mm); +free: + kfree(bo->mm); + return err; +} + +static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) +{ + if (!bo->mm) + return 0; + + iommu_unmap(tegra->domain, bo->paddr, bo->size); + drm_mm_remove_node(bo->mm); + kfree(bo->mm); + + return 0; +} + +static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, + size_t size) +{ + struct tegra_bo *bo; + int err; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return ERR_PTR(-ENOMEM); + + host1x_bo_init(&bo->base, &tegra_bo_ops); + size = round_up(size, PAGE_SIZE); + + err = drm_gem_object_init(drm, &bo->gem, size); + if (err < 0) + goto free; + + err = drm_gem_create_mmap_offset(&bo->gem); + if (err < 0) + goto release; + + return bo; + +release: + drm_gem_object_release(&bo->gem); +free: + kfree(bo); + return ERR_PTR(err); +} + +static void 
tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) +{ + if (bo->pages) { + drm_gem_put_pages(&bo->gem, bo->pages, true, true); + sg_free_table(bo->sgt); + kfree(bo->sgt); + } else if (bo->vaddr) { + dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, + bo->paddr); + } +} + +static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) +{ + struct scatterlist *s; + struct sg_table *sgt; + unsigned int i; + + bo->pages = drm_gem_get_pages(&bo->gem); + if (IS_ERR(bo->pages)) + return PTR_ERR(bo->pages); + + bo->num_pages = bo->gem.size >> PAGE_SHIFT; + + sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); + if (IS_ERR(sgt)) + goto put_pages; + + /* + * Fake up the SG table so that dma_map_sg() can be used to flush the + * pages associated with it. Note that this relies on the fact that + * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is + * only cache maintenance. + * + * TODO: Replace this by drm_clflash_sg() once it can be implemented + * without relying on symbols that are not exported. + */ + for_each_sg(sgt->sgl, s, sgt->nents, i) + sg_dma_address(s) = sg_phys(s); + + if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) + goto release_sgt; + + bo->sgt = sgt; + + return 0; + +release_sgt: + sg_free_table(sgt); + kfree(sgt); + sgt = ERR_PTR(-ENOMEM); +put_pages: + drm_gem_put_pages(&bo->gem, bo->pages, false, false); + return PTR_ERR(sgt); +} + +static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) +{ + struct tegra_drm *tegra = drm->dev_private; + int err; + + if (tegra->domain) { + err = tegra_bo_get_pages(drm, bo); + if (err < 0) + return err; + + err = tegra_bo_iommu_map(tegra, bo); + if (err < 0) { + tegra_bo_free(drm, bo); + return err; + } + } else { + size_t size = bo->gem.size; + + bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, + GFP_KERNEL | __GFP_NOWARN); + if (!bo->vaddr) { + dev_err(drm->dev, + "failed to allocate buffer of size %zu\n", + size); + return -ENOMEM; + } + } + + return 0; +} + +struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, + unsigned long flags) +{ + struct tegra_bo *bo; + int err; + + bo = tegra_bo_alloc_object(drm, size); + if (IS_ERR(bo)) + return bo; + + err = tegra_bo_alloc(drm, bo); + if (err < 0) + goto release; + + if (flags & DRM_TEGRA_GEM_CREATE_TILED) + bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; + + if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) + bo->flags |= TEGRA_BO_BOTTOM_UP; + + return bo; + +release: + drm_gem_object_release(&bo->gem); + kfree(bo); + return ERR_PTR(err); +} + +struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, + struct drm_device *drm, + size_t size, + unsigned long flags, + u32 *handle) +{ + struct tegra_bo *bo; + int err; + + bo = tegra_bo_create(drm, size, flags); + if (IS_ERR(bo)) + return bo; + + err = drm_gem_handle_create(file, &bo->gem, handle); + if (err) { + tegra_bo_free_object(&bo->gem); + return ERR_PTR(err); + } + + drm_gem_object_unreference_unlocked(&bo->gem); + + return bo; +} + +static struct tegra_bo *tegra_bo_import(struct drm_device *drm, + struct dma_buf *buf) +{ + struct tegra_drm *tegra = drm->dev_private; + struct dma_buf_attachment *attach; + struct tegra_bo *bo; + int err; + + bo = tegra_bo_alloc_object(drm, buf->size); + if (IS_ERR(bo)) + return bo; + + attach = dma_buf_attach(buf, drm->dev); + if (IS_ERR(attach)) { + err = PTR_ERR(attach); + goto free; + } + + get_dma_buf(buf); + + bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE); + if (!bo->sgt) { + err = 
-ENOMEM; + goto detach; + } + + if (IS_ERR(bo->sgt)) { + err = PTR_ERR(bo->sgt); + goto detach; + } + + if (tegra->domain) { + err = tegra_bo_iommu_map(tegra, bo); + if (err < 0) + goto detach; + } else { + if (bo->sgt->nents > 1) { + err = -EINVAL; + goto detach; + } + + bo->paddr = sg_dma_address(bo->sgt->sgl); + } + + bo->gem.import_attach = attach; + + return bo; + +detach: + if (!IS_ERR_OR_NULL(bo->sgt)) + dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE); + + dma_buf_detach(buf, attach); + dma_buf_put(buf); +free: + drm_gem_object_release(&bo->gem); + kfree(bo); + return ERR_PTR(err); +} + +void tegra_bo_free_object(struct drm_gem_object *gem) +{ + struct tegra_drm *tegra = gem->dev->dev_private; + struct tegra_bo *bo = to_tegra_bo(gem); + + if (tegra->domain) + tegra_bo_iommu_unmap(tegra, bo); + + if (gem->import_attach) { + dma_buf_unmap_attachment(gem->import_attach, bo->sgt, + DMA_TO_DEVICE); + drm_prime_gem_destroy(gem, NULL); + } else { + tegra_bo_free(gem->dev, bo); + } + + drm_gem_object_release(gem); + kfree(bo); +} + +int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, + struct drm_mode_create_dumb *args) +{ + unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + struct tegra_drm *tegra = drm->dev_private; + struct tegra_bo *bo; + + args->pitch = round_up(min_pitch, tegra->pitch_align); + args->size = args->pitch * args->height; + + bo = tegra_bo_create_with_handle(file, drm, args->size, 0, + &args->handle); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + return 0; +} + +int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, + u32 handle, u64 *offset) +{ + struct drm_gem_object *gem; + struct tegra_bo *bo; + + mutex_lock(&drm->struct_mutex); + + gem = drm_gem_object_lookup(drm, file, handle); + if (!gem) { + dev_err(drm->dev, "failed to lookup GEM object\n"); + mutex_unlock(&drm->struct_mutex); + return -EINVAL; + } + + bo = to_tegra_bo(gem); + + *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); + + drm_gem_object_unreference(gem); + + mutex_unlock(&drm->struct_mutex); + + return 0; +} + +static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *gem = vma->vm_private_data; + struct tegra_bo *bo = to_tegra_bo(gem); + struct page *page; + pgoff_t offset; + int err; + + if (!bo->pages) + return VM_FAULT_SIGBUS; + + offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; + page = bo->pages[offset]; + + err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + switch (err) { + case -EAGAIN: + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + return VM_FAULT_NOPAGE; + + case -ENOMEM: + return VM_FAULT_OOM; + } + + return VM_FAULT_SIGBUS; +} + +const struct vm_operations_struct tegra_bo_vm_ops = { + .fault = tegra_bo_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct drm_gem_object *gem; + struct tegra_bo *bo; + int ret; + + ret = drm_gem_mmap(file, vma); + if (ret) + return ret; + + gem = vma->vm_private_data; + bo = to_tegra_bo(gem); + + if (!bo->pages) { + unsigned long vm_pgoff = vma->vm_pgoff; + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; + + ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr, + bo->paddr, gem->size); + if (ret) { + drm_gem_vm_close(vma); + return ret; + } + + vma->vm_pgoff = vm_pgoff; + } else { + pgprot_t prot = vm_get_page_prot(vma->vm_flags); + + vma->vm_flags |= VM_MIXEDMAP; + vma->vm_flags 
&= ~VM_PFNMAP; + + vma->vm_page_prot = pgprot_writecombine(prot); + } + + return 0; +} + +static struct sg_table * +tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_object *gem = attach->dmabuf->priv; + struct tegra_bo *bo = to_tegra_bo(gem); + struct sg_table *sgt; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + if (bo->pages) { + struct scatterlist *sg; + unsigned int i; + + if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) + goto free; + + for_each_sg(sgt->sgl, sg, bo->num_pages, i) + sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); + + if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + goto free; + } else { + if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + goto free; + + sg_dma_address(sgt->sgl) = bo->paddr; + sg_dma_len(sgt->sgl) = gem->size; + } + + return sgt; + +free: + sg_free_table(sgt); + kfree(sgt); + return NULL; +} + +static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + struct drm_gem_object *gem = attach->dmabuf->priv; + struct tegra_bo *bo = to_tegra_bo(gem); + + if (bo->pages) + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + + sg_free_table(sgt); + kfree(sgt); +} + +static void tegra_gem_prime_release(struct dma_buf *buf) +{ + drm_gem_dmabuf_release(buf); +} + +static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf, + unsigned long page) +{ + return NULL; +} + +static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf, + unsigned long page, + void *addr) +{ +} + +static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page) +{ + return NULL; +} + +static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page, + void *addr) +{ +} + +static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) +{ + return -EINVAL; +} + +static void *tegra_gem_prime_vmap(struct dma_buf *buf) +{ + struct drm_gem_object *gem = buf->priv; + struct tegra_bo *bo = to_tegra_bo(gem); + + return bo->vaddr; +} + +static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr) +{ +} + +static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = { + .map_dma_buf = tegra_gem_prime_map_dma_buf, + .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, + .release = tegra_gem_prime_release, + .kmap_atomic = tegra_gem_prime_kmap_atomic, + .kunmap_atomic = tegra_gem_prime_kunmap_atomic, + .kmap = tegra_gem_prime_kmap, + .kunmap = tegra_gem_prime_kunmap, + .mmap = tegra_gem_prime_mmap, + .vmap = tegra_gem_prime_vmap, + .vunmap = tegra_gem_prime_vunmap, +}; + +struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, + struct drm_gem_object *gem, + int flags) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &tegra_gem_prime_dmabuf_ops; + exp_info.size = gem->size; + exp_info.flags = flags; + exp_info.priv = gem; + + return dma_buf_export(&exp_info); +} + +struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, + struct dma_buf *buf) +{ + struct tegra_bo *bo; + + if (buf->ops == &tegra_gem_prime_dmabuf_ops) { + struct drm_gem_object *gem = buf->priv; + + if (gem->dev == drm) { + drm_gem_object_reference(gem); + return gem; + } + } + + bo = tegra_bo_import(drm, buf); + if (IS_ERR(bo)) + return ERR_CAST(bo); + + return &bo->gem; +} diff --git a/kernel/drivers/gpu/drm/tegra/gem.h b/kernel/drivers/gpu/drm/tegra/gem.h new file mode 100644 index 000000000..6c5f12ac0 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/gem.h @@ -0,0 +1,78 @@ +/* + * 
Tegra host1x GEM implementation + * + * Copyright (c) 2012-2013, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __HOST1X_GEM_H +#define __HOST1X_GEM_H + +#include <linux/host1x.h> + +#include <drm/drm.h> +#include <drm/drmP.h> +#include <drm/drm_gem.h> + +#define TEGRA_BO_BOTTOM_UP (1 << 0) + +enum tegra_bo_tiling_mode { + TEGRA_BO_TILING_MODE_PITCH, + TEGRA_BO_TILING_MODE_TILED, + TEGRA_BO_TILING_MODE_BLOCK, +}; + +struct tegra_bo_tiling { + enum tegra_bo_tiling_mode mode; + unsigned long value; +}; + +struct tegra_bo { + struct drm_gem_object gem; + struct host1x_bo base; + unsigned long flags; + struct sg_table *sgt; + dma_addr_t paddr; + void *vaddr; + + struct drm_mm_node *mm; + unsigned long num_pages; + struct page **pages; + /* size of IOMMU mapping */ + size_t size; + + struct tegra_bo_tiling tiling; +}; + +static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem) +{ + return container_of(gem, struct tegra_bo, gem); +} + +struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, + unsigned long flags); +struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, + struct drm_device *drm, + size_t size, + unsigned long flags, + u32 *handle); +void tegra_bo_free_object(struct drm_gem_object *gem); +int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, + struct drm_mode_create_dumb *args); +int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, + u32 handle, u64 *offset); + +int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); + +extern const struct vm_operations_struct tegra_bo_vm_ops; + +struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, + struct drm_gem_object *gem, + int flags); +struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, + struct dma_buf *buf); + +#endif diff --git a/kernel/drivers/gpu/drm/tegra/gr2d.c b/kernel/drivers/gpu/drm/tegra/gr2d.c new file mode 100644 index 000000000..02cd3e37a --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/gr2d.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2012-2013, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> + +#include "drm.h" +#include "gem.h" +#include "gr2d.h" + +struct gr2d { + struct tegra_drm_client client; + struct host1x_channel *channel; + struct clk *clk; + + DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS); +}; + +static inline struct gr2d *to_gr2d(struct tegra_drm_client *client) +{ + return container_of(client, struct gr2d, client); +} + +static int gr2d_init(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->parent); + unsigned long flags = HOST1X_SYNCPT_HAS_BASE; + struct gr2d *gr2d = to_gr2d(drm); + + gr2d->channel = host1x_channel_request(client->dev); + if (!gr2d->channel) + return -ENOMEM; + + client->syncpts[0] = host1x_syncpt_request(client->dev, flags); + if (!client->syncpts[0]) { + host1x_channel_free(gr2d->channel); + return -ENOMEM; + } + + return tegra_drm_register_client(dev->dev_private, drm); +} + +static int gr2d_exit(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->parent); + struct gr2d *gr2d = to_gr2d(drm); + int err; + + err = tegra_drm_unregister_client(dev->dev_private, drm); + if (err < 0) + return err; + + host1x_syncpt_free(client->syncpts[0]); + host1x_channel_free(gr2d->channel); + + return 0; +} + +static const struct host1x_client_ops gr2d_client_ops = { + .init = gr2d_init, + .exit = gr2d_exit, +}; + +static int gr2d_open_channel(struct tegra_drm_client *client, + struct tegra_drm_context *context) +{ + struct gr2d *gr2d = to_gr2d(client); + + context->channel = host1x_channel_get(gr2d->channel); + if (!context->channel) + return -ENOMEM; + + return 0; +} + +static void gr2d_close_channel(struct tegra_drm_context *context) +{ + host1x_channel_put(context->channel); +} + +static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset) +{ + struct gr2d *gr2d = dev_get_drvdata(dev); + + switch (class) { + case HOST1X_CLASS_HOST1X: + if (offset == 0x2b) + return 1; + + break; + + case HOST1X_CLASS_GR2D: + case HOST1X_CLASS_GR2D_SB: + if (offset >= GR2D_NUM_REGS) + break; + + if (test_bit(offset, gr2d->addr_regs)) + return 1; + + break; + } + + return 0; +} + +static const struct tegra_drm_client_ops gr2d_ops = { + .open_channel = gr2d_open_channel, + .close_channel = gr2d_close_channel, + .is_addr_reg = gr2d_is_addr_reg, + .submit = tegra_drm_submit, +}; + +static const struct of_device_id gr2d_match[] = { + { .compatible = "nvidia,tegra30-gr2d" }, + { .compatible = "nvidia,tegra20-gr2d" }, + { }, +}; +MODULE_DEVICE_TABLE(of, gr2d_match); + +static const u32 gr2d_addr_regs[] = { + GR2D_UA_BASE_ADDR, + GR2D_VA_BASE_ADDR, + GR2D_PAT_BASE_ADDR, + GR2D_DSTA_BASE_ADDR, + GR2D_DSTB_BASE_ADDR, + GR2D_DSTC_BASE_ADDR, + GR2D_SRCA_BASE_ADDR, + GR2D_SRCB_BASE_ADDR, + GR2D_SRC_BASE_ADDR_SB, + GR2D_DSTA_BASE_ADDR_SB, + GR2D_DSTB_BASE_ADDR_SB, + GR2D_UA_BASE_ADDR_SB, + GR2D_VA_BASE_ADDR_SB, +}; + +static int gr2d_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct host1x_syncpt **syncpts; + struct gr2d *gr2d; + unsigned int i; + int err; + + gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL); + if (!gr2d) + return -ENOMEM; + + syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL); + if (!syncpts) + return -ENOMEM; + + gr2d->clk = devm_clk_get(dev, NULL); + if (IS_ERR(gr2d->clk)) { + dev_err(dev, "cannot get clock\n"); + return PTR_ERR(gr2d->clk); + } + + err = clk_prepare_enable(gr2d->clk); + if (err) { + 
dev_err(dev, "cannot turn on clock\n"); + return err; + } + + INIT_LIST_HEAD(&gr2d->client.base.list); + gr2d->client.base.ops = &gr2d_client_ops; + gr2d->client.base.dev = dev; + gr2d->client.base.class = HOST1X_CLASS_GR2D; + gr2d->client.base.syncpts = syncpts; + gr2d->client.base.num_syncpts = 1; + + INIT_LIST_HEAD(&gr2d->client.list); + gr2d->client.ops = &gr2d_ops; + + err = host1x_client_register(&gr2d->client.base); + if (err < 0) { + dev_err(dev, "failed to register host1x client: %d\n", err); + clk_disable_unprepare(gr2d->clk); + return err; + } + + /* initialize address register map */ + for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++) + set_bit(gr2d_addr_regs[i], gr2d->addr_regs); + + platform_set_drvdata(pdev, gr2d); + + return 0; +} + +static int gr2d_remove(struct platform_device *pdev) +{ + struct gr2d *gr2d = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&gr2d->client.base); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + clk_disable_unprepare(gr2d->clk); + + return 0; +} + +struct platform_driver tegra_gr2d_driver = { + .driver = { + .name = "tegra-gr2d", + .of_match_table = gr2d_match, + }, + .probe = gr2d_probe, + .remove = gr2d_remove, +}; diff --git a/kernel/drivers/gpu/drm/tegra/gr2d.h b/kernel/drivers/gpu/drm/tegra/gr2d.h new file mode 100644 index 000000000..4d7304fb0 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/gr2d.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef TEGRA_GR2D_H +#define TEGRA_GR2D_H + +#define GR2D_UA_BASE_ADDR 0x1a +#define GR2D_VA_BASE_ADDR 0x1b +#define GR2D_PAT_BASE_ADDR 0x26 +#define GR2D_DSTA_BASE_ADDR 0x2b +#define GR2D_DSTB_BASE_ADDR 0x2c +#define GR2D_DSTC_BASE_ADDR 0x2d +#define GR2D_SRCA_BASE_ADDR 0x31 +#define GR2D_SRCB_BASE_ADDR 0x32 +#define GR2D_SRC_BASE_ADDR_SB 0x48 +#define GR2D_DSTA_BASE_ADDR_SB 0x49 +#define GR2D_DSTB_BASE_ADDR_SB 0x4a +#define GR2D_UA_BASE_ADDR_SB 0x4b +#define GR2D_VA_BASE_ADDR_SB 0x4c + +#define GR2D_NUM_REGS 0x4d + +#endif diff --git a/kernel/drivers/gpu/drm/tegra/gr3d.c b/kernel/drivers/gpu/drm/tegra/gr3d.c new file mode 100644 index 000000000..0b3f2b977 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/gr3d.c @@ -0,0 +1,358 @@ +/* + * Copyright (C) 2013 Avionic Design GmbH + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/host1x.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <soc/tegra/pmc.h> + +#include "drm.h" +#include "gem.h" +#include "gr3d.h" + +struct gr3d { + struct tegra_drm_client client; + struct host1x_channel *channel; + struct clk *clk_secondary; + struct clk *clk; + struct reset_control *rst_secondary; + struct reset_control *rst; + + DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); +}; + +static inline struct gr3d *to_gr3d(struct tegra_drm_client *client) +{ + return container_of(client, struct gr3d, client); +} + +static int gr3d_init(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->parent); + unsigned long flags = HOST1X_SYNCPT_HAS_BASE; + struct gr3d *gr3d = to_gr3d(drm); + + gr3d->channel = host1x_channel_request(client->dev); + if (!gr3d->channel) + return -ENOMEM; + + client->syncpts[0] = host1x_syncpt_request(client->dev, flags); + if (!client->syncpts[0]) { + host1x_channel_free(gr3d->channel); + return -ENOMEM; + } + + return tegra_drm_register_client(dev->dev_private, drm); +} + +static int gr3d_exit(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->parent); + struct gr3d *gr3d = to_gr3d(drm); + int err; + + err = tegra_drm_unregister_client(dev->dev_private, drm); + if (err < 0) + return err; + + host1x_syncpt_free(client->syncpts[0]); + host1x_channel_free(gr3d->channel); + + return 0; +} + +static const struct host1x_client_ops gr3d_client_ops = { + .init = gr3d_init, + .exit = gr3d_exit, +}; + +static int gr3d_open_channel(struct tegra_drm_client *client, + struct tegra_drm_context *context) +{ + struct gr3d *gr3d = to_gr3d(client); + + context->channel = host1x_channel_get(gr3d->channel); + if (!context->channel) + return -ENOMEM; + + return 0; +} + +static void gr3d_close_channel(struct tegra_drm_context *context) +{ + host1x_channel_put(context->channel); +} + +static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset) +{ + struct gr3d *gr3d = dev_get_drvdata(dev); + + switch (class) { + case HOST1X_CLASS_HOST1X: + if (offset == 0x2b) + return 1; + + break; + + case HOST1X_CLASS_GR3D: + if (offset >= GR3D_NUM_REGS) + break; + + if (test_bit(offset, gr3d->addr_regs)) + return 1; + + break; + } + + return 0; +} + +static const struct tegra_drm_client_ops gr3d_ops = { + .open_channel = gr3d_open_channel, + .close_channel = gr3d_close_channel, + .is_addr_reg = gr3d_is_addr_reg, + .submit = tegra_drm_submit, +}; + +static const struct of_device_id tegra_gr3d_match[] = { + { .compatible = "nvidia,tegra114-gr3d" }, + { .compatible = "nvidia,tegra30-gr3d" }, + { .compatible = "nvidia,tegra20-gr3d" }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra_gr3d_match); + +static const u32 gr3d_addr_regs[] = { + GR3D_IDX_ATTRIBUTE( 0), + GR3D_IDX_ATTRIBUTE( 1), + GR3D_IDX_ATTRIBUTE( 2), + GR3D_IDX_ATTRIBUTE( 3), + GR3D_IDX_ATTRIBUTE( 4), + GR3D_IDX_ATTRIBUTE( 5), + GR3D_IDX_ATTRIBUTE( 6), + GR3D_IDX_ATTRIBUTE( 7), + GR3D_IDX_ATTRIBUTE( 8), + GR3D_IDX_ATTRIBUTE( 9), + GR3D_IDX_ATTRIBUTE(10), + GR3D_IDX_ATTRIBUTE(11), + GR3D_IDX_ATTRIBUTE(12), + GR3D_IDX_ATTRIBUTE(13), + GR3D_IDX_ATTRIBUTE(14), + GR3D_IDX_ATTRIBUTE(15), + GR3D_IDX_INDEX_BASE, + GR3D_QR_ZTAG_ADDR, + GR3D_QR_CTAG_ADDR, + GR3D_QR_CZ_ADDR, + GR3D_TEX_TEX_ADDR( 0), + GR3D_TEX_TEX_ADDR( 1), + GR3D_TEX_TEX_ADDR( 2), + GR3D_TEX_TEX_ADDR( 3), + 
GR3D_TEX_TEX_ADDR( 4), + GR3D_TEX_TEX_ADDR( 5), + GR3D_TEX_TEX_ADDR( 6), + GR3D_TEX_TEX_ADDR( 7), + GR3D_TEX_TEX_ADDR( 8), + GR3D_TEX_TEX_ADDR( 9), + GR3D_TEX_TEX_ADDR(10), + GR3D_TEX_TEX_ADDR(11), + GR3D_TEX_TEX_ADDR(12), + GR3D_TEX_TEX_ADDR(13), + GR3D_TEX_TEX_ADDR(14), + GR3D_TEX_TEX_ADDR(15), + GR3D_DW_MEMORY_OUTPUT_ADDRESS, + GR3D_GLOBAL_SURFADDR( 0), + GR3D_GLOBAL_SURFADDR( 1), + GR3D_GLOBAL_SURFADDR( 2), + GR3D_GLOBAL_SURFADDR( 3), + GR3D_GLOBAL_SURFADDR( 4), + GR3D_GLOBAL_SURFADDR( 5), + GR3D_GLOBAL_SURFADDR( 6), + GR3D_GLOBAL_SURFADDR( 7), + GR3D_GLOBAL_SURFADDR( 8), + GR3D_GLOBAL_SURFADDR( 9), + GR3D_GLOBAL_SURFADDR(10), + GR3D_GLOBAL_SURFADDR(11), + GR3D_GLOBAL_SURFADDR(12), + GR3D_GLOBAL_SURFADDR(13), + GR3D_GLOBAL_SURFADDR(14), + GR3D_GLOBAL_SURFADDR(15), + GR3D_GLOBAL_SPILLSURFADDR, + GR3D_GLOBAL_SURFOVERADDR( 0), + GR3D_GLOBAL_SURFOVERADDR( 1), + GR3D_GLOBAL_SURFOVERADDR( 2), + GR3D_GLOBAL_SURFOVERADDR( 3), + GR3D_GLOBAL_SURFOVERADDR( 4), + GR3D_GLOBAL_SURFOVERADDR( 5), + GR3D_GLOBAL_SURFOVERADDR( 6), + GR3D_GLOBAL_SURFOVERADDR( 7), + GR3D_GLOBAL_SURFOVERADDR( 8), + GR3D_GLOBAL_SURFOVERADDR( 9), + GR3D_GLOBAL_SURFOVERADDR(10), + GR3D_GLOBAL_SURFOVERADDR(11), + GR3D_GLOBAL_SURFOVERADDR(12), + GR3D_GLOBAL_SURFOVERADDR(13), + GR3D_GLOBAL_SURFOVERADDR(14), + GR3D_GLOBAL_SURFOVERADDR(15), + GR3D_GLOBAL_SAMP01SURFADDR( 0), + GR3D_GLOBAL_SAMP01SURFADDR( 1), + GR3D_GLOBAL_SAMP01SURFADDR( 2), + GR3D_GLOBAL_SAMP01SURFADDR( 3), + GR3D_GLOBAL_SAMP01SURFADDR( 4), + GR3D_GLOBAL_SAMP01SURFADDR( 5), + GR3D_GLOBAL_SAMP01SURFADDR( 6), + GR3D_GLOBAL_SAMP01SURFADDR( 7), + GR3D_GLOBAL_SAMP01SURFADDR( 8), + GR3D_GLOBAL_SAMP01SURFADDR( 9), + GR3D_GLOBAL_SAMP01SURFADDR(10), + GR3D_GLOBAL_SAMP01SURFADDR(11), + GR3D_GLOBAL_SAMP01SURFADDR(12), + GR3D_GLOBAL_SAMP01SURFADDR(13), + GR3D_GLOBAL_SAMP01SURFADDR(14), + GR3D_GLOBAL_SAMP01SURFADDR(15), + GR3D_GLOBAL_SAMP23SURFADDR( 0), + GR3D_GLOBAL_SAMP23SURFADDR( 1), + GR3D_GLOBAL_SAMP23SURFADDR( 2), + GR3D_GLOBAL_SAMP23SURFADDR( 3), + GR3D_GLOBAL_SAMP23SURFADDR( 4), + GR3D_GLOBAL_SAMP23SURFADDR( 5), + GR3D_GLOBAL_SAMP23SURFADDR( 6), + GR3D_GLOBAL_SAMP23SURFADDR( 7), + GR3D_GLOBAL_SAMP23SURFADDR( 8), + GR3D_GLOBAL_SAMP23SURFADDR( 9), + GR3D_GLOBAL_SAMP23SURFADDR(10), + GR3D_GLOBAL_SAMP23SURFADDR(11), + GR3D_GLOBAL_SAMP23SURFADDR(12), + GR3D_GLOBAL_SAMP23SURFADDR(13), + GR3D_GLOBAL_SAMP23SURFADDR(14), + GR3D_GLOBAL_SAMP23SURFADDR(15), +}; + +static int gr3d_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct host1x_syncpt **syncpts; + struct gr3d *gr3d; + unsigned int i; + int err; + + gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL); + if (!gr3d) + return -ENOMEM; + + syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL); + if (!syncpts) + return -ENOMEM; + + gr3d->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(gr3d->clk)) { + dev_err(&pdev->dev, "cannot get clock\n"); + return PTR_ERR(gr3d->clk); + } + + gr3d->rst = devm_reset_control_get(&pdev->dev, "3d"); + if (IS_ERR(gr3d->rst)) { + dev_err(&pdev->dev, "cannot get reset\n"); + return PTR_ERR(gr3d->rst); + } + + if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) { + gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2"); + if (IS_ERR(gr3d->clk)) { + dev_err(&pdev->dev, "cannot get secondary clock\n"); + return PTR_ERR(gr3d->clk); + } + + gr3d->rst_secondary = devm_reset_control_get(&pdev->dev, + "3d2"); + if (IS_ERR(gr3d->rst_secondary)) { + dev_err(&pdev->dev, "cannot get secondary reset\n"); + return PTR_ERR(gr3d->rst_secondary); + } 
+ } + + err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk, + gr3d->rst); + if (err < 0) { + dev_err(&pdev->dev, "failed to power up 3D unit\n"); + return err; + } + + if (gr3d->clk_secondary) { + err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1, + gr3d->clk_secondary, + gr3d->rst_secondary); + if (err < 0) { + dev_err(&pdev->dev, + "failed to power up secondary 3D unit\n"); + return err; + } + } + + INIT_LIST_HEAD(&gr3d->client.base.list); + gr3d->client.base.ops = &gr3d_client_ops; + gr3d->client.base.dev = &pdev->dev; + gr3d->client.base.class = HOST1X_CLASS_GR3D; + gr3d->client.base.syncpts = syncpts; + gr3d->client.base.num_syncpts = 1; + + INIT_LIST_HEAD(&gr3d->client.list); + gr3d->client.ops = &gr3d_ops; + + err = host1x_client_register(&gr3d->client.base); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + return err; + } + + /* initialize address register map */ + for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++) + set_bit(gr3d_addr_regs[i], gr3d->addr_regs); + + platform_set_drvdata(pdev, gr3d); + + return 0; +} + +static int gr3d_remove(struct platform_device *pdev) +{ + struct gr3d *gr3d = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&gr3d->client.base); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + if (gr3d->clk_secondary) { + tegra_powergate_power_off(TEGRA_POWERGATE_3D1); + clk_disable_unprepare(gr3d->clk_secondary); + } + + tegra_powergate_power_off(TEGRA_POWERGATE_3D); + clk_disable_unprepare(gr3d->clk); + + return 0; +} + +struct platform_driver tegra_gr3d_driver = { + .driver = { + .name = "tegra-gr3d", + .of_match_table = tegra_gr3d_match, + }, + .probe = gr3d_probe, + .remove = gr3d_remove, +}; diff --git a/kernel/drivers/gpu/drm/tegra/gr3d.h b/kernel/drivers/gpu/drm/tegra/gr3d.h new file mode 100644 index 000000000..0c30a1351 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/gr3d.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef TEGRA_GR3D_H +#define TEGRA_GR3D_H + +#define GR3D_IDX_ATTRIBUTE(x) (0x100 + (x) * 2) +#define GR3D_IDX_INDEX_BASE 0x121 +#define GR3D_QR_ZTAG_ADDR 0x415 +#define GR3D_QR_CTAG_ADDR 0x417 +#define GR3D_QR_CZ_ADDR 0x419 +#define GR3D_TEX_TEX_ADDR(x) (0x710 + (x)) +#define GR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904 +#define GR3D_GLOBAL_SURFADDR(x) (0xe00 + (x)) +#define GR3D_GLOBAL_SPILLSURFADDR 0xe2a +#define GR3D_GLOBAL_SURFOVERADDR(x) (0xe30 + (x)) +#define GR3D_GLOBAL_SAMP01SURFADDR(x) (0xe50 + (x)) +#define GR3D_GLOBAL_SAMP23SURFADDR(x) (0xe60 + (x)) + +#define GR3D_NUM_REGS 0xe88 + +#endif diff --git a/kernel/drivers/gpu/drm/tegra/hdmi.c b/kernel/drivers/gpu/drm/tegra/hdmi.c new file mode 100644 index 000000000..06ab1783b --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/hdmi.c @@ -0,0 +1,1579 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/gpio.h> +#include <linux/hdmi.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> + +#include "hdmi.h" +#include "drm.h" +#include "dc.h" + +struct tmds_config { + unsigned int pclk; + u32 pll0; + u32 pll1; + u32 pe_current; + u32 drive_current; + u32 peak_current; +}; + +struct tegra_hdmi_config { + const struct tmds_config *tmds; + unsigned int num_tmds; + + unsigned long fuse_override_offset; + u32 fuse_override_value; + + bool has_sor_io_peak_current; +}; + +struct tegra_hdmi { + struct host1x_client client; + struct tegra_output output; + struct device *dev; + + struct regulator *hdmi; + struct regulator *pll; + struct regulator *vdd; + + void __iomem *regs; + unsigned int irq; + + struct clk *clk_parent; + struct clk *clk; + struct reset_control *rst; + + const struct tegra_hdmi_config *config; + + unsigned int audio_source; + unsigned int audio_freq; + bool stereo; + bool dvi; + + struct drm_info_list *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; +}; + +static inline struct tegra_hdmi * +host1x_client_to_hdmi(struct host1x_client *client) +{ + return container_of(client, struct tegra_hdmi, client); +} + +static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output) +{ + return container_of(output, struct tegra_hdmi, output); +} + +#define HDMI_AUDIOCLK_FREQ 216000000 +#define HDMI_REKEY_DEFAULT 56 + +enum { + AUTO = 0, + SPDIF, + HDA, +}; + +static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi, + unsigned long offset) +{ + return readl(hdmi->regs + (offset << 2)); +} + +static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value, + unsigned long offset) +{ + writel(value, hdmi->regs + (offset << 2)); +} + +struct tegra_hdmi_audio_config { + unsigned int pclk; + unsigned int n; + unsigned int cts; + unsigned int aval; +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = { + { 25200000, 4096, 25200, 24000 }, + { 27000000, 4096, 27000, 24000 }, + { 74250000, 4096, 74250, 24000 }, + { 148500000, 4096, 148500, 24000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = { + { 25200000, 5880, 26250, 25000 }, + { 27000000, 5880, 28125, 25000 }, + { 74250000, 4704, 61875, 20000 }, + { 148500000, 4704, 123750, 20000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = { + { 25200000, 6144, 25200, 24000 }, + { 27000000, 6144, 27000, 24000 }, + { 74250000, 6144, 74250, 24000 }, + { 148500000, 6144, 148500, 24000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = { + { 25200000, 11760, 26250, 25000 }, + { 27000000, 11760, 28125, 25000 }, + { 74250000, 9408, 61875, 20000 }, + { 148500000, 9408, 123750, 20000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = { + { 25200000, 12288, 25200, 24000 }, + { 27000000, 12288, 27000, 24000 }, + { 74250000, 12288, 74250, 24000 }, + { 148500000, 12288, 148500, 24000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = { + { 25200000, 23520, 26250, 25000 }, + { 27000000, 23520, 28125, 25000 }, + { 74250000, 18816, 61875, 20000 }, + { 148500000, 18816, 123750, 20000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = { + { 
25200000, 24576, 25200, 24000 }, + { 27000000, 24576, 27000, 24000 }, + { 74250000, 24576, 74250, 24000 }, + { 148500000, 24576, 148500, 24000 }, + { 0, 0, 0, 0 }, +}; + +static const struct tmds_config tegra20_tmds_config[] = { + { /* slow pixel clock modes */ + .pclk = 27000000, + .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | + SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | + SOR_PLL_TX_REG_LOAD(3), + .pll1 = SOR_PLL_TMDS_TERM_ENABLE, + .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | + PE_CURRENT1(PE_CURRENT_0_0_mA) | + PE_CURRENT2(PE_CURRENT_0_0_mA) | + PE_CURRENT3(PE_CURRENT_0_0_mA), + .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | + DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | + DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | + DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), + }, + { /* high pixel clock modes */ + .pclk = UINT_MAX, + .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | + SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | + SOR_PLL_TX_REG_LOAD(3), + .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, + .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) | + PE_CURRENT1(PE_CURRENT_6_0_mA) | + PE_CURRENT2(PE_CURRENT_6_0_mA) | + PE_CURRENT3(PE_CURRENT_6_0_mA), + .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | + DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | + DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | + DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), + }, +}; + +static const struct tmds_config tegra30_tmds_config[] = { + { /* 480p modes */ + .pclk = 27000000, + .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | + SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | + SOR_PLL_TX_REG_LOAD(0), + .pll1 = SOR_PLL_TMDS_TERM_ENABLE, + .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | + PE_CURRENT1(PE_CURRENT_0_0_mA) | + PE_CURRENT2(PE_CURRENT_0_0_mA) | + PE_CURRENT3(PE_CURRENT_0_0_mA), + .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), + }, { /* 720p modes */ + .pclk = 74250000, + .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | + SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | + SOR_PLL_TX_REG_LOAD(0), + .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, + .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | + PE_CURRENT1(PE_CURRENT_5_0_mA) | + PE_CURRENT2(PE_CURRENT_5_0_mA) | + PE_CURRENT3(PE_CURRENT_5_0_mA), + .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), + }, { /* 1080p modes */ + .pclk = UINT_MAX, + .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | + SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) | + SOR_PLL_TX_REG_LOAD(0), + .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, + .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | + PE_CURRENT1(PE_CURRENT_5_0_mA) | + PE_CURRENT2(PE_CURRENT_5_0_mA) | + PE_CURRENT3(PE_CURRENT_5_0_mA), + .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | + DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), + }, +}; + +static const struct tmds_config tegra114_tmds_config[] = { + { /* 480p/576p / 25.2MHz/27MHz modes */ + .pclk = 27000000, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0), + .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) | + PE_CURRENT1(PE_CURRENT_0_mA_T114) 
| + PE_CURRENT2(PE_CURRENT_0_mA_T114) | + PE_CURRENT3(PE_CURRENT_0_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA), + }, { /* 720p / 74.25MHz modes */ + .pclk = 74250000, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) | + SOR_PLL_TMDS_TERMADJ(0), + .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) | + PE_CURRENT1(PE_CURRENT_15_mA_T114) | + PE_CURRENT2(PE_CURRENT_15_mA_T114) | + PE_CURRENT3(PE_CURRENT_15_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA), + }, { /* 1080p / 148.5MHz modes */ + .pclk = 148500000, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) | + SOR_PLL_TMDS_TERMADJ(0), + .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) | + PE_CURRENT1(PE_CURRENT_10_mA_T114) | + PE_CURRENT2(PE_CURRENT_10_mA_T114) | + PE_CURRENT3(PE_CURRENT_10_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA), + }, { /* 225/297MHz modes */ + .pclk = UINT_MAX, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7) + | SOR_PLL_TMDS_TERM_ENABLE, + .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) | + PE_CURRENT1(PE_CURRENT_0_mA_T114) | + PE_CURRENT2(PE_CURRENT_0_mA_T114) | + PE_CURRENT3(PE_CURRENT_0_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA), + }, +}; + +static const struct tmds_config tegra124_tmds_config[] = { + { /* 480p/576p / 25.2MHz/27MHz modes */ + .pclk = 27000000, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0), + .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) | + PE_CURRENT1(PE_CURRENT_0_mA_T114) | + PE_CURRENT2(PE_CURRENT_0_mA_T114) | + PE_CURRENT3(PE_CURRENT_0_mA_T114), + .drive_current = + 
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA), + }, { /* 720p / 74.25MHz modes */ + .pclk = 74250000, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) | + SOR_PLL_TMDS_TERMADJ(0), + .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) | + PE_CURRENT1(PE_CURRENT_15_mA_T114) | + PE_CURRENT2(PE_CURRENT_15_mA_T114) | + PE_CURRENT3(PE_CURRENT_15_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA), + }, { /* 1080p / 148.5MHz modes */ + .pclk = 148500000, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) | + SOR_PLL_TMDS_TERMADJ(0), + .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) | + PE_CURRENT1(PE_CURRENT_10_mA_T114) | + PE_CURRENT2(PE_CURRENT_10_mA_T114) | + PE_CURRENT3(PE_CURRENT_10_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA), + }, { /* 225/297MHz modes */ + .pclk = UINT_MAX, + .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) | + SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL, + .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7) + | SOR_PLL_TMDS_TERM_ENABLE, + .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) | + PE_CURRENT1(PE_CURRENT_0_mA_T114) | + PE_CURRENT2(PE_CURRENT_0_mA_T114) | + PE_CURRENT3(PE_CURRENT_0_mA_T114), + .drive_current = + DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) | + DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) | + DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) | + DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114), + .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) | + PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) | + PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) | + PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA), + }, +}; + +static const struct tegra_hdmi_audio_config * +tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) +{ + const struct tegra_hdmi_audio_config *table; + + switch (audio_freq) { + case 32000: + table = tegra_hdmi_audio_32k; + break; + + case 44100: + table = tegra_hdmi_audio_44_1k; + break; + + case 48000: + table = tegra_hdmi_audio_48k; + break; + + case 88200: + table = tegra_hdmi_audio_88_2k; + break; + + case 96000: + table = tegra_hdmi_audio_96k; + break; + + case 176400: + table = tegra_hdmi_audio_176_4k; + break; + + case 192000: + 
table = tegra_hdmi_audio_192k; + break; + + default: + return NULL; + } + + while (table->pclk) { + if (table->pclk == pclk) + return table; + + table++; + } + + return NULL; +} + +static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) +{ + const unsigned int freqs[] = { + 32000, 44100, 48000, 88200, 96000, 176400, 192000 + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(freqs); i++) { + unsigned int f = freqs[i]; + unsigned int eight_half; + unsigned int delta; + u32 value; + + if (f > 96000) + delta = 2; + else if (f > 48000) + delta = 6; + else + delta = 9; + + eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128); + value = AUDIO_FS_LOW(eight_half - delta) | + AUDIO_FS_HIGH(eight_half + delta); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i)); + } +} + +static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) +{ + struct device_node *node = hdmi->dev->of_node; + const struct tegra_hdmi_audio_config *config; + unsigned int offset = 0; + u32 value; + + switch (hdmi->audio_source) { + case HDA: + value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; + break; + + case SPDIF: + value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; + break; + + default: + value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; + break; + } + + if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { + value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) | + AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); + } else { + value |= AUDIO_CNTRL0_INJECT_NULLSMPL; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); + + value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) | + AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); + } + + config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk); + if (!config) { + dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n", + hdmi->audio_freq, pclk); + return -EINVAL; + } + + tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL); + + value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE | + AUDIO_N_VALUE(config->n - 1); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); + + tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, + HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); + + value = ACR_SUBPACK_CTS(config->cts); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); + + value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE); + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N); + value &= ~AUDIO_N_RESETF; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); + + if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { + switch (hdmi->audio_freq) { + case 32000: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320; + break; + + case 44100: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441; + break; + + case 48000: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480; + break; + + case 88200: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882; + break; + + case 96000: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960; + break; + + case 176400: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764; + break; + + case 192000: + offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920; + break; + } + + tegra_hdmi_writel(hdmi, config->aval, offset); + } + + tegra_hdmi_setup_audio_fs_tables(hdmi); + + return 0; +} + +static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size) +{ + u32 value = 0; + size_t i; + + for (i = size; i > 0; i--) + value = (value << 8) | ptr[i - 1]; + + return value; +} + +static void 
tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, + size_t size) +{ + const u8 *ptr = data; + unsigned long offset; + size_t i, j; + u32 value; + + switch (ptr[0]) { + case HDMI_INFOFRAME_TYPE_AVI: + offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER; + break; + + case HDMI_INFOFRAME_TYPE_AUDIO: + offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER; + break; + + default: + dev_err(hdmi->dev, "unsupported infoframe type: %02x\n", + ptr[0]); + return; + } + + value = INFOFRAME_HEADER_TYPE(ptr[0]) | + INFOFRAME_HEADER_VERSION(ptr[1]) | + INFOFRAME_HEADER_LEN(ptr[2]); + tegra_hdmi_writel(hdmi, value, offset); + offset++; + + /* + * Each subpack contains 7 bytes, divided into: + * - subpack_low: bytes 0 - 3 + * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) + */ + for (i = 3, j = 0; i < size; i += 7, j += 8) { + size_t rem = size - i, num = min_t(size_t, rem, 4); + + value = tegra_hdmi_subpack(&ptr[i], num); + tegra_hdmi_writel(hdmi, value, offset++); + + num = min_t(size_t, rem - num, 3); + + value = tegra_hdmi_subpack(&ptr[i + 4], num); + tegra_hdmi_writel(hdmi, value, offset++); + } +} + +static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi, + struct drm_display_mode *mode) +{ + struct hdmi_avi_infoframe frame; + u8 buffer[17]; + ssize_t err; + + if (hdmi->dvi) { + tegra_hdmi_writel(hdmi, 0, + HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); + return; + } + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + if (err < 0) { + dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err); + return; + } + + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err); + return; + } + + tegra_hdmi_write_infopack(hdmi, buffer, err); + + tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, + HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); +} + +static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) +{ + struct hdmi_audio_infoframe frame; + u8 buffer[14]; + ssize_t err; + + if (hdmi->dvi) { + tegra_hdmi_writel(hdmi, 0, + HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); + return; + } + + err = hdmi_audio_infoframe_init(&frame); + if (err < 0) { + dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n", + err); + return; + } + + frame.channels = 2; + + err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n", + err); + return; + } + + /* + * The audio infoframe has only one set of subpack registers, so the + * infoframe needs to be truncated. One set of subpack registers can + * contain 7 bytes. Including the 3 byte header only the first 10 + * bytes can be programmed. 
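+ * That is the 3-byte header plus one full 7-byte subpack; any remaining
+ * payload bytes are simply not written.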
+ */ + tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err)); + + tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, + HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); +} + +static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) +{ + struct hdmi_vendor_infoframe frame; + u8 buffer[10]; + ssize_t err; + u32 value; + + if (!hdmi->stereo) { + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + value &= ~GENERIC_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + return; + } + + hdmi_vendor_infoframe_init(&frame); + frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING; + + err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n", + err); + return; + } + + tegra_hdmi_write_infopack(hdmi, buffer, err); + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + value |= GENERIC_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); +} + +static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, + const struct tmds_config *tmds) +{ + u32 value; + + tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0); + tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); + tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT); + + tegra_hdmi_writel(hdmi, tmds->drive_current, + HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); + + value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset); + value |= hdmi->config->fuse_override_value; + tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset); + + if (hdmi->config->has_sor_io_peak_current) + tegra_hdmi_writel(hdmi, tmds->peak_current, + HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT); +} + +static bool tegra_output_is_hdmi(struct tegra_output *output) +{ + struct edid *edid; + + if (!output->connector.edid_blob_ptr) + return false; + + edid = (struct edid *)output->connector.edid_blob_ptr->data; + + return drm_detect_hdmi_monitor(edid); +} + +static void tegra_hdmi_connector_dpms(struct drm_connector *connector, + int mode) +{ +} + +static const struct drm_connector_funcs tegra_hdmi_connector_funcs = { + .dpms = tegra_hdmi_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_hdmi *hdmi = to_hdmi(output); + unsigned long pclk = mode->clock * 1000; + enum drm_mode_status status = MODE_OK; + struct clk *parent; + long err; + + parent = clk_get_parent(hdmi->clk_parent); + + err = clk_round_rate(parent, pclk * 4); + if (err <= 0) + status = MODE_NOCLOCK; + + return status; +} + +static const struct drm_connector_helper_funcs +tegra_hdmi_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_hdmi_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_hdmi_encoder_prepare(struct 
drm_encoder *encoder) +{ +} + +static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct device_node *node = output->dev->of_node; + struct tegra_hdmi *hdmi = to_hdmi(output); + unsigned int pulse_start, div82, pclk; + int retries = 1000; + u32 value; + int err; + + hdmi->dvi = !tegra_output_is_hdmi(output); + + pclk = mode->clock * 1000; + h_sync_width = mode->hsync_end - mode->hsync_start; + h_back_porch = mode->htotal - mode->hsync_end; + h_front_porch = mode->hsync_start - mode->hdisplay; + + err = clk_set_rate(hdmi->clk, pclk); + if (err < 0) { + dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", + err); + } + + DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk)); + + /* power up sequence */ + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); + value &= ~SOR_PLL_PDBG; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0); + + usleep_range(10, 20); + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); + value &= ~SOR_PLL_PWR; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0); + + tegra_dc_writel(dc, VSYNC_H_POSITION(1), + DC_DISP_DISP_TIMING_OPTIONS); + tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, + DC_DISP_DISP_COLOR_CONTROL); + + /* video_preamble uses h_pulse2 */ + pulse_start = 1 + h_sync_width + h_back_porch - 10; + + tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); + + value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE | + PULSE_LAST_END_A; + tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL); + + value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8); + tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A); + + value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) | + VSYNC_WINDOW_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); + + if (dc->pipe) + value = HDMI_SRC_DISPLAYB; + else + value = HDMI_SRC_DISPLAYA; + + if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) || + (mode->vdisplay == 576))) + tegra_hdmi_writel(hdmi, + value | ARM_VIDEO_RANGE_FULL, + HDMI_NV_PDISP_INPUT_CONTROL); + else + tegra_hdmi_writel(hdmi, + value | ARM_VIDEO_RANGE_LIMITED, + HDMI_NV_PDISP_INPUT_CONTROL); + + div82 = clk_get_rate(hdmi->clk) / 1000000 * 4; + value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK); + + if (!hdmi->dvi) { + err = tegra_hdmi_setup_audio(hdmi, pclk); + if (err < 0) + hdmi->dvi = true; + } + + if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) { + /* + * TODO: add ELD support + */ + } + + rekey = HDMI_REKEY_DEFAULT; + value = HDMI_CTRL_REKEY(rekey); + value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch + + h_front_porch - rekey - 18) / 32); + + if (!hdmi->dvi) + value |= HDMI_CTRL_ENABLE; + + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL); + + if (hdmi->dvi) + tegra_hdmi_writel(hdmi, 0x0, + HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + else + tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO, + HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + + tegra_hdmi_setup_avi_infoframe(hdmi, mode); + tegra_hdmi_setup_audio_infoframe(hdmi); + tegra_hdmi_setup_stereo_infoframe(hdmi); + + /* TMDS CONFIG */ + for (i = 0; i < 
hdmi->config->num_tmds; i++) { + if (pclk <= hdmi->config->tmds[i].pclk) { + tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]); + break; + } + } + + tegra_hdmi_writel(hdmi, + SOR_SEQ_PU_PC(0) | + SOR_SEQ_PU_PC_ALT(0) | + SOR_SEQ_PD_PC(8) | + SOR_SEQ_PD_PC_ALT(8), + HDMI_NV_PDISP_SOR_SEQ_CTL); + + value = SOR_SEQ_INST_WAIT_TIME(1) | + SOR_SEQ_INST_WAIT_UNITS_VSYNC | + SOR_SEQ_INST_HALT | + SOR_SEQ_INST_PIN_A_LOW | + SOR_SEQ_INST_PIN_B_LOW | + SOR_SEQ_INST_DRIVE_PWM_OUT_LO; + + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0)); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8)); + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_CSTM); + value &= ~SOR_CSTM_ROTCLK(~0); + value |= SOR_CSTM_ROTCLK(2); + value |= SOR_CSTM_PLLDIV; + value &= ~SOR_CSTM_LVDS_ENABLE; + value &= ~SOR_CSTM_MODE_MASK; + value |= SOR_CSTM_MODE_TMDS; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM); + + /* start SOR */ + tegra_hdmi_writel(hdmi, + SOR_PWR_NORMAL_STATE_PU | + SOR_PWR_NORMAL_START_NORMAL | + SOR_PWR_SAFE_STATE_PD | + SOR_PWR_SETTING_NEW_TRIGGER, + HDMI_NV_PDISP_SOR_PWR); + tegra_hdmi_writel(hdmi, + SOR_PWR_NORMAL_STATE_PU | + SOR_PWR_NORMAL_START_NORMAL | + SOR_PWR_SAFE_STATE_PD | + SOR_PWR_SETTING_NEW_DONE, + HDMI_NV_PDISP_SOR_PWR); + + do { + BUG_ON(--retries < 0); + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR); + } while (value & SOR_PWR_SETTING_NEW_PENDING); + + value = SOR_STATE_ASY_CRCMODE_COMPLETE | + SOR_STATE_ASY_OWNER_HEAD0 | + SOR_STATE_ASY_SUBOWNER_BOTH | + SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A | + SOR_STATE_ASY_DEPOL_POS; + + /* setup sync polarities */ + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + value |= SOR_STATE_ASY_HSYNCPOL_POS; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + value |= SOR_STATE_ASY_HSYNCPOL_NEG; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + value |= SOR_STATE_ASY_VSYNCPOL_POS; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + value |= SOR_STATE_ASY_VSYNCPOL_NEG; + + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2); + + value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1); + + tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); + tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0); + tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED, + HDMI_NV_PDISP_SOR_STATE1); + tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= HDMI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + /* TODO: add HDCP support */ +} + +static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) +{ + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + u32 value; + + /* + * The following accesses registers of the display controller, so make + * sure it's only executed when the output is attached to one. 
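+ * (If the output is detached, encoder->crtc is NULL, dc evaluates to
+ * NULL and the accesses below are skipped.)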
+ */ + if (dc) { + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~HDMI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + } +} + +static int +tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; + struct tegra_hdmi *hdmi = to_hdmi(output); + int err; + + err = tegra_dc_state_setup_clock(dc, crtc_state, hdmi->clk_parent, + pclk, 0); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } + + return err; +} + +static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { + .dpms = tegra_hdmi_encoder_dpms, + .prepare = tegra_hdmi_encoder_prepare, + .commit = tegra_hdmi_encoder_commit, + .mode_set = tegra_hdmi_encoder_mode_set, + .disable = tegra_hdmi_encoder_disable, + .atomic_check = tegra_hdmi_encoder_atomic_check, +}; + +static int tegra_hdmi_show_regs(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_hdmi *hdmi = node->info_ent->data; + int err; + + err = clk_prepare_enable(hdmi->clk); + if (err) + return err; + +#define DUMP_REG(name) \ + seq_printf(s, "%-56s %#05x %08x\n", #name, name, \ + tegra_hdmi_readl(hdmi, name)) + + DUMP_REG(HDMI_CTXSW); + DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); + DUMP_REG(HDMI_NV_PDISP_SOR_STATE1); + DUMP_REG(HDMI_NV_PDISP_SOR_STATE2); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB); + DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER); 
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH); + DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL); + DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT); + DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); + DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL); + DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS); + DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK); + DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1); + DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2); + DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0); + DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1); + DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA); + DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE); + DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1); + DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2); + DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL); + DUMP_REG(HDMI_NV_PDISP_SOR_CAP); + DUMP_REG(HDMI_NV_PDISP_SOR_PWR); + DUMP_REG(HDMI_NV_PDISP_SOR_TEST); + DUMP_REG(HDMI_NV_PDISP_SOR_PLL0); + DUMP_REG(HDMI_NV_PDISP_SOR_PLL1); + DUMP_REG(HDMI_NV_PDISP_SOR_PLL2); + DUMP_REG(HDMI_NV_PDISP_SOR_CSTM); + DUMP_REG(HDMI_NV_PDISP_SOR_LVDS); + DUMP_REG(HDMI_NV_PDISP_SOR_CRCA); + DUMP_REG(HDMI_NV_PDISP_SOR_CRCB); + DUMP_REG(HDMI_NV_PDISP_SOR_BLANK); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14)); + DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15)); + DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0); + DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1); + DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0); + DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1); + DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0); + DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1); + DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0); + DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1); + DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0); + DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1); + DUMP_REG(HDMI_NV_PDISP_SOR_TRIG); + DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK); + DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); + DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0); + 
DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1); + DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6)); + DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH); + DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD); + DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0); + DUMP_REG(HDMI_NV_PDISP_AUDIO_N); + DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING); + DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK); + DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL); + DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL); + DUMP_REG(HDMI_NV_PDISP_SCRATCH); + DUMP_REG(HDMI_NV_PDISP_PE_CURRENT); + DUMP_REG(HDMI_NV_PDISP_KEY_CTRL); + DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0); + DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1); + DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2); + DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0); + DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1); + DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2); + DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3); + DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); + DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); + DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); + DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); + DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); + DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT); + +#undef DUMP_REG + + clk_disable_unprepare(hdmi->clk); + + return 0; +} + +static struct drm_info_list debugfs_files[] = { + { "regs", tegra_hdmi_show_regs, 0, NULL }, +}; + +static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi, + struct drm_minor *minor) +{ + unsigned int i; + int err; + + hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root); + if (!hdmi->debugfs) + return -ENOMEM; + + hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), + GFP_KERNEL); + if (!hdmi->debugfs_files) { + err = -ENOMEM; + goto remove; + } + + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + hdmi->debugfs_files[i].data = hdmi; + + err = drm_debugfs_create_files(hdmi->debugfs_files, + ARRAY_SIZE(debugfs_files), + hdmi->debugfs, minor); + if (err < 0) + goto free; + + hdmi->minor = minor; + + return 0; + +free: + kfree(hdmi->debugfs_files); + hdmi->debugfs_files = NULL; +remove: + debugfs_remove(hdmi->debugfs); + hdmi->debugfs = NULL; + + return err; +} + +static void tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) +{ + drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files), + hdmi->minor); + hdmi->minor = NULL; + + kfree(hdmi->debugfs_files); + hdmi->debugfs_files = NULL; + + debugfs_remove(hdmi->debugfs); + hdmi->debugfs = NULL; +} + +static int tegra_hdmi_init(struct host1x_client *client) +{ + struct drm_device *drm = dev_get_drvdata(client->parent); + struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + int err; + + hdmi->output.dev = client->dev; + + drm_connector_init(drm, &hdmi->output.connector, + &tegra_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(&hdmi->output.connector, + &tegra_hdmi_connector_helper_funcs); + hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&hdmi->output.encoder, + &tegra_hdmi_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&hdmi->output.connector, + &hdmi->output.encoder); + drm_connector_register(&hdmi->output.connector); + + err = tegra_output_init(drm, &hdmi->output); + if (err < 0) { + dev_err(client->dev, "failed to initialize 
output: %d\n", err); + return err; + } + + hdmi->output.encoder.possible_crtcs = 0x3; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_hdmi_debugfs_init(hdmi, drm->primary); + if (err < 0) + dev_err(client->dev, "debugfs setup failed: %d\n", err); + } + + err = regulator_enable(hdmi->hdmi); + if (err < 0) { + dev_err(client->dev, "failed to enable HDMI regulator: %d\n", + err); + return err; + } + + err = regulator_enable(hdmi->pll); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); + return err; + } + + err = regulator_enable(hdmi->vdd); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); + return err; + } + + err = clk_prepare_enable(hdmi->clk); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable clock: %d\n", err); + return err; + } + + reset_control_deassert(hdmi->rst); + + return 0; +} + +static int tegra_hdmi_exit(struct host1x_client *client) +{ + struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + + tegra_output_exit(&hdmi->output); + + reset_control_assert(hdmi->rst); + clk_disable_unprepare(hdmi->clk); + + regulator_disable(hdmi->vdd); + regulator_disable(hdmi->pll); + regulator_disable(hdmi->hdmi); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_hdmi_debugfs_exit(hdmi); + + return 0; +} + +static const struct host1x_client_ops hdmi_client_ops = { + .init = tegra_hdmi_init, + .exit = tegra_hdmi_exit, +}; + +static const struct tegra_hdmi_config tegra20_hdmi_config = { + .tmds = tegra20_tmds_config, + .num_tmds = ARRAY_SIZE(tegra20_tmds_config), + .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, + .fuse_override_value = 1 << 31, + .has_sor_io_peak_current = false, +}; + +static const struct tegra_hdmi_config tegra30_hdmi_config = { + .tmds = tegra30_tmds_config, + .num_tmds = ARRAY_SIZE(tegra30_tmds_config), + .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, + .fuse_override_value = 1 << 31, + .has_sor_io_peak_current = false, +}; + +static const struct tegra_hdmi_config tegra114_hdmi_config = { + .tmds = tegra114_tmds_config, + .num_tmds = ARRAY_SIZE(tegra114_tmds_config), + .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0, + .fuse_override_value = 1 << 31, + .has_sor_io_peak_current = true, +}; + +static const struct tegra_hdmi_config tegra124_hdmi_config = { + .tmds = tegra124_tmds_config, + .num_tmds = ARRAY_SIZE(tegra124_tmds_config), + .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0, + .fuse_override_value = 1 << 31, + .has_sor_io_peak_current = true, +}; + +static const struct of_device_id tegra_hdmi_of_match[] = { + { .compatible = "nvidia,tegra124-hdmi", .data = &tegra124_hdmi_config }, + { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config }, + { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config }, + { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match); + +static int tegra_hdmi_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct tegra_hdmi *hdmi; + struct resource *regs; + int err; + + match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node); + if (!match) + return -ENODEV; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + hdmi->config = match->data; + hdmi->dev = &pdev->dev; + hdmi->audio_source = AUTO; + hdmi->audio_freq = 44100; + hdmi->stereo = false; + hdmi->dvi = false; + + hdmi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hdmi->clk)) { + dev_err(&pdev->dev, 
"failed to get clock\n"); + return PTR_ERR(hdmi->clk); + } + + hdmi->rst = devm_reset_control_get(&pdev->dev, "hdmi"); + if (IS_ERR(hdmi->rst)) { + dev_err(&pdev->dev, "failed to get reset\n"); + return PTR_ERR(hdmi->rst); + } + + hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent"); + if (IS_ERR(hdmi->clk_parent)) + return PTR_ERR(hdmi->clk_parent); + + err = clk_set_parent(hdmi->clk, hdmi->clk_parent); + if (err < 0) { + dev_err(&pdev->dev, "failed to setup clocks: %d\n", err); + return err; + } + + hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi"); + if (IS_ERR(hdmi->hdmi)) { + dev_err(&pdev->dev, "failed to get HDMI regulator\n"); + return PTR_ERR(hdmi->hdmi); + } + + hdmi->pll = devm_regulator_get(&pdev->dev, "pll"); + if (IS_ERR(hdmi->pll)) { + dev_err(&pdev->dev, "failed to get PLL regulator\n"); + return PTR_ERR(hdmi->pll); + } + + hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(hdmi->vdd)) { + dev_err(&pdev->dev, "failed to get VDD regulator\n"); + return PTR_ERR(hdmi->vdd); + } + + hdmi->output.dev = &pdev->dev; + + err = tegra_output_probe(&hdmi->output); + if (err < 0) + return err; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdmi->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(hdmi->regs)) + return PTR_ERR(hdmi->regs); + + err = platform_get_irq(pdev, 0); + if (err < 0) + return err; + + hdmi->irq = err; + + INIT_LIST_HEAD(&hdmi->client.list); + hdmi->client.ops = &hdmi_client_ops; + hdmi->client.dev = &pdev->dev; + + err = host1x_client_register(&hdmi->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + return err; + } + + platform_set_drvdata(pdev, hdmi); + + return 0; +} + +static int tegra_hdmi_remove(struct platform_device *pdev) +{ + struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&hdmi->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + tegra_output_remove(&hdmi->output); + + clk_disable_unprepare(hdmi->clk_parent); + clk_disable_unprepare(hdmi->clk); + + return 0; +} + +struct platform_driver tegra_hdmi_driver = { + .driver = { + .name = "tegra-hdmi", + .owner = THIS_MODULE, + .of_match_table = tegra_hdmi_of_match, + }, + .probe = tegra_hdmi_probe, + .remove = tegra_hdmi_remove, +}; diff --git a/kernel/drivers/gpu/drm/tegra/hdmi.h b/kernel/drivers/gpu/drm/tegra/hdmi.h new file mode 100644 index 000000000..a88251438 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/hdmi.h @@ -0,0 +1,541 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef TEGRA_HDMI_H +#define TEGRA_HDMI_H 1 + +/* register definitions */ +#define HDMI_CTXSW 0x00 + +#define HDMI_NV_PDISP_SOR_STATE0 0x01 +#define SOR_STATE_UPDATE (1 << 0) + +#define HDMI_NV_PDISP_SOR_STATE1 0x02 +#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0) +#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2) +#define SOR_STATE_ATTACHED (1 << 3) + +#define HDMI_NV_PDISP_SOR_STATE2 0x03 +#define SOR_STATE_ASY_OWNER_NONE (0 << 0) +#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0) +#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4) +#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4) +#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4) +#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4) +#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6) +#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6) +#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6) +#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8) +#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8) +#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12) +#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12) +#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13) +#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13) +#define SOR_STATE_ASY_DEPOL_POS (0 << 14) +#define SOR_STATE_ASY_DEPOL_NEG (1 << 14) + +#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04 +#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05 +#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06 +#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07 +#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08 +#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09 +#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a +#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b +#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c +#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d +#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e +#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f +#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10 +#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11 +#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12 +#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13 +#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14 +#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15 +#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16 +#define HDMI_NV_PDISP_RG_HDCP_RI 0x17 +#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18 +#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19 +#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a +#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b +#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c +#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d + +#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e +#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f +#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20 +#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21 +#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28 +#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29 + +#define INFOFRAME_CTRL_ENABLE (1 << 0) + +#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) +#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) +#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) + +#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a +#define GENERIC_CTRL_ENABLE (1 << 0) +#define GENERIC_CTRL_OTHER (1 << 4) +#define GENERIC_CTRL_SINGLE (1 << 8) +#define GENERIC_CTRL_HBLANK (1 << 12) +#define GENERIC_CTRL_AUDIO (1 << 16) + +#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b 
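+
+/*
+ * The GENERIC_* header and subpack registers below carry the vendor
+ * (stereo 3D) infoframe written by tegra_hdmi_write_infopack();
+ * tegra_hdmi_setup_stereo_infoframe() toggles GENERIC_CTRL_ENABLE to
+ * start and stop its transmission.
+ */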
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30 +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31 +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32 +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33 +#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34 + +#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35 +#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36 +#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37 +#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38 +#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39 +#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a +#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b +#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c +#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d +#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e +#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f +#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40 +#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41 +#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42 +#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43 + +#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8) +#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0) +#define ACR_ENABLE (1 << 31) + +#define HDMI_NV_PDISP_HDMI_CTRL 0x44 +#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0) +#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16) +#define HDMI_CTRL_ENABLE (1 << 30) + +#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45 +#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46 +#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0) +#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16) +#define VSYNC_WINDOW_ENABLE (1 << 31) + +#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47 +#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48 +#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49 +#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a +#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b +#define HDMI_NV_PDISP_HDMI_EMU0 0x4c +#define HDMI_NV_PDISP_HDMI_EMU1 0x4d +#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e + +#define HDMI_NV_PDISP_HDMI_SPARE 0x4f +#define SPARE_HW_CTS (1 << 0) +#define SPARE_FORCE_SW_CTS (1 << 1) +#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16) + +#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50 +#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51 +#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53 +#define HDMI_NV_PDISP_SOR_CAP 0x54 +#define HDMI_NV_PDISP_SOR_PWR 0x55 +#define SOR_PWR_NORMAL_STATE_PD (0 << 0) +#define SOR_PWR_NORMAL_STATE_PU (1 << 0) +#define SOR_PWR_NORMAL_START_NORMAL (0 << 1) +#define SOR_PWR_NORMAL_START_ALT (1 << 1) +#define SOR_PWR_SAFE_STATE_PD (0 << 16) +#define SOR_PWR_SAFE_STATE_PU (1 << 16) +#define SOR_PWR_SETTING_NEW_DONE (0 << 31) +#define SOR_PWR_SETTING_NEW_PENDING (1 << 31) +#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31) + +#define HDMI_NV_PDISP_SOR_TEST 0x56 +#define HDMI_NV_PDISP_SOR_PLL0 0x57 +#define SOR_PLL_PWR (1 << 0) +#define SOR_PLL_PDBG (1 << 1) +#define SOR_PLL_VCAPD (1 << 2) +#define SOR_PLL_PDPORT (1 << 3) +#define SOR_PLL_RESISTORSEL (1 << 4) +#define SOR_PLL_PULLDOWN (1 << 5) +#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8) +#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12) +#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16) +#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24) +#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28) + +#define HDMI_NV_PDISP_SOR_PLL1 0x58 +#define 
SOR_PLL_TMDS_TERM_ENABLE (1 << 8) +#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9) +#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20) +#define SOR_PLL_PE_EN (1 << 28) +#define SOR_PLL_HALF_FULL_PE (1 << 29) +#define SOR_PLL_S_D_PIN_PE (1 << 30) + +#define HDMI_NV_PDISP_SOR_PLL2 0x59 + +#define HDMI_NV_PDISP_SOR_CSTM 0x5a +#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24) +#define SOR_CSTM_PLLDIV (1 << 21) +#define SOR_CSTM_LVDS_ENABLE (1 << 16) +#define SOR_CSTM_MODE_LVDS (0 << 12) +#define SOR_CSTM_MODE_TMDS (1 << 12) +#define SOR_CSTM_MODE_MASK (3 << 12) + +#define HDMI_NV_PDISP_SOR_LVDS 0x5b +#define HDMI_NV_PDISP_SOR_CRCA 0x5c +#define HDMI_NV_PDISP_SOR_CRCB 0x5d +#define HDMI_NV_PDISP_SOR_BLANK 0x5e +#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f +#define SOR_SEQ_PU_PC(x) (((x) & 0xf) << 0) +#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4) +#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8) +#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12) +#define SOR_SEQ_PC(x) (((x) & 0xf) << 16) +#define SOR_SEQ_STATUS (1 << 28) +#define SOR_SEQ_SWITCH (1 << 30) + +#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x)) + +#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0) +#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12) +#define SOR_SEQ_INST_HALT (1 << 15) +#define SOR_SEQ_INST_PIN_A_LOW (0 << 21) +#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21) +#define SOR_SEQ_INST_PIN_B_LOW (0 << 22) +#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22) +#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23) + +#define HDMI_NV_PDISP_SOR_VCRCA0 0x72 +#define HDMI_NV_PDISP_SOR_VCRCA1 0x73 +#define HDMI_NV_PDISP_SOR_CCRCA0 0x74 +#define HDMI_NV_PDISP_SOR_CCRCA1 0x75 +#define HDMI_NV_PDISP_SOR_EDATAA0 0x76 +#define HDMI_NV_PDISP_SOR_EDATAA1 0x77 +#define HDMI_NV_PDISP_SOR_COUNTA0 0x78 +#define HDMI_NV_PDISP_SOR_COUNTA1 0x79 +#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a +#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b +#define HDMI_NV_PDISP_SOR_TRIG 0x7c +#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d + +#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e +#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0) +#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8) +#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16) +#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24) +#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0) +#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8) +#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16) +#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24) + +#define DRIVE_CURRENT_1_500_mA 0x00 +#define DRIVE_CURRENT_1_875_mA 0x01 +#define DRIVE_CURRENT_2_250_mA 0x02 +#define DRIVE_CURRENT_2_625_mA 0x03 +#define DRIVE_CURRENT_3_000_mA 0x04 +#define DRIVE_CURRENT_3_375_mA 0x05 +#define DRIVE_CURRENT_3_750_mA 0x06 +#define DRIVE_CURRENT_4_125_mA 0x07 +#define DRIVE_CURRENT_4_500_mA 0x08 +#define DRIVE_CURRENT_4_875_mA 0x09 +#define DRIVE_CURRENT_5_250_mA 0x0a +#define DRIVE_CURRENT_5_625_mA 0x0b +#define DRIVE_CURRENT_6_000_mA 0x0c +#define DRIVE_CURRENT_6_375_mA 0x0d +#define DRIVE_CURRENT_6_750_mA 0x0e +#define DRIVE_CURRENT_7_125_mA 0x0f +#define DRIVE_CURRENT_7_500_mA 0x10 +#define DRIVE_CURRENT_7_875_mA 0x11 +#define DRIVE_CURRENT_8_250_mA 0x12 +#define DRIVE_CURRENT_8_625_mA 0x13 +#define DRIVE_CURRENT_9_000_mA 0x14 +#define DRIVE_CURRENT_9_375_mA 0x15 +#define DRIVE_CURRENT_9_750_mA 0x16 +#define DRIVE_CURRENT_10_125_mA 0x17 +#define DRIVE_CURRENT_10_500_mA 0x18 +#define DRIVE_CURRENT_10_875_mA 0x19 +#define DRIVE_CURRENT_11_250_mA 0x1a +#define DRIVE_CURRENT_11_625_mA 0x1b +#define DRIVE_CURRENT_12_000_mA 0x1c +#define DRIVE_CURRENT_12_375_mA 0x1d 
+#define DRIVE_CURRENT_12_750_mA 0x1e +#define DRIVE_CURRENT_13_125_mA 0x1f +#define DRIVE_CURRENT_13_500_mA 0x20 +#define DRIVE_CURRENT_13_875_mA 0x21 +#define DRIVE_CURRENT_14_250_mA 0x22 +#define DRIVE_CURRENT_14_625_mA 0x23 +#define DRIVE_CURRENT_15_000_mA 0x24 +#define DRIVE_CURRENT_15_375_mA 0x25 +#define DRIVE_CURRENT_15_750_mA 0x26 +#define DRIVE_CURRENT_16_125_mA 0x27 +#define DRIVE_CURRENT_16_500_mA 0x28 +#define DRIVE_CURRENT_16_875_mA 0x29 +#define DRIVE_CURRENT_17_250_mA 0x2a +#define DRIVE_CURRENT_17_625_mA 0x2b +#define DRIVE_CURRENT_18_000_mA 0x2c +#define DRIVE_CURRENT_18_375_mA 0x2d +#define DRIVE_CURRENT_18_750_mA 0x2e +#define DRIVE_CURRENT_19_125_mA 0x2f +#define DRIVE_CURRENT_19_500_mA 0x30 +#define DRIVE_CURRENT_19_875_mA 0x31 +#define DRIVE_CURRENT_20_250_mA 0x32 +#define DRIVE_CURRENT_20_625_mA 0x33 +#define DRIVE_CURRENT_21_000_mA 0x34 +#define DRIVE_CURRENT_21_375_mA 0x35 +#define DRIVE_CURRENT_21_750_mA 0x36 +#define DRIVE_CURRENT_22_125_mA 0x37 +#define DRIVE_CURRENT_22_500_mA 0x38 +#define DRIVE_CURRENT_22_875_mA 0x39 +#define DRIVE_CURRENT_23_250_mA 0x3a +#define DRIVE_CURRENT_23_625_mA 0x3b +#define DRIVE_CURRENT_24_000_mA 0x3c +#define DRIVE_CURRENT_24_375_mA 0x3d +#define DRIVE_CURRENT_24_750_mA 0x3e + +#define DRIVE_CURRENT_0_000_mA_T114 0x00 +#define DRIVE_CURRENT_0_400_mA_T114 0x01 +#define DRIVE_CURRENT_0_800_mA_T114 0x02 +#define DRIVE_CURRENT_1_200_mA_T114 0x03 +#define DRIVE_CURRENT_1_600_mA_T114 0x04 +#define DRIVE_CURRENT_2_000_mA_T114 0x05 +#define DRIVE_CURRENT_2_400_mA_T114 0x06 +#define DRIVE_CURRENT_2_800_mA_T114 0x07 +#define DRIVE_CURRENT_3_200_mA_T114 0x08 +#define DRIVE_CURRENT_3_600_mA_T114 0x09 +#define DRIVE_CURRENT_4_000_mA_T114 0x0a +#define DRIVE_CURRENT_4_400_mA_T114 0x0b +#define DRIVE_CURRENT_4_800_mA_T114 0x0c +#define DRIVE_CURRENT_5_200_mA_T114 0x0d +#define DRIVE_CURRENT_5_600_mA_T114 0x0e +#define DRIVE_CURRENT_6_000_mA_T114 0x0f +#define DRIVE_CURRENT_6_400_mA_T114 0x10 +#define DRIVE_CURRENT_6_800_mA_T114 0x11 +#define DRIVE_CURRENT_7_200_mA_T114 0x12 +#define DRIVE_CURRENT_7_600_mA_T114 0x13 +#define DRIVE_CURRENT_8_000_mA_T114 0x14 +#define DRIVE_CURRENT_8_400_mA_T114 0x15 +#define DRIVE_CURRENT_8_800_mA_T114 0x16 +#define DRIVE_CURRENT_9_200_mA_T114 0x17 +#define DRIVE_CURRENT_9_600_mA_T114 0x18 +#define DRIVE_CURRENT_10_000_mA_T114 0x19 +#define DRIVE_CURRENT_10_400_mA_T114 0x1a +#define DRIVE_CURRENT_10_800_mA_T114 0x1b +#define DRIVE_CURRENT_11_200_mA_T114 0x1c +#define DRIVE_CURRENT_11_600_mA_T114 0x1d +#define DRIVE_CURRENT_12_000_mA_T114 0x1e +#define DRIVE_CURRENT_12_400_mA_T114 0x1f +#define DRIVE_CURRENT_12_800_mA_T114 0x20 +#define DRIVE_CURRENT_13_200_mA_T114 0x21 +#define DRIVE_CURRENT_13_600_mA_T114 0x22 +#define DRIVE_CURRENT_14_000_mA_T114 0x23 +#define DRIVE_CURRENT_14_400_mA_T114 0x24 +#define DRIVE_CURRENT_14_800_mA_T114 0x25 +#define DRIVE_CURRENT_15_200_mA_T114 0x26 +#define DRIVE_CURRENT_15_600_mA_T114 0x27 +#define DRIVE_CURRENT_16_000_mA_T114 0x28 +#define DRIVE_CURRENT_16_400_mA_T114 0x29 +#define DRIVE_CURRENT_16_800_mA_T114 0x2a +#define DRIVE_CURRENT_17_200_mA_T114 0x2b +#define DRIVE_CURRENT_17_600_mA_T114 0x2c +#define DRIVE_CURRENT_18_000_mA_T114 0x2d +#define DRIVE_CURRENT_18_400_mA_T114 0x2e +#define DRIVE_CURRENT_18_800_mA_T114 0x2f +#define DRIVE_CURRENT_19_200_mA_T114 0x30 +#define DRIVE_CURRENT_19_600_mA_T114 0x31 +#define DRIVE_CURRENT_20_000_mA_T114 0x32 +#define DRIVE_CURRENT_20_400_mA_T114 0x33 +#define DRIVE_CURRENT_20_800_mA_T114 0x34 +#define DRIVE_CURRENT_21_200_mA_T114 0x35 
+#define DRIVE_CURRENT_21_600_mA_T114 0x36 +#define DRIVE_CURRENT_22_000_mA_T114 0x37 +#define DRIVE_CURRENT_22_400_mA_T114 0x38 +#define DRIVE_CURRENT_22_800_mA_T114 0x39 +#define DRIVE_CURRENT_23_200_mA_T114 0x3a +#define DRIVE_CURRENT_23_600_mA_T114 0x3b +#define DRIVE_CURRENT_24_000_mA_T114 0x3c +#define DRIVE_CURRENT_24_400_mA_T114 0x3d +#define DRIVE_CURRENT_24_800_mA_T114 0x3e +#define DRIVE_CURRENT_25_200_mA_T114 0x3f +#define DRIVE_CURRENT_25_400_mA_T114 0x40 +#define DRIVE_CURRENT_25_800_mA_T114 0x41 +#define DRIVE_CURRENT_26_200_mA_T114 0x42 +#define DRIVE_CURRENT_26_600_mA_T114 0x43 +#define DRIVE_CURRENT_27_000_mA_T114 0x44 +#define DRIVE_CURRENT_27_400_mA_T114 0x45 +#define DRIVE_CURRENT_27_800_mA_T114 0x46 +#define DRIVE_CURRENT_28_200_mA_T114 0x47 + +#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f +#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80 +#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81 + +#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x)) +#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0) +#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16) + +#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89 +#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a +#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b +#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0) +#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20) +#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20) +#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20) +#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24) + +#define HDMI_NV_PDISP_AUDIO_N 0x8c +#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0) +#define AUDIO_N_RESETF (1 << 20) +#define AUDIO_N_GENERATE_NORMAL (0 << 24) +#define AUDIO_N_GENERATE_ALTERNATE (1 << 24) + +#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94 +#define HDMI_NV_PDISP_SOR_REFCLK 0x95 +#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8) +#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6) + +#define HDMI_NV_PDISP_CRC_CONTROL 0x96 +#define HDMI_NV_PDISP_INPUT_CONTROL 0x97 +#define HDMI_SRC_DISPLAYA (0 << 0) +#define HDMI_SRC_DISPLAYB (1 << 0) +#define ARM_VIDEO_RANGE_FULL (0 << 1) +#define ARM_VIDEO_RANGE_LIMITED (1 << 1) + +#define HDMI_NV_PDISP_SCRATCH 0x98 +#define HDMI_NV_PDISP_PE_CURRENT 0x99 +#define PE_CURRENT0(x) (((x) & 0xf) << 0) +#define PE_CURRENT1(x) (((x) & 0xf) << 8) +#define PE_CURRENT2(x) (((x) & 0xf) << 16) +#define PE_CURRENT3(x) (((x) & 0xf) << 24) + +#define PE_CURRENT_0_0_mA 0x0 +#define PE_CURRENT_0_5_mA 0x1 +#define PE_CURRENT_1_0_mA 0x2 +#define PE_CURRENT_1_5_mA 0x3 +#define PE_CURRENT_2_0_mA 0x4 +#define PE_CURRENT_2_5_mA 0x5 +#define PE_CURRENT_3_0_mA 0x6 +#define PE_CURRENT_3_5_mA 0x7 +#define PE_CURRENT_4_0_mA 0x8 +#define PE_CURRENT_4_5_mA 0x9 +#define PE_CURRENT_5_0_mA 0xa +#define PE_CURRENT_5_5_mA 0xb +#define PE_CURRENT_6_0_mA 0xc +#define PE_CURRENT_6_5_mA 0xd +#define PE_CURRENT_7_0_mA 0xe +#define PE_CURRENT_7_5_mA 0xf + +#define PE_CURRENT_0_mA_T114 0x0 +#define PE_CURRENT_1_mA_T114 0x1 +#define PE_CURRENT_2_mA_T114 0x2 +#define PE_CURRENT_3_mA_T114 0x3 +#define PE_CURRENT_4_mA_T114 0x4 +#define PE_CURRENT_5_mA_T114 0x5 +#define PE_CURRENT_6_mA_T114 0x6 +#define PE_CURRENT_7_mA_T114 0x7 +#define PE_CURRENT_8_mA_T114 0x8 +#define PE_CURRENT_9_mA_T114 0x9 +#define PE_CURRENT_10_mA_T114 0xa +#define PE_CURRENT_11_mA_T114 0xb +#define PE_CURRENT_12_mA_T114 0xc +#define PE_CURRENT_13_mA_T114 0xd +#define PE_CURRENT_14_mA_T114 0xe +#define PE_CURRENT_15_mA_T114 0xf + +#define HDMI_NV_PDISP_KEY_CTRL 0x9a +#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b +#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c +#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d 
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e +#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f +#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0 +#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1 +#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2 +#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3 + +#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac +#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) +#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc +#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd + +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0 +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1 +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2 +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3 +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4 +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 +#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 + +#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1 +#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0) +#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8) +#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16) +#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24) + +#define PEAK_CURRENT_0_000_mA 0x00 +#define PEAK_CURRENT_0_200_mA 0x01 +#define PEAK_CURRENT_0_400_mA 0x02 +#define PEAK_CURRENT_0_600_mA 0x03 +#define PEAK_CURRENT_0_800_mA 0x04 +#define PEAK_CURRENT_1_000_mA 0x05 +#define PEAK_CURRENT_1_200_mA 0x06 +#define PEAK_CURRENT_1_400_mA 0x07 +#define PEAK_CURRENT_1_600_mA 0x08 +#define PEAK_CURRENT_1_800_mA 0x09 +#define PEAK_CURRENT_2_000_mA 0x0a +#define PEAK_CURRENT_2_200_mA 0x0b +#define PEAK_CURRENT_2_400_mA 0x0c +#define PEAK_CURRENT_2_600_mA 0x0d +#define PEAK_CURRENT_2_800_mA 0x0e +#define PEAK_CURRENT_3_000_mA 0x0f +#define PEAK_CURRENT_3_200_mA 0x10 +#define PEAK_CURRENT_3_400_mA 0x11 +#define PEAK_CURRENT_3_600_mA 0x12 +#define PEAK_CURRENT_3_800_mA 0x13 +#define PEAK_CURRENT_4_000_mA 0x14 +#define PEAK_CURRENT_4_200_mA 0x15 +#define PEAK_CURRENT_4_400_mA 0x16 +#define PEAK_CURRENT_4_600_mA 0x17 +#define PEAK_CURRENT_4_800_mA 0x18 +#define PEAK_CURRENT_5_000_mA 0x19 +#define PEAK_CURRENT_5_200_mA 0x1a +#define PEAK_CURRENT_5_400_mA 0x1b +#define PEAK_CURRENT_5_600_mA 0x1c +#define PEAK_CURRENT_5_800_mA 0x1d +#define PEAK_CURRENT_6_000_mA 0x1e +#define PEAK_CURRENT_6_200_mA 0x1f +#define PEAK_CURRENT_6_400_mA 0x20 +#define PEAK_CURRENT_6_600_mA 0x21 +#define PEAK_CURRENT_6_800_mA 0x22 +#define PEAK_CURRENT_7_000_mA 0x23 +#define PEAK_CURRENT_7_200_mA 0x24 +#define PEAK_CURRENT_7_400_mA 0x25 +#define PEAK_CURRENT_7_600_mA 0x26 +#define PEAK_CURRENT_7_800_mA 0x27 +#define PEAK_CURRENT_8_000_mA 0x28 +#define PEAK_CURRENT_8_200_mA 0x29 +#define PEAK_CURRENT_8_400_mA 0x2a +#define PEAK_CURRENT_8_600_mA 0x2b +#define PEAK_CURRENT_8_800_mA 0x2c +#define PEAK_CURRENT_9_000_mA 0x2d +#define PEAK_CURRENT_9_200_mA 0x2e +#define PEAK_CURRENT_9_400_mA 0x2f + +#define HDMI_NV_PDISP_SOR_PAD_CTLS0 0xd2 + +#endif /* TEGRA_HDMI_H */ diff --git a/kernel/drivers/gpu/drm/tegra/mipi-phy.c b/kernel/drivers/gpu/drm/tegra/mipi-phy.c new file mode 100644 index 000000000..ba2ae6511 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/mipi-phy.c @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/errno.h> +#include <linux/kernel.h> + +#include "mipi-phy.h" + +/* + * Default D-PHY timings based on MIPI D-PHY specification. 
Derived from the + * valid ranges specified in Section 6.9, Table 14, Page 40 of the D-PHY + * specification (v1.2) with minor adjustments. + */ +int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, + unsigned long period) +{ + timing->clkmiss = 0; + timing->clkpost = 70 + 52 * period; + timing->clkpre = 8; + timing->clkprepare = 65; + timing->clksettle = 95; + timing->clktermen = 0; + timing->clktrail = 80; + timing->clkzero = 260; + timing->dtermen = 0; + timing->eot = 0; + timing->hsexit = 120; + timing->hsprepare = 65 + 5 * period; + timing->hszero = 145 + 5 * period; + timing->hssettle = 85 + 6 * period; + timing->hsskip = 40; + + /* + * The MIPI D-PHY specification (Section 6.9, v1.2, Table 14, Page 40) + * contains this formula as: + * + * T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period) + * + * where n = 1 for forward-direction HS mode and n = 4 for reverse- + * direction HS mode. There's only one setting and this function does + * not parameterize on anything other that period, so this code will + * assumes that reverse-direction HS mode is supported and uses n = 4. + */ + timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period); + + timing->init = 100000; + timing->lpx = 60; + timing->taget = 5 * timing->lpx; + timing->tago = 4 * timing->lpx; + timing->tasure = 2 * timing->lpx; + timing->wakeup = 1000000; + + return 0; +} + +/* + * Validate D-PHY timing according to MIPI D-PHY specification (v1.2, Section + * Section 6.9 "Global Operation Timing Parameters"). + */ +int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing, + unsigned long period) +{ + if (timing->clkmiss > 60) + return -EINVAL; + + if (timing->clkpost < (60 + 52 * period)) + return -EINVAL; + + if (timing->clkpre < 8) + return -EINVAL; + + if (timing->clkprepare < 38 || timing->clkprepare > 95) + return -EINVAL; + + if (timing->clksettle < 95 || timing->clksettle > 300) + return -EINVAL; + + if (timing->clktermen > 38) + return -EINVAL; + + if (timing->clktrail < 60) + return -EINVAL; + + if (timing->clkprepare + timing->clkzero < 300) + return -EINVAL; + + if (timing->dtermen > 35 + 4 * period) + return -EINVAL; + + if (timing->eot > 105 + 12 * period) + return -EINVAL; + + if (timing->hsexit < 100) + return -EINVAL; + + if (timing->hsprepare < 40 + 4 * period || + timing->hsprepare > 85 + 6 * period) + return -EINVAL; + + if (timing->hsprepare + timing->hszero < 145 + 10 * period) + return -EINVAL; + + if ((timing->hssettle < 85 + 6 * period) || + (timing->hssettle > 145 + 10 * period)) + return -EINVAL; + + if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period) + return -EINVAL; + + if (timing->hstrail < max(8 * period, 60 + 4 * period)) + return -EINVAL; + + if (timing->init < 100000) + return -EINVAL; + + if (timing->lpx < 50) + return -EINVAL; + + if (timing->taget != 5 * timing->lpx) + return -EINVAL; + + if (timing->tago != 4 * timing->lpx) + return -EINVAL; + + if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx) + return -EINVAL; + + if (timing->wakeup < 1000000) + return -EINVAL; + + return 0; +} diff --git a/kernel/drivers/gpu/drm/tegra/mipi-phy.h b/kernel/drivers/gpu/drm/tegra/mipi-phy.h new file mode 100644 index 000000000..012ea8ac3 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/mipi-phy.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef DRM_TEGRA_MIPI_PHY_H +#define DRM_TEGRA_MIPI_PHY_H + +/* + * D-PHY timing parameters + * + * A detailed description of these parameters can be found in the MIPI + * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing + * Parameters". + * + * All parameters are specified in nanoseconds. + */ +struct mipi_dphy_timing { + unsigned int clkmiss; + unsigned int clkpost; + unsigned int clkpre; + unsigned int clkprepare; + unsigned int clksettle; + unsigned int clktermen; + unsigned int clktrail; + unsigned int clkzero; + unsigned int dtermen; + unsigned int eot; + unsigned int hsexit; + unsigned int hsprepare; + unsigned int hszero; + unsigned int hssettle; + unsigned int hsskip; + unsigned int hstrail; + unsigned int init; + unsigned int lpx; + unsigned int taget; + unsigned int tago; + unsigned int tasure; + unsigned int wakeup; +}; + +int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, + unsigned long period); +int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing, + unsigned long period); + +#endif diff --git a/kernel/drivers/gpu/drm/tegra/output.c b/kernel/drivers/gpu/drm/tegra/output.c new file mode 100644 index 000000000..37db47975 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/output.c @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/of_gpio.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_panel.h> +#include "drm.h" + +int tegra_output_connector_get_modes(struct drm_connector *connector) +{ + struct tegra_output *output = connector_to_output(connector); + struct edid *edid = NULL; + int err = 0; + + /* + * If the panel provides one or more modes, use them exclusively and + * ignore any other means of obtaining a mode. 
+ */ + if (output->panel) { + err = output->panel->funcs->get_modes(output->panel); + if (err > 0) + return err; + } + + if (output->edid) + edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL); + else if (output->ddc) + edid = drm_get_edid(connector, output->ddc); + + drm_mode_connector_update_edid_property(connector, edid); + + if (edid) { + err = drm_add_edid_modes(connector, edid); + kfree(edid); + } + + return err; +} + +struct drm_encoder * +tegra_output_connector_best_encoder(struct drm_connector *connector) +{ + struct tegra_output *output = connector_to_output(connector); + + return &output->encoder; +} + +enum drm_connector_status +tegra_output_connector_detect(struct drm_connector *connector, bool force) +{ + struct tegra_output *output = connector_to_output(connector); + enum drm_connector_status status = connector_status_unknown; + + if (gpio_is_valid(output->hpd_gpio)) { + if (gpio_get_value(output->hpd_gpio) == 0) + status = connector_status_disconnected; + else + status = connector_status_connected; + } else { + if (!output->panel) + status = connector_status_disconnected; + else + status = connector_status_connected; + } + + return status; +} + +void tegra_output_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +void tegra_output_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static irqreturn_t hpd_irq(int irq, void *data) +{ + struct tegra_output *output = data; + + if (output->connector.dev) + drm_helper_hpd_irq_event(output->connector.dev); + + return IRQ_HANDLED; +} + +int tegra_output_probe(struct tegra_output *output) +{ + struct device_node *ddc, *panel; + enum of_gpio_flags flags; + int err, size; + + if (!output->of_node) + output->of_node = output->dev->of_node; + + panel = of_parse_phandle(output->of_node, "nvidia,panel", 0); + if (panel) { + output->panel = of_drm_find_panel(panel); + if (!output->panel) + return -EPROBE_DEFER; + + of_node_put(panel); + } + + output->edid = of_get_property(output->of_node, "nvidia,edid", &size); + + ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); + if (ddc) { + output->ddc = of_find_i2c_adapter_by_node(ddc); + if (!output->ddc) { + err = -EPROBE_DEFER; + of_node_put(ddc); + return err; + } + + of_node_put(ddc); + } + + output->hpd_gpio = of_get_named_gpio_flags(output->of_node, + "nvidia,hpd-gpio", 0, + &flags); + if (gpio_is_valid(output->hpd_gpio)) { + unsigned long flags; + + err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN, + "HDMI hotplug detect"); + if (err < 0) { + dev_err(output->dev, "gpio_request_one(): %d\n", err); + return err; + } + + err = gpio_to_irq(output->hpd_gpio); + if (err < 0) { + dev_err(output->dev, "gpio_to_irq(): %d\n", err); + gpio_free(output->hpd_gpio); + return err; + } + + output->hpd_irq = err; + + flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | + IRQF_ONESHOT; + + err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq, + flags, "hpd", output); + if (err < 0) { + dev_err(output->dev, "failed to request IRQ#%u: %d\n", + output->hpd_irq, err); + gpio_free(output->hpd_gpio); + return err; + } + + output->connector.polled = DRM_CONNECTOR_POLL_HPD; + + /* + * Disable the interrupt until the connector has been + * initialized to avoid a race in the hotplug interrupt + * handler. 
+ */ + disable_irq(output->hpd_irq); + } + + return 0; +} + +void tegra_output_remove(struct tegra_output *output) +{ + if (gpio_is_valid(output->hpd_gpio)) { + free_irq(output->hpd_irq, output); + gpio_free(output->hpd_gpio); + } + + if (output->ddc) + put_device(&output->ddc->dev); +} + +int tegra_output_init(struct drm_device *drm, struct tegra_output *output) +{ + int err; + + if (output->panel) { + err = drm_panel_attach(output->panel, &output->connector); + if (err < 0) + return err; + } + + /* + * The connector is now registered and ready to receive hotplug events + * so the hotplug interrupt can be enabled. + */ + if (gpio_is_valid(output->hpd_gpio)) + enable_irq(output->hpd_irq); + + return 0; +} + +void tegra_output_exit(struct tegra_output *output) +{ + /* + * The connector is going away, so the interrupt must be disabled to + * prevent the hotplug interrupt handler from potentially crashing. + */ + if (gpio_is_valid(output->hpd_gpio)) + disable_irq(output->hpd_irq); + + if (output->panel) + drm_panel_detach(output->panel); +} diff --git a/kernel/drivers/gpu/drm/tegra/rgb.c b/kernel/drivers/gpu/drm/tegra/rgb.c new file mode 100644 index 000000000..7cd833f5b --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/rgb.c @@ -0,0 +1,343 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_panel.h> + +#include "drm.h" +#include "dc.h" + +struct tegra_rgb { + struct tegra_output output; + struct tegra_dc *dc; + bool enabled; + + struct clk *clk_parent; + struct clk *clk; +}; + +static inline struct tegra_rgb *to_rgb(struct tegra_output *output) +{ + return container_of(output, struct tegra_rgb, output); +} + +struct reg_entry { + unsigned long offset; + unsigned long value; +}; + +static const struct reg_entry rgb_enable[] = { + { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 }, + { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 }, + { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 }, + { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 }, + { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 }, + { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 }, + { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 }, + { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 }, + { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 }, + { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 }, +}; + +static const struct reg_entry rgb_disable[] = { + { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 }, + { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 }, + { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa }, + { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa }, + { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa }, + { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa }, 
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 }, + { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 }, + { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 }, + { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 }, + { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 }, + { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 }, +}; + +static void tegra_dc_write_regs(struct tegra_dc *dc, + const struct reg_entry *table, + unsigned int num) +{ + unsigned int i; + + for (i = 0; i < num; i++) + tegra_dc_writel(dc, table[i].value, table[i].offset); +} + +static void tegra_rgb_connector_dpms(struct drm_connector *connector, + int mode) +{ +} + +static const struct drm_connector_funcs tegra_rgb_connector_funcs = { + .dpms = tegra_rgb_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_rgb_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* + * FIXME: For now, always assume that the mode is okay. There are + * unresolved issues with clk_round_rate(), which doesn't always + * reliably report whether a frequency can be set or not. + */ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_rgb_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_rgb_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_rgb *rgb = to_rgb(output); + u32 value; + + if (output->panel) + drm_panel_prepare(output->panel); + + tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); + + value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; + tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS); + + /* XXX: parameterize? */ + value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1)); + value &= ~LVS_OUTPUT_POLARITY_LOW; + value &= ~LHS_OUTPUT_POLARITY_LOW; + tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1)); + + /* XXX: parameterize? */ + value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB | + DISP_ORDER_RED_BLUE; + tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL); + + /* XXX: parameterize? 
*/ + value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; + tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); + + tegra_dc_commit(rgb->dc); + + if (output->panel) + drm_panel_enable(output->panel); +} + +static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_rgb *rgb = to_rgb(output); + + if (output->panel) + drm_panel_disable(output->panel); + + tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); + tegra_dc_commit(rgb->dc); + + if (output->panel) + drm_panel_unprepare(output->panel); +} + +static int +tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; + struct tegra_rgb *rgb = to_rgb(output); + unsigned int div; + int err; + + /* + * We may not want to change the frequency of the parent clock, since + * it may be a parent for other peripherals. This is due to the fact + * that on Tegra20 there's only a single clock dedicated to display + * (pll_d_out0), whereas later generations have a second one that can + * be used to independently drive a second output (pll_d2_out0). + * + * As a way to support multiple outputs on Tegra20 as well, pll_p is + * typically used as the parent clock for the display controllers. + * But this comes at a cost: pll_p is the parent of several other + * peripherals, so its frequency shouldn't change out of the blue. + * + * The best we can do at this point is to use the shift clock divider + * and hope that the desired frequency can be matched (or at least + * matched sufficiently close that the panel will still work). 
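 *
 * (Editorial worked example, not part of the original comment: with a
 * parent clock of, say, 216 MHz and a requested pixel clock of 72 MHz,
 * the computation below gives div = (216 MHz * 2 / 72 MHz) - 2 = 4;
 * inverting the same formula, that divider reproduces
 * 216 * 2 / (4 + 2) = 72 MHz exactly. A 70 MHz request yields the same
 * div = 4 after integer division, so the panel would run roughly 3%
 * fast; that is the "sufficiently close" case referred to above. The
 * 216 MHz parent rate is purely illustrative.)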
+ */ + div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; + pclk = 0; + + err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent, + pclk, div); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } + + return err; +} + +static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = { + .dpms = tegra_rgb_encoder_dpms, + .prepare = tegra_rgb_encoder_prepare, + .commit = tegra_rgb_encoder_commit, + .mode_set = tegra_rgb_encoder_mode_set, + .disable = tegra_rgb_encoder_disable, + .atomic_check = tegra_rgb_encoder_atomic_check, +}; + +int tegra_dc_rgb_probe(struct tegra_dc *dc) +{ + struct device_node *np; + struct tegra_rgb *rgb; + int err; + + np = of_get_child_by_name(dc->dev->of_node, "rgb"); + if (!np || !of_device_is_available(np)) + return -ENODEV; + + rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL); + if (!rgb) + return -ENOMEM; + + rgb->output.dev = dc->dev; + rgb->output.of_node = np; + rgb->dc = dc; + + err = tegra_output_probe(&rgb->output); + if (err < 0) + return err; + + rgb->clk = devm_clk_get(dc->dev, NULL); + if (IS_ERR(rgb->clk)) { + dev_err(dc->dev, "failed to get clock\n"); + return PTR_ERR(rgb->clk); + } + + rgb->clk_parent = devm_clk_get(dc->dev, "parent"); + if (IS_ERR(rgb->clk_parent)) { + dev_err(dc->dev, "failed to get parent clock\n"); + return PTR_ERR(rgb->clk_parent); + } + + err = clk_set_parent(rgb->clk, rgb->clk_parent); + if (err < 0) { + dev_err(dc->dev, "failed to set parent clock: %d\n", err); + return err; + } + + dc->rgb = &rgb->output; + + return 0; +} + +int tegra_dc_rgb_remove(struct tegra_dc *dc) +{ + if (!dc->rgb) + return 0; + + tegra_output_remove(dc->rgb); + dc->rgb = NULL; + + return 0; +} + +int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) +{ + struct tegra_output *output = dc->rgb; + int err; + + if (!dc->rgb) + return -ENODEV; + + drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + drm_connector_helper_add(&output->connector, + &tegra_rgb_connector_helper_funcs); + output->connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + drm_encoder_helper_add(&output->encoder, + &tegra_rgb_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&output->connector, + &output->encoder); + drm_connector_register(&output->connector); + + err = tegra_output_init(drm, output); + if (err < 0) { + dev_err(output->dev, "failed to initialize output: %d\n", err); + return err; + } + + /* + * Other outputs can be attached to either display controller. The RGB + * outputs are an exception and work only with their parent display + * controller. + */ + output->encoder.possible_crtcs = drm_crtc_mask(&dc->base); + + return 0; +} + +int tegra_dc_rgb_exit(struct tegra_dc *dc) +{ + if (dc->rgb) + tegra_output_exit(dc->rgb); + + return 0; +} diff --git a/kernel/drivers/gpu/drm/tegra/sor.c b/kernel/drivers/gpu/drm/tegra/sor.c new file mode 100644 index 000000000..7591d8901 --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/sor.c @@ -0,0 +1,1679 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/gpio.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <soc/tegra/pmc.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_panel.h> + +#include "dc.h" +#include "drm.h" +#include "sor.h" + +struct tegra_sor { + struct host1x_client client; + struct tegra_output output; + struct device *dev; + + void __iomem *regs; + + struct reset_control *rst; + struct clk *clk_parent; + struct clk *clk_safe; + struct clk *clk_dp; + struct clk *clk; + + struct tegra_dpaux *dpaux; + + struct mutex lock; + bool enabled; + + struct drm_info_list *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; +}; + +struct tegra_sor_config { + u32 bits_per_pixel; + + u32 active_polarity; + u32 active_count; + u32 tu_size; + u32 active_frac; + u32 watermark; + + u32 hblank_symbols; + u32 vblank_symbols; +}; + +static inline struct tegra_sor * +host1x_client_to_sor(struct host1x_client *client) +{ + return container_of(client, struct tegra_sor, client); +} + +static inline struct tegra_sor *to_sor(struct tegra_output *output) +{ + return container_of(output, struct tegra_sor, output); +} + +static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset) +{ + return readl(sor->regs + (offset << 2)); +} + +static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value, + unsigned long offset) +{ + writel(value, sor->regs + (offset << 2)); +} + +static int tegra_sor_dp_train_fast(struct tegra_sor *sor, + struct drm_dp_link *link) +{ + unsigned int i; + u8 pattern; + u32 value; + int err; + + /* setup lane parameters */ + value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) | + SOR_LANE_DRIVE_CURRENT_LANE2(0x40) | + SOR_LANE_DRIVE_CURRENT_LANE1(0x40) | + SOR_LANE_DRIVE_CURRENT_LANE0(0x40); + tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0); + + value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) | + SOR_LANE_PREEMPHASIS_LANE2(0x0f) | + SOR_LANE_PREEMPHASIS_LANE1(0x0f) | + SOR_LANE_PREEMPHASIS_LANE0(0x0f); + tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0); + + value = SOR_LANE_POST_CURSOR_LANE3(0x00) | + SOR_LANE_POST_CURSOR_LANE2(0x00) | + SOR_LANE_POST_CURSOR_LANE1(0x00) | + SOR_LANE_POST_CURSOR_LANE0(0x00); + tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0); + + /* disable LVDS mode */ + tegra_sor_writel(sor, 0, SOR_LVDS); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value |= SOR_DP_PADCTL_TX_PU_ENABLE; + value &= ~SOR_DP_PADCTL_TX_PU_MASK; + value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? 
*/ + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | + SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0; + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + usleep_range(10, 100); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | + SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B); + if (err < 0) + return err; + + for (i = 0, value = 0; i < link->num_lanes; i++) { + unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | + SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN1; + value = (value << 8) | lane; + } + + tegra_sor_writel(sor, value, SOR_DP_TPG); + + pattern = DP_TRAINING_PATTERN_1; + + err = tegra_dpaux_train(sor->dpaux, link, pattern); + if (err < 0) + return err; + + value = tegra_sor_readl(sor, SOR_DP_SPARE_0); + value |= SOR_DP_SPARE_SEQ_ENABLE; + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + value |= SOR_DP_SPARE_MACRO_SOR_CLK; + tegra_sor_writel(sor, value, SOR_DP_SPARE_0); + + for (i = 0, value = 0; i < link->num_lanes; i++) { + unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | + SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN2; + value = (value << 8) | lane; + } + + tegra_sor_writel(sor, value, SOR_DP_TPG); + + pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2; + + err = tegra_dpaux_train(sor->dpaux, link, pattern); + if (err < 0) + return err; + + for (i = 0, value = 0; i < link->num_lanes; i++) { + unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | + SOR_DP_TPG_SCRAMBLER_GALIOS | + SOR_DP_TPG_PATTERN_NONE; + value = (value << 8) | lane; + } + + tegra_sor_writel(sor, value, SOR_DP_TPG); + + pattern = DP_TRAINING_PATTERN_DISABLE; + + err = tegra_dpaux_train(sor->dpaux, link, pattern); + if (err < 0) + return err; + + return 0; +} + +static void tegra_sor_super_update(struct tegra_sor *sor) +{ + tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0); + tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0); + tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0); +} + +static void tegra_sor_update(struct tegra_sor *sor) +{ + tegra_sor_writel(sor, 0, SOR_STATE_0); + tegra_sor_writel(sor, 1, SOR_STATE_0); + tegra_sor_writel(sor, 0, SOR_STATE_0); +} + +static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout) +{ + u32 value; + + value = tegra_sor_readl(sor, SOR_PWM_DIV); + value &= ~SOR_PWM_DIV_MASK; + value |= 0x400; /* period */ + tegra_sor_writel(sor, value, SOR_PWM_DIV); + + value = tegra_sor_readl(sor, SOR_PWM_CTL); + value &= ~SOR_PWM_CTL_DUTY_CYCLE_MASK; + value |= 0x400; /* duty cycle */ + value &= ~SOR_PWM_CTL_CLK_SEL; /* clock source: PCLK */ + value |= SOR_PWM_CTL_TRIGGER; + tegra_sor_writel(sor, value, SOR_PWM_CTL); + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWM_CTL); + if ((value & SOR_PWM_CTL_TRIGGER) == 0) + return 0; + + usleep_range(25, 100); + } + + return -ETIMEDOUT; +} + +static int tegra_sor_attach(struct tegra_sor *sor) +{ + unsigned long value, timeout; + + /* wake up in normal mode */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE; + value |= SOR_SUPER_STATE_MODE_NORMAL; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + /* attach */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value |= 
SOR_SUPER_STATE_ATTACHED; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_TEST); + if ((value & SOR_TEST_ATTACHED) != 0) + return 0; + + usleep_range(25, 100); + } + + return -ETIMEDOUT; +} + +static int tegra_sor_wakeup(struct tegra_sor *sor) +{ + unsigned long value, timeout; + + timeout = jiffies + msecs_to_jiffies(250); + + /* wait for head to wake up */ + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_TEST); + value &= SOR_TEST_HEAD_MODE_MASK; + + if (value == SOR_TEST_HEAD_MODE_AWAKE) + return 0; + + usleep_range(25, 100); + } + + return -ETIMEDOUT; +} + +static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout) +{ + u32 value; + + value = tegra_sor_readl(sor, SOR_PWR); + value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU; + tegra_sor_writel(sor, value, SOR_PWR); + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if ((value & SOR_PWR_TRIGGER) == 0) + return 0; + + usleep_range(25, 100); + } + + return -ETIMEDOUT; +} + +struct tegra_sor_params { + /* number of link clocks per line */ + unsigned int num_clocks; + /* ratio between input and output */ + u64 ratio; + /* precision factor */ + u64 precision; + + unsigned int active_polarity; + unsigned int active_count; + unsigned int active_frac; + unsigned int tu_size; + unsigned int error; +}; + +static int tegra_sor_compute_params(struct tegra_sor *sor, + struct tegra_sor_params *params, + unsigned int tu_size) +{ + u64 active_sym, active_count, frac, approx; + u32 active_polarity, active_frac = 0; + const u64 f = params->precision; + s64 error; + + active_sym = params->ratio * tu_size; + active_count = div_u64(active_sym, f) * f; + frac = active_sym - active_count; + + /* fraction < 0.5 */ + if (frac >= (f / 2)) { + active_polarity = 1; + frac = f - frac; + } else { + active_polarity = 0; + } + + if (frac != 0) { + frac = div_u64(f * f, frac); /* 1/fraction */ + if (frac <= (15 * f)) { + active_frac = div_u64(frac, f); + + /* round up */ + if (active_polarity) + active_frac++; + } else { + active_frac = active_polarity ? 
1 : 15; + } + } + + if (active_frac == 1) + active_polarity = 0; + + if (active_polarity == 1) { + if (active_frac) { + approx = active_count + (active_frac * (f - 1)) * f; + approx = div_u64(approx, active_frac * f); + } else { + approx = active_count + f; + } + } else { + if (active_frac) + approx = active_count + div_u64(f, active_frac); + else + approx = active_count; + } + + error = div_s64(active_sym - approx, tu_size); + error *= params->num_clocks; + + if (error <= 0 && abs64(error) < params->error) { + params->active_count = div_u64(active_count, f); + params->active_polarity = active_polarity; + params->active_frac = active_frac; + params->error = abs64(error); + params->tu_size = tu_size; + + if (error == 0) + return true; + } + + return false; +} + +static int tegra_sor_calc_config(struct tegra_sor *sor, + struct drm_display_mode *mode, + struct tegra_sor_config *config, + struct drm_dp_link *link) +{ + const u64 f = 100000, link_rate = link->rate * 1000; + const u64 pclk = mode->clock * 1000; + u64 input, output, watermark, num; + struct tegra_sor_params params; + u32 num_syms_per_line; + unsigned int i; + + if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel) + return -EINVAL; + + output = link_rate * 8 * link->num_lanes; + input = pclk * config->bits_per_pixel; + + if (input >= output) + return -ERANGE; + + memset(¶ms, 0, sizeof(params)); + params.ratio = div64_u64(input * f, output); + params.num_clocks = div_u64(link_rate * mode->hdisplay, pclk); + params.precision = f; + params.error = 64 * f; + params.tu_size = 64; + + for (i = params.tu_size; i >= 32; i--) + if (tegra_sor_compute_params(sor, ¶ms, i)) + break; + + if (params.active_frac == 0) { + config->active_polarity = 0; + config->active_count = params.active_count; + + if (!params.active_polarity) + config->active_count--; + + config->tu_size = params.tu_size; + config->active_frac = 1; + } else { + config->active_polarity = params.active_polarity; + config->active_count = params.active_count; + config->active_frac = params.active_frac; + config->tu_size = params.tu_size; + } + + dev_dbg(sor->dev, + "polarity: %d active count: %d tu size: %d active frac: %d\n", + config->active_polarity, config->active_count, + config->tu_size, config->active_frac); + + watermark = params.ratio * config->tu_size * (f - params.ratio); + watermark = div_u64(watermark, f); + + watermark = div_u64(watermark + params.error, f); + config->watermark = watermark + (config->bits_per_pixel / 8) + 2; + num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) * + (link->num_lanes * 8); + + if (config->watermark > 30) { + config->watermark = 30; + dev_err(sor->dev, + "unable to compute TU size, forcing watermark to %u\n", + config->watermark); + } else if (config->watermark > num_syms_per_line) { + config->watermark = num_syms_per_line; + dev_err(sor->dev, "watermark too high, forcing to %u\n", + config->watermark); + } + + /* compute the number of symbols per horizontal blanking interval */ + num = ((mode->htotal - mode->hdisplay) - 7) * link_rate; + config->hblank_symbols = div_u64(num, pclk); + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + config->hblank_symbols -= 3; + + config->hblank_symbols -= 12 / link->num_lanes; + + /* compute the number of symbols per vertical blanking interval */ + num = (mode->hdisplay - 25) * link_rate; + config->vblank_symbols = div_u64(num, pclk); + config->vblank_symbols -= 36 / link->num_lanes + 4; + + dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols, + 
config->vblank_symbols); + + return 0; +} + +static int tegra_sor_detach(struct tegra_sor *sor) +{ + unsigned long value, timeout; + + /* switch to safe mode */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_MODE_NORMAL; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if (value & SOR_PWR_MODE_SAFE) + break; + } + + if ((value & SOR_PWR_MODE_SAFE) == 0) + return -ETIMEDOUT; + + /* go to sleep */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + /* detach */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_ATTACHED; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_TEST); + if ((value & SOR_TEST_ATTACHED) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_TEST_ATTACHED) != 0) + return -ETIMEDOUT; + + return 0; +} + +static int tegra_sor_power_down(struct tegra_sor *sor) +{ + unsigned long value, timeout; + int err; + + value = tegra_sor_readl(sor, SOR_PWR); + value &= ~SOR_PWR_NORMAL_STATE_PU; + value |= SOR_PWR_TRIGGER; + tegra_sor_writel(sor, value, SOR_PWR); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if ((value & SOR_PWR_TRIGGER) == 0) + return 0; + + usleep_range(25, 100); + } + + if ((value & SOR_PWR_TRIGGER) != 0) + return -ETIMEDOUT; + + err = clk_set_parent(sor->clk, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + /* stop lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | + SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_PLL_0); + value |= SOR_PLL_0_POWER_OFF; + value |= SOR_PLL_0_VCOPD; + tegra_sor_writel(sor, value, SOR_PLL_0); + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_SEQ_PLLCAPPD; + value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_sor_crc_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static int tegra_sor_crc_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) +{ + u32 value; + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + value = 
tegra_sor_readl(sor, SOR_CRC_A); + if (value & SOR_CRC_A_VALID) + return 0; + + usleep_range(100, 200); + } + + return -ETIMEDOUT; +} + +static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct tegra_sor *sor = file->private_data; + ssize_t num, err; + char buf[10]; + u32 value; + + mutex_lock(&sor->lock); + + if (!sor->enabled) { + err = -EAGAIN; + goto unlock; + } + + value = tegra_sor_readl(sor, SOR_STATE_1); + value &= ~SOR_STATE_ASY_CRC_MODE_MASK; + tegra_sor_writel(sor, value, SOR_STATE_1); + + value = tegra_sor_readl(sor, SOR_CRC_CNTRL); + value |= SOR_CRC_CNTRL_ENABLE; + tegra_sor_writel(sor, value, SOR_CRC_CNTRL); + + value = tegra_sor_readl(sor, SOR_TEST); + value &= ~SOR_TEST_CRC_POST_SERIALIZE; + tegra_sor_writel(sor, value, SOR_TEST); + + err = tegra_sor_crc_wait(sor, 100); + if (err < 0) + goto unlock; + + tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A); + value = tegra_sor_readl(sor, SOR_CRC_B); + + num = scnprintf(buf, sizeof(buf), "%08x\n", value); + + err = simple_read_from_buffer(buffer, size, ppos, buf, num); + +unlock: + mutex_unlock(&sor->lock); + return err; +} + +static const struct file_operations tegra_sor_crc_fops = { + .owner = THIS_MODULE, + .open = tegra_sor_crc_open, + .read = tegra_sor_crc_read, + .release = tegra_sor_crc_release, +}; + +static int tegra_sor_show_regs(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_sor *sor = node->info_ent->data; + +#define DUMP_REG(name) \ + seq_printf(s, "%-38s %#05x %08x\n", #name, name, \ + tegra_sor_readl(sor, name)) + + DUMP_REG(SOR_CTXSW); + DUMP_REG(SOR_SUPER_STATE_0); + DUMP_REG(SOR_SUPER_STATE_1); + DUMP_REG(SOR_STATE_0); + DUMP_REG(SOR_STATE_1); + DUMP_REG(SOR_HEAD_STATE_0(0)); + DUMP_REG(SOR_HEAD_STATE_0(1)); + DUMP_REG(SOR_HEAD_STATE_1(0)); + DUMP_REG(SOR_HEAD_STATE_1(1)); + DUMP_REG(SOR_HEAD_STATE_2(0)); + DUMP_REG(SOR_HEAD_STATE_2(1)); + DUMP_REG(SOR_HEAD_STATE_3(0)); + DUMP_REG(SOR_HEAD_STATE_3(1)); + DUMP_REG(SOR_HEAD_STATE_4(0)); + DUMP_REG(SOR_HEAD_STATE_4(1)); + DUMP_REG(SOR_HEAD_STATE_5(0)); + DUMP_REG(SOR_HEAD_STATE_5(1)); + DUMP_REG(SOR_CRC_CNTRL); + DUMP_REG(SOR_DP_DEBUG_MVID); + DUMP_REG(SOR_CLK_CNTRL); + DUMP_REG(SOR_CAP); + DUMP_REG(SOR_PWR); + DUMP_REG(SOR_TEST); + DUMP_REG(SOR_PLL_0); + DUMP_REG(SOR_PLL_1); + DUMP_REG(SOR_PLL_2); + DUMP_REG(SOR_PLL_3); + DUMP_REG(SOR_CSTM); + DUMP_REG(SOR_LVDS); + DUMP_REG(SOR_CRC_A); + DUMP_REG(SOR_CRC_B); + DUMP_REG(SOR_BLANK); + DUMP_REG(SOR_SEQ_CTL); + DUMP_REG(SOR_LANE_SEQ_CTL); + DUMP_REG(SOR_SEQ_INST(0)); + DUMP_REG(SOR_SEQ_INST(1)); + DUMP_REG(SOR_SEQ_INST(2)); + DUMP_REG(SOR_SEQ_INST(3)); + DUMP_REG(SOR_SEQ_INST(4)); + DUMP_REG(SOR_SEQ_INST(5)); + DUMP_REG(SOR_SEQ_INST(6)); + DUMP_REG(SOR_SEQ_INST(7)); + DUMP_REG(SOR_SEQ_INST(8)); + DUMP_REG(SOR_SEQ_INST(9)); + DUMP_REG(SOR_SEQ_INST(10)); + DUMP_REG(SOR_SEQ_INST(11)); + DUMP_REG(SOR_SEQ_INST(12)); + DUMP_REG(SOR_SEQ_INST(13)); + DUMP_REG(SOR_SEQ_INST(14)); + DUMP_REG(SOR_SEQ_INST(15)); + DUMP_REG(SOR_PWM_DIV); + DUMP_REG(SOR_PWM_CTL); + DUMP_REG(SOR_VCRC_A_0); + DUMP_REG(SOR_VCRC_A_1); + DUMP_REG(SOR_VCRC_B_0); + DUMP_REG(SOR_VCRC_B_1); + DUMP_REG(SOR_CCRC_A_0); + DUMP_REG(SOR_CCRC_A_1); + DUMP_REG(SOR_CCRC_B_0); + DUMP_REG(SOR_CCRC_B_1); + DUMP_REG(SOR_EDATA_A_0); + DUMP_REG(SOR_EDATA_A_1); + DUMP_REG(SOR_EDATA_B_0); + DUMP_REG(SOR_EDATA_B_1); + DUMP_REG(SOR_COUNT_A_0); + DUMP_REG(SOR_COUNT_A_1); + DUMP_REG(SOR_COUNT_B_0); + DUMP_REG(SOR_COUNT_B_1); + DUMP_REG(SOR_DEBUG_A_0); + 
DUMP_REG(SOR_DEBUG_A_1); + DUMP_REG(SOR_DEBUG_B_0); + DUMP_REG(SOR_DEBUG_B_1); + DUMP_REG(SOR_TRIG); + DUMP_REG(SOR_MSCHECK); + DUMP_REG(SOR_XBAR_CTRL); + DUMP_REG(SOR_XBAR_POL); + DUMP_REG(SOR_DP_LINKCTL_0); + DUMP_REG(SOR_DP_LINKCTL_1); + DUMP_REG(SOR_LANE_DRIVE_CURRENT_0); + DUMP_REG(SOR_LANE_DRIVE_CURRENT_1); + DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0); + DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1); + DUMP_REG(SOR_LANE_PREEMPHASIS_0); + DUMP_REG(SOR_LANE_PREEMPHASIS_1); + DUMP_REG(SOR_LANE4_PREEMPHASIS_0); + DUMP_REG(SOR_LANE4_PREEMPHASIS_1); + DUMP_REG(SOR_LANE_POST_CURSOR_0); + DUMP_REG(SOR_LANE_POST_CURSOR_1); + DUMP_REG(SOR_DP_CONFIG_0); + DUMP_REG(SOR_DP_CONFIG_1); + DUMP_REG(SOR_DP_MN_0); + DUMP_REG(SOR_DP_MN_1); + DUMP_REG(SOR_DP_PADCTL_0); + DUMP_REG(SOR_DP_PADCTL_1); + DUMP_REG(SOR_DP_DEBUG_0); + DUMP_REG(SOR_DP_DEBUG_1); + DUMP_REG(SOR_DP_SPARE_0); + DUMP_REG(SOR_DP_SPARE_1); + DUMP_REG(SOR_DP_AUDIO_CTRL); + DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS); + DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6); + DUMP_REG(SOR_DP_TPG); + DUMP_REG(SOR_DP_TPG_CONFIG); + DUMP_REG(SOR_DP_LQ_CSTM_0); + DUMP_REG(SOR_DP_LQ_CSTM_1); + DUMP_REG(SOR_DP_LQ_CSTM_2); + +#undef DUMP_REG + + return 0; +} + +static const struct drm_info_list debugfs_files[] = { + { "regs", tegra_sor_show_regs, 0, NULL }, +}; + +static int tegra_sor_debugfs_init(struct tegra_sor *sor, + struct drm_minor *minor) +{ + struct dentry *entry; + unsigned int i; + int err = 0; + + sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); + if (!sor->debugfs) + return -ENOMEM; + + sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), + GFP_KERNEL); + if (!sor->debugfs_files) { + err = -ENOMEM; + goto remove; + } + + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + sor->debugfs_files[i].data = sor; + + err = drm_debugfs_create_files(sor->debugfs_files, + ARRAY_SIZE(debugfs_files), + sor->debugfs, minor); + if (err < 0) + goto free; + + entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, + &tegra_sor_crc_fops); + if (!entry) { + err = -ENOMEM; + goto free; + } + + return err; + +free: + kfree(sor->debugfs_files); + sor->debugfs_files = NULL; +remove: + debugfs_remove_recursive(sor->debugfs); + sor->debugfs = NULL; + return err; +} + +static void tegra_sor_debugfs_exit(struct tegra_sor *sor) +{ + drm_debugfs_remove_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files), + sor->minor); + sor->minor = NULL; + + kfree(sor->debugfs_files); + sor->debugfs = NULL; + + debugfs_remove_recursive(sor->debugfs); + sor->debugfs_files = NULL; +} + +static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode) +{ +} + +static enum drm_connector_status +tegra_sor_connector_detect(struct drm_connector *connector, bool force) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_sor *sor = to_sor(output); + + if (sor->dpaux) + return tegra_dpaux_detect(sor->dpaux); + + return connector_status_unknown; +} + +static const struct drm_connector_funcs tegra_sor_connector_funcs = { + .dpms = tegra_sor_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_sor_connector_detect, + .fill_modes = 
drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int tegra_sor_connector_get_modes(struct drm_connector *connector) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_sor *sor = to_sor(output); + int err; + + if (sor->dpaux) + tegra_dpaux_enable(sor->dpaux); + + err = tegra_output_connector_get_modes(connector); + + if (sor->dpaux) + tegra_dpaux_disable(sor->dpaux); + + return err; +} + +static enum drm_mode_status +tegra_sor_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { + .get_modes = tegra_sor_connector_get_modes, + .mode_valid = tegra_sor_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_sor_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_sor_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + unsigned int vbe, vse, hbe, hse, vbs, hbs, i; + struct tegra_sor *sor = to_sor(output); + struct tegra_sor_config config; + struct drm_dp_link link; + struct drm_dp_aux *aux; + int err = 0; + u32 value; + + mutex_lock(&sor->lock); + + if (sor->enabled) + goto unlock; + + err = clk_prepare_enable(sor->clk); + if (err < 0) + goto unlock; + + reset_control_deassert(sor->rst); + + if (output->panel) + drm_panel_prepare(output->panel); + + /* FIXME: properly convert to struct drm_dp_aux */ + aux = (struct drm_dp_aux *)sor->dpaux; + + if (sor->dpaux) { + err = tegra_dpaux_enable(sor->dpaux); + if (err < 0) + dev_err(sor->dev, "failed to enable DP: %d\n", err); + + err = drm_dp_link_probe(aux, &link); + if (err < 0) { + dev_err(sor->dev, "failed to probe eDP link: %d\n", + err); + goto unlock; + } + } + + err = clk_set_parent(sor->clk, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + memset(&config, 0, sizeof(config)); + config.bits_per_pixel = output->connector.display_info.bpc * 3; + + err = tegra_sor_calc_config(sor, mode, &config, &link); + if (err < 0) + dev_err(sor->dev, "failed to compute link configuration: %d\n", + err); + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + value = tegra_sor_readl(sor, SOR_PLL_2); + value &= ~SOR_PLL_2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_PLL_3); + value |= SOR_PLL_3_PLL_VDD_MODE_V3_3; + tegra_sor_writel(sor, value, SOR_PLL_3); + + value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST | + SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT; + tegra_sor_writel(sor, value, SOR_PLL_0); + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_SEQ_PLLCAPPD; + value &= 
~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; + value |= SOR_PLL_2_LVDS_ENABLE; + tegra_sor_writel(sor, value, SOR_PLL_2); + + value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM; + tegra_sor_writel(sor, value, SOR_PLL_1); + + while (true) { + value = tegra_sor_readl(sor, SOR_PLL_2); + if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0) + break; + + usleep_range(250, 1000); + } + + value = tegra_sor_readl(sor, SOR_PLL_2); + value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE; + value &= ~SOR_PLL_2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + /* + * power up + */ + + /* set safe link bandwidth (1.62 Gbps) */ + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62; + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + /* step 1 */ + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN | + SOR_PLL_2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + value = tegra_sor_readl(sor, SOR_PLL_0); + value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF; + tegra_sor_writel(sor, value, SOR_PLL_0); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value &= ~SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + /* step 2 */ + err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS); + if (err < 0) { + dev_err(sor->dev, "failed to power on I/O rail: %d\n", err); + goto unlock; + } + + usleep_range(5, 100); + + /* step 3 */ + value = tegra_sor_readl(sor, SOR_PLL_2); + value &= ~SOR_PLL_2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + /* step 4 */ + value = tegra_sor_readl(sor, SOR_PLL_0); + value &= ~SOR_PLL_0_POWER_OFF; + value &= ~SOR_PLL_0_VCOPD; + tegra_sor_writel(sor, value, SOR_PLL_0); + + value = tegra_sor_readl(sor, SOR_PLL_2); + value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(200, 1000); + + /* step 5 */ + value = tegra_sor_readl(sor, SOR_PLL_2); + value &= ~SOR_PLL_2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + /* switch to DP clock */ + err = clk_set_parent(sor->clk, sor->clk_dp); + if (err < 0) + dev_err(sor->dev, "failed to set DP parent clock: %d\n", err); + + /* power DP lanes */ + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + + if (link.num_lanes <= 2) + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2); + else + value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2; + + if (link.num_lanes <= 1) + value &= ~SOR_DP_PADCTL_PD_TXD_1; + else + value |= SOR_DP_PADCTL_PD_TXD_1; + + if (link.num_lanes == 0) + value &= ~SOR_DP_PADCTL_PD_TXD_0; + else + value |= SOR_DP_PADCTL_PD_TXD_0; + + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); + value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; + value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes); + tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); + + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | + SOR_LANE_SEQ_CTL_POWER_STATE_UP; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + while (true) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(250, 1000); + } + + /* set link bandwidth */ + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value |= drm_dp_link_rate_to_bw_code(link.rate) << 2; + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + /* set linkctl */ + 
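	/*
	 * Editorial note, not part of the imported source: the tu_size,
	 * active_count/active_frac and watermark values programmed below
	 * were computed by tegra_sor_calc_config() earlier in this
	 * function. As a rough worked example, assume a hypothetical
	 * two-lane eDP panel running at HBR (270 Msym/s per lane) with a
	 * 148.5 MHz, 24 bpp mode:
	 *
	 *   input  = 148.5 MHz * 24 bit       = 3.564 Gbit/s
	 *   output = 270 Msym/s * 8 * 2 lanes = 4.32 Gbit/s
	 *   ratio  = input / output           = ~0.825
	 *
	 * so about 0.825 * 64 = ~52.8 of the 64 symbols in each transfer
	 * unit must carry pixel data. The active count/fraction/polarity
	 * triple encodes that non-integer value, and the watermark is
	 * derived from the same ratio.
	 */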
value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); + value |= SOR_DP_LINKCTL_ENABLE; + + value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK; + value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size); + + value |= SOR_DP_LINKCTL_ENHANCED_FRAME; + tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); + + for (i = 0, value = 0; i < 4; i++) { + unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | + SOR_DP_TPG_SCRAMBLER_GALIOS | + SOR_DP_TPG_PATTERN_NONE; + value = (value << 8) | lane; + } + + tegra_sor_writel(sor, value, SOR_DP_TPG); + + value = tegra_sor_readl(sor, SOR_DP_CONFIG_0); + value &= ~SOR_DP_CONFIG_WATERMARK_MASK; + value |= SOR_DP_CONFIG_WATERMARK(config.watermark); + + value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK; + value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count); + + value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK; + value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac); + + if (config.active_polarity) + value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; + else + value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; + + value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE; + value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; + tegra_sor_writel(sor, value, SOR_DP_CONFIG_0); + + value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS); + value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK; + value |= config.hblank_symbols & 0xffff; + tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS); + + value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS); + value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK; + value |= config.vblank_symbols & 0xffff; + tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS); + + /* enable pad calibration logic */ + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value |= SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + if (sor->dpaux) { + u8 rate, lanes; + + err = drm_dp_link_probe(aux, &link); + if (err < 0) { + dev_err(sor->dev, "failed to probe eDP link: %d\n", + err); + goto unlock; + } + + err = drm_dp_link_power_up(aux, &link); + if (err < 0) { + dev_err(sor->dev, "failed to power up eDP link: %d\n", + err); + goto unlock; + } + + err = drm_dp_link_configure(aux, &link); + if (err < 0) { + dev_err(sor->dev, "failed to configure eDP link: %d\n", + err); + goto unlock; + } + + rate = drm_dp_link_rate_to_bw_code(link.rate); + lanes = link.num_lanes; + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); + value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; + value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); + + if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + value |= SOR_DP_LINKCTL_ENHANCED_FRAME; + + tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); + + /* disable training pattern generator */ + + for (i = 0; i < link.num_lanes; i++) { + unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | + SOR_DP_TPG_SCRAMBLER_GALIOS | + SOR_DP_TPG_PATTERN_NONE; + value = (value << 8) | lane; + } + + tegra_sor_writel(sor, value, SOR_DP_TPG); + + err = tegra_sor_dp_train_fast(sor, &link); + if (err < 0) { + dev_err(sor->dev, "DP fast link training failed: %d\n", + err); + goto unlock; + } + + dev_dbg(sor->dev, "fast link training succeeded\n"); + } + + err = tegra_sor_power_up(sor, 250); + if (err < 0) { + dev_err(sor->dev, "failed to power up SOR: %d\n", err); + goto unlock; + } + + /* + * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete + * raster, associate with display controller) + */ + value = 
SOR_STATE_ASY_PROTOCOL_DP_A | + SOR_STATE_ASY_CRC_MODE_COMPLETE | + SOR_STATE_ASY_OWNER(dc->pipe + 1); + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + value &= ~SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + value |= SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + value &= ~SOR_STATE_ASY_VSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + value |= SOR_STATE_ASY_VSYNCPOL; + + switch (config.bits_per_pixel) { + case 24: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; + break; + + case 18: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444; + break; + + default: + BUG(); + break; + } + + tegra_sor_writel(sor, value, SOR_STATE_1); + + /* + * TODO: The video timing programming below doesn't seem to match the + * register definitions. + */ + + value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0)); + + vse = mode->vsync_end - mode->vsync_start - 1; + hse = mode->hsync_end - mode->hsync_start - 1; + + value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0)); + + vbe = vse + (mode->vsync_start - mode->vdisplay); + hbe = hse + (mode->hsync_start - mode->hdisplay); + + value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0)); + + vbs = vbe + mode->vdisplay; + hbs = hbe + mode->hdisplay; + + value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0)); + + /* CSTM (LVDS, link A/B, upper) */ + value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | + SOR_CSTM_UPPER; + tegra_sor_writel(sor, value, SOR_CSTM); + + /* PWM setup */ + err = tegra_sor_setup_pwm(sor, 250); + if (err < 0) { + dev_err(sor->dev, "failed to setup PWM: %d\n", err); + goto unlock; + } + + tegra_sor_update(sor); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= SOR_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + err = tegra_sor_attach(sor); + if (err < 0) { + dev_err(sor->dev, "failed to attach SOR: %d\n", err); + goto unlock; + } + + err = tegra_sor_wakeup(sor); + if (err < 0) { + dev_err(sor->dev, "failed to enable DC: %d\n", err); + goto unlock; + } + + if (output->panel) + drm_panel_enable(output->panel); + + sor->enabled = true; + +unlock: + mutex_unlock(&sor->lock); +} + +static void tegra_sor_encoder_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + u32 value; + int err; + + mutex_lock(&sor->lock); + + if (!sor->enabled) + goto unlock; + + if (output->panel) + drm_panel_disable(output->panel); + + err = tegra_sor_detach(sor); + if (err < 0) { + dev_err(sor->dev, "failed to detach SOR: %d\n", err); + goto unlock; + } + + tegra_sor_writel(sor, 0, SOR_STATE_1); + tegra_sor_update(sor); + + /* + * The following accesses registers of the display controller, so make + * sure it's only executed when the output is attached to one. 
+ */ + if (dc) { + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~SOR_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + } + + err = tegra_sor_power_down(sor); + if (err < 0) { + dev_err(sor->dev, "failed to power down SOR: %d\n", err); + goto unlock; + } + + if (sor->dpaux) { + err = tegra_dpaux_disable(sor->dpaux); + if (err < 0) { + dev_err(sor->dev, "failed to disable DP: %d\n", err); + goto unlock; + } + } + + err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS); + if (err < 0) { + dev_err(sor->dev, "failed to power off I/O rail: %d\n", err); + goto unlock; + } + + if (output->panel) + drm_panel_unprepare(output->panel); + + clk_disable_unprepare(sor->clk); + reset_control_assert(sor->rst); + + sor->enabled = false; + +unlock: + mutex_unlock(&sor->lock); +} + +static int +tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; + struct tegra_sor *sor = to_sor(output); + int err; + + err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, + pclk, 0); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } + + return 0; +} + +static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = { + .dpms = tegra_sor_encoder_dpms, + .prepare = tegra_sor_encoder_prepare, + .commit = tegra_sor_encoder_commit, + .mode_set = tegra_sor_encoder_mode_set, + .disable = tegra_sor_encoder_disable, + .atomic_check = tegra_sor_encoder_atomic_check, +}; + +static int tegra_sor_init(struct host1x_client *client) +{ + struct drm_device *drm = dev_get_drvdata(client->parent); + struct tegra_sor *sor = host1x_client_to_sor(client); + int err; + + if (!sor->dpaux) + return -ENODEV; + + sor->output.dev = sor->dev; + + drm_connector_init(drm, &sor->output.connector, + &tegra_sor_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + drm_connector_helper_add(&sor->output.connector, + &tegra_sor_connector_helper_funcs); + sor->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&sor->output.encoder, + &tegra_sor_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&sor->output.connector, + &sor->output.encoder); + drm_connector_register(&sor->output.connector); + + err = tegra_output_init(drm, &sor->output); + if (err < 0) { + dev_err(client->dev, "failed to initialize output: %d\n", err); + return err; + } + + sor->output.encoder.possible_crtcs = 0x3; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_sor_debugfs_init(sor, drm->primary); + if (err < 0) + dev_err(sor->dev, "debugfs setup failed: %d\n", err); + } + + if (sor->dpaux) { + err = tegra_dpaux_attach(sor->dpaux, &sor->output); + if (err < 0) { + dev_err(sor->dev, "failed to attach DP: %d\n", err); + return err; + } + } + + /* + * XXX: Remove this reset once proper hand-over from firmware to + * kernel is possible. 
+ */ + err = reset_control_assert(sor->rst); + if (err < 0) { + dev_err(sor->dev, "failed to assert SOR reset: %d\n", err); + return err; + } + + err = clk_prepare_enable(sor->clk); + if (err < 0) { + dev_err(sor->dev, "failed to enable clock: %d\n", err); + return err; + } + + usleep_range(1000, 3000); + + err = reset_control_deassert(sor->rst); + if (err < 0) { + dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); + return err; + } + + err = clk_prepare_enable(sor->clk_safe); + if (err < 0) + return err; + + err = clk_prepare_enable(sor->clk_dp); + if (err < 0) + return err; + + return 0; +} + +static int tegra_sor_exit(struct host1x_client *client) +{ + struct tegra_sor *sor = host1x_client_to_sor(client); + int err; + + tegra_output_exit(&sor->output); + + if (sor->dpaux) { + err = tegra_dpaux_detach(sor->dpaux); + if (err < 0) { + dev_err(sor->dev, "failed to detach DP: %d\n", err); + return err; + } + } + + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk_dp); + clk_disable_unprepare(sor->clk); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_sor_debugfs_exit(sor); + + return 0; +} + +static const struct host1x_client_ops sor_client_ops = { + .init = tegra_sor_init, + .exit = tegra_sor_exit, +}; + +static int tegra_sor_probe(struct platform_device *pdev) +{ + struct device_node *np; + struct tegra_sor *sor; + struct resource *regs; + int err; + + sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL); + if (!sor) + return -ENOMEM; + + sor->output.dev = sor->dev = &pdev->dev; + + np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0); + if (np) { + sor->dpaux = tegra_dpaux_find_by_of_node(np); + of_node_put(np); + + if (!sor->dpaux) + return -EPROBE_DEFER; + } + + err = tegra_output_probe(&sor->output); + if (err < 0) + return err; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sor->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(sor->regs)) + return PTR_ERR(sor->regs); + + sor->rst = devm_reset_control_get(&pdev->dev, "sor"); + if (IS_ERR(sor->rst)) + return PTR_ERR(sor->rst); + + sor->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sor->clk)) + return PTR_ERR(sor->clk); + + sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); + if (IS_ERR(sor->clk_parent)) + return PTR_ERR(sor->clk_parent); + + sor->clk_safe = devm_clk_get(&pdev->dev, "safe"); + if (IS_ERR(sor->clk_safe)) + return PTR_ERR(sor->clk_safe); + + sor->clk_dp = devm_clk_get(&pdev->dev, "dp"); + if (IS_ERR(sor->clk_dp)) + return PTR_ERR(sor->clk_dp); + + INIT_LIST_HEAD(&sor->client.list); + sor->client.ops = &sor_client_ops; + sor->client.dev = &pdev->dev; + + mutex_init(&sor->lock); + + err = host1x_client_register(&sor->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + return err; + } + + platform_set_drvdata(pdev, sor); + + return 0; +} + +static int tegra_sor_remove(struct platform_device *pdev) +{ + struct tegra_sor *sor = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&sor->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + tegra_output_remove(&sor->output); + + return 0; +} + +static const struct of_device_id tegra_sor_of_match[] = { + { .compatible = "nvidia,tegra124-sor", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_sor_of_match); + +struct platform_driver tegra_sor_driver = { + .driver = { + .name = "tegra-sor", + .of_match_table = tegra_sor_of_match, + }, + .probe = tegra_sor_probe, + .remove = 
tegra_sor_remove, +}; diff --git a/kernel/drivers/gpu/drm/tegra/sor.h b/kernel/drivers/gpu/drm/tegra/sor.h new file mode 100644 index 000000000..a5f8853fe --- /dev/null +++ b/kernel/drivers/gpu/drm/tegra/sor.h @@ -0,0 +1,282 @@ +/* + * Copyright (C) 2013 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef DRM_TEGRA_SOR_H +#define DRM_TEGRA_SOR_H + +#define SOR_CTXSW 0x00 + +#define SOR_SUPER_STATE_0 0x01 + +#define SOR_SUPER_STATE_1 0x02 +#define SOR_SUPER_STATE_ATTACHED (1 << 3) +#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2) +#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0) +#define SOR_SUPER_STATE_HEAD_MODE_AWAKE (2 << 0) +#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0) +#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0) + +#define SOR_STATE_0 0x03 + +#define SOR_STATE_1 0x04 +#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17) +#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17) +#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17) +#define SOR_STATE_ASY_VSYNCPOL (1 << 13) +#define SOR_STATE_ASY_HSYNCPOL (1 << 12) +#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8) +#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8) +#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8) +#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8) +#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8) +#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6) +#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6) +#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6) +#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6) +#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0) + +#define SOR_HEAD_STATE_0(x) (0x05 + (x)) +#define SOR_HEAD_STATE_1(x) (0x07 + (x)) +#define SOR_HEAD_STATE_2(x) (0x09 + (x)) +#define SOR_HEAD_STATE_3(x) (0x0b + (x)) +#define SOR_HEAD_STATE_4(x) (0x0d + (x)) +#define SOR_HEAD_STATE_5(x) (0x0f + (x)) +#define SOR_CRC_CNTRL 0x11 +#define SOR_CRC_CNTRL_ENABLE (1 << 0) +#define SOR_DP_DEBUG_MVID 0x12 + +#define SOR_CLK_CNTRL 0x13 +#define SOR_CLK_CNTRL_DP_LINK_SPEED_MASK (0x1f << 2) +#define SOR_CLK_CNTRL_DP_LINK_SPEED(x) (((x) & 0x1f) << 2) +#define SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62 (0x06 << 2) +#define SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70 (0x0a << 2) +#define SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40 (0x14 << 2) +#define SOR_CLK_CNTRL_DP_CLK_SEL_MASK (3 << 0) +#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK (0 << 0) +#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK (1 << 0) +#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK (2 << 0) +#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK (3 << 0) + +#define SOR_CAP 0x14 + +#define SOR_PWR 0x15 +#define SOR_PWR_TRIGGER (1 << 31) +#define SOR_PWR_MODE_SAFE (1 << 28) +#define SOR_PWR_NORMAL_STATE_PU (1 << 0) + +#define SOR_TEST 0x16 +#define SOR_TEST_CRC_POST_SERIALIZE (1 << 23) +#define SOR_TEST_ATTACHED (1 << 10) +#define SOR_TEST_HEAD_MODE_MASK (3 << 8) +#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8) + +#define SOR_PLL_0 0x17 +#define SOR_PLL_0_ICHPMP_MASK (0xf << 24) +#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24) +#define SOR_PLL_0_VCOCAP_MASK (0xf << 8) +#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8) +#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3) +#define SOR_PLL_0_PLLREG_MASK (0x3 << 6) +#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6) +#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0) +#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1) +#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2) +#define 
SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3) +#define SOR_PLL_0_PULLDOWN (1 << 5) +#define SOR_PLL_0_RESISTOR_EXT (1 << 4) +#define SOR_PLL_0_VCOPD (1 << 2) +#define SOR_PLL_0_POWER_OFF (1 << 0) + +#define SOR_PLL_1 0x18 +/* XXX: read-only bit? */ +#define SOR_PLL_1_TERM_COMPOUT (1 << 15) +#define SOR_PLL_1_TMDS_TERM (1 << 8) + +#define SOR_PLL_2 0x19 +#define SOR_PLL_2_LVDS_ENABLE (1 << 25) +#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24) +#define SOR_PLL_2_PORT_POWERDOWN (1 << 23) +#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22) +#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18) +#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17) + +#define SOR_PLL_3 0x1a +#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13) +#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13) + +#define SOR_CSTM 0x1b +#define SOR_CSTM_LVDS (1 << 16) +#define SOR_CSTM_LINK_ACT_B (1 << 15) +#define SOR_CSTM_LINK_ACT_A (1 << 14) +#define SOR_CSTM_UPPER (1 << 11) + +#define SOR_LVDS 0x1c +#define SOR_CRC_A 0x1d +#define SOR_CRC_A_VALID (1 << 0) +#define SOR_CRC_A_RESET (1 << 0) +#define SOR_CRC_B 0x1e +#define SOR_BLANK 0x1f +#define SOR_SEQ_CTL 0x20 + +#define SOR_LANE_SEQ_CTL 0x21 +#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31) +#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20) +#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20) +#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16) +#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16) + +#define SOR_SEQ_INST(x) (0x22 + (x)) + +#define SOR_PWM_DIV 0x32 +#define SOR_PWM_DIV_MASK 0xffffff + +#define SOR_PWM_CTL 0x33 +#define SOR_PWM_CTL_TRIGGER (1 << 31) +#define SOR_PWM_CTL_CLK_SEL (1 << 30) +#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff + +#define SOR_VCRC_A_0 0x34 +#define SOR_VCRC_A_1 0x35 +#define SOR_VCRC_B_0 0x36 +#define SOR_VCRC_B_1 0x37 +#define SOR_CCRC_A_0 0x38 +#define SOR_CCRC_A_1 0x39 +#define SOR_CCRC_B_0 0x3a +#define SOR_CCRC_B_1 0x3b +#define SOR_EDATA_A_0 0x3c +#define SOR_EDATA_A_1 0x3d +#define SOR_EDATA_B_0 0x3e +#define SOR_EDATA_B_1 0x3f +#define SOR_COUNT_A_0 0x40 +#define SOR_COUNT_A_1 0x41 +#define SOR_COUNT_B_0 0x42 +#define SOR_COUNT_B_1 0x43 +#define SOR_DEBUG_A_0 0x44 +#define SOR_DEBUG_A_1 0x45 +#define SOR_DEBUG_B_0 0x46 +#define SOR_DEBUG_B_1 0x47 +#define SOR_TRIG 0x48 +#define SOR_MSCHECK 0x49 +#define SOR_XBAR_CTRL 0x4a +#define SOR_XBAR_POL 0x4b + +#define SOR_DP_LINKCTL_0 0x4c +#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16) +#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16) +#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14) +#define SOR_DP_LINKCTL_TU_SIZE_MASK (0x7f << 2) +#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2) +#define SOR_DP_LINKCTL_ENABLE (1 << 0) + +#define SOR_DP_LINKCTL_1 0x4d + +#define SOR_LANE_DRIVE_CURRENT_0 0x4e +#define SOR_LANE_DRIVE_CURRENT_1 0x4f +#define SOR_LANE4_DRIVE_CURRENT_0 0x50 +#define SOR_LANE4_DRIVE_CURRENT_1 0x51 +#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24) +#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16) +#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8) +#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0) + +#define SOR_LANE_PREEMPHASIS_0 0x52 +#define SOR_LANE_PREEMPHASIS_1 0x53 +#define SOR_LANE4_PREEMPHASIS_0 0x54 +#define SOR_LANE4_PREEMPHASIS_1 0x55 +#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24) +#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16) +#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8) +#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0) + +#define SOR_LANE_POST_CURSOR_0 0x56 +#define SOR_LANE_POST_CURSOR_1 0x57 
+#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24) +#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16) +#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8) +#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0) + +#define SOR_DP_CONFIG_0 0x58 +#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31) +#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26) +#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24) +#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK (0xf << 16) +#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC(x) (((x) & 0xf) << 16) +#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK (0x7f << 8) +#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT(x) (((x) & 0x7f) << 8) +#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0) +#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0) + +#define SOR_DP_CONFIG_1 0x59 +#define SOR_DP_MN_0 0x5a +#define SOR_DP_MN_1 0x5b + +#define SOR_DP_PADCTL_0 0x5c +#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23) +#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22) +#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8) +#define SOR_DP_PADCTL_TX_PU(x) (((x) & 0xff) << 8) +#define SOR_DP_PADCTL_CM_TXD_3 (1 << 7) +#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6) +#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5) +#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4) +#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3) +#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2) +#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1) +#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0) + +#define SOR_DP_PADCTL_1 0x5d + +#define SOR_DP_DEBUG_0 0x5e +#define SOR_DP_DEBUG_1 0x5f + +#define SOR_DP_SPARE_0 0x60 +#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2) +#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1) +#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0) + +#define SOR_DP_SPARE_1 0x61 +#define SOR_DP_AUDIO_CTRL 0x62 + +#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63 +#define SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK (0x01ffff << 0) + +#define SOR_DP_AUDIO_VBLANK_SYMBOLS 0x64 +#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0) + +#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c + +#define SOR_DP_TPG 0x6d +#define SOR_DP_TPG_CHANNEL_CODING (1 << 6) +#define SOR_DP_TPG_SCRAMBLER_MASK (3 << 4) +#define SOR_DP_TPG_SCRAMBLER_FIBONACCI (2 << 4) +#define SOR_DP_TPG_SCRAMBLER_GALIOS (1 << 4) +#define SOR_DP_TPG_SCRAMBLER_NONE (0 << 4) +#define SOR_DP_TPG_PATTERN_MASK (0xf << 0) +#define SOR_DP_TPG_PATTERN_HBR2 (0x8 << 0) +#define SOR_DP_TPG_PATTERN_CSTM (0x7 << 0) +#define SOR_DP_TPG_PATTERN_PRBS7 (0x6 << 0) +#define SOR_DP_TPG_PATTERN_SBLERRRATE (0x5 << 0) +#define SOR_DP_TPG_PATTERN_D102 (0x4 << 0) +#define SOR_DP_TPG_PATTERN_TRAIN3 (0x3 << 0) +#define SOR_DP_TPG_PATTERN_TRAIN2 (0x2 << 0) +#define SOR_DP_TPG_PATTERN_TRAIN1 (0x1 << 0) +#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0) + +#define SOR_DP_TPG_CONFIG 0x6e +#define SOR_DP_LQ_CSTM_0 0x6f +#define SOR_DP_LQ_CSTM_1 0x70 +#define SOR_DP_LQ_CSTM_2 0x71 + +#endif |
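The SOR_DP_LINKCTL_* definitions above follow the mask/value convention used throughout sor.h: every multi-bit field has a *_MASK covering its bit range and a function-like macro that shifts a caller-supplied value into position. Unlike most of these macros, SOR_DP_LINKCTL_LANE_COUNT(x) expands to a contiguous mask of x lanes rather than a plain shifted value, so LANE_COUNT(4) yields 0xf << 16. The stand-alone sketch below shows how a register word can be composed from these macros; the macro bodies are copied from the header, while the chosen lane count and TU size are illustrative inputs, not values taken from the driver.

/*
 * Illustrative sketch only: composes a SOR_DP_LINKCTL_0 word using the
 * mask/value macros from sor.h. The inputs (4 lanes, TU size 59) are
 * example values, not ones used by the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define SOR_DP_LINKCTL_LANE_COUNT_MASK	(0x1f << 16)
#define SOR_DP_LINKCTL_LANE_COUNT(x)	(((1 << (x)) - 1) << 16)
#define SOR_DP_LINKCTL_ENHANCED_FRAME	(1 << 14)
#define SOR_DP_LINKCTL_TU_SIZE_MASK	(0x7f << 2)
#define SOR_DP_LINKCTL_TU_SIZE(x)	(((x) & 0x7f) << 2)
#define SOR_DP_LINKCTL_ENABLE		(1 << 0)

int main(void)
{
	uint32_t value = 0;

	/* Clear each field before setting it so stale bits cannot leak in. */
	value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
	value |= SOR_DP_LINKCTL_LANE_COUNT(4);	/* 4 lanes -> bits 19:16 = 0xf */

	value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
	value |= SOR_DP_LINKCTL_TU_SIZE(59);	/* example transfer-unit size */

	value |= SOR_DP_LINKCTL_ENHANCED_FRAME | SOR_DP_LINKCTL_ENABLE;

	/* Prints SOR_DP_LINKCTL_0 = 0x000f40ed */
	printf("SOR_DP_LINKCTL_0 = 0x%08x\n", (unsigned int)value);
	return 0;
}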