Diffstat (limited to 'kernel/drivers/gpu/drm/msm')
80 files changed, 8306 insertions, 1779 deletions
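Most of the additions in the generated adreno register headers below (a3xx.xml.h, a4xx.xml.h) follow one idiom: each register field gets a __MASK and __SHIFT define plus a small static inline helper that packs a value into that field. The following is a minimal, self-contained sketch of that idiom only; the EXAMPLE_REG_MRT name, bit positions, and values are invented for illustration and are not taken from the patch.

/*
 * Sketch of the field-helper pattern used by the generated headers:
 * shift the value into position, then mask so it cannot spill into
 * neighbouring fields of the same register word.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_REG_MRT__MASK  0x00003000  /* hypothetical field in bits [13:12] */
#define EXAMPLE_REG_MRT__SHIFT 12

static inline uint32_t EXAMPLE_REG_MRT(uint32_t val)
{
	return (val << EXAMPLE_REG_MRT__SHIFT) & EXAMPLE_REG_MRT__MASK;
}

int main(void)
{
	/* Field helpers are OR'd together to build the full register value. */
	uint32_t reg = EXAMPLE_REG_MRT(3);

	printf("packed value: 0x%08" PRIx32 "\n", reg);  /* prints 0x00003000 */
	return 0;
}

Reading the header hunks with this pattern in mind makes the diff easier to scan: most hunks only add new field definitions of this shape, widen an existing __MASK, or rename a register, rather than changing driver logic.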
diff --git a/kernel/drivers/gpu/drm/msm/Kconfig b/kernel/drivers/gpu/drm/msm/Kconfig index 0a6f6764a..84d3ec98e 100644 --- a/kernel/drivers/gpu/drm/msm/Kconfig +++ b/kernel/drivers/gpu/drm/msm/Kconfig @@ -9,24 +9,11 @@ config DRM_MSM select DRM_PANEL select SHMEM select TMPFS + select QCOM_SCM default y help DRM/KMS driver for MSM/snapdragon. -config DRM_MSM_FBDEV - bool "Enable legacy fbdev support for MSM modesetting driver" - depends on DRM_MSM - select DRM_KMS_FB_HELPER - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS - default y - help - Choose this option if you have a need for the legacy fbdev - support. Note that this support also provide the linux console - support on top of the MSM modesetting driver. - config DRM_MSM_REGISTER_LOGGING bool "MSM DRM register logging" depends on DRM_MSM @@ -46,3 +33,24 @@ config DRM_MSM_DSI Choose this option if you have a need for MIPI DSI connector support. +config DRM_MSM_DSI_PLL + bool "Enable DSI PLL driver in MSM DRM" + depends on DRM_MSM_DSI && COMMON_CLK + default y + help + Choose this option to enable DSI PLL driver which provides DSI + source clocks under common clock framework. + +config DRM_MSM_DSI_28NM_PHY + bool "Enable DSI 28nm PHY driver in MSM DRM" + depends on DRM_MSM_DSI + default y + help + Choose this option if the 28nm DSI PHY is used on the platform. + +config DRM_MSM_DSI_20NM_PHY + bool "Enable DSI 20nm PHY driver in MSM DRM" + depends on DRM_MSM_DSI + default y + help + Choose this option if the 20nm DSI PHY is used on the platform. diff --git a/kernel/drivers/gpu/drm/msm/Makefile b/kernel/drivers/gpu/drm/msm/Makefile index ab2086783..1c90290be 100644 --- a/kernel/drivers/gpu/drm/msm/Makefile +++ b/kernel/drivers/gpu/drm/msm/Makefile @@ -1,4 +1,5 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm +ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi msm-y := \ adreno/adreno_device.o \ @@ -9,6 +10,7 @@ msm-y := \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ hdmi/hdmi_connector.o \ + hdmi/hdmi_hdcp.o \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8x60.o \ @@ -48,12 +50,22 @@ msm-y := \ msm_rd.o \ msm_ringbuffer.o -msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o +msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o + msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ + dsi/dsi_cfg.o \ dsi/dsi_host.o \ dsi/dsi_manager.o \ - dsi/dsi_phy.o \ + dsi/phy/dsi_phy.o \ mdp/mdp5/mdp5_cmd_encoder.o +msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o +msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o + +ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) +msm-y += dsi/pll/dsi_pll.o +msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o +endif + obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/kernel/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/kernel/drivers/gpu/drm/msm/adreno/a2xx.xml.h index edc845fff..9e2aceb4f 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/kernel/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -8,15 +8,16 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- 
/home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/kernel/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/kernel/drivers/gpu/drm/msm/adreno/a3xx.xml.h index e91a73945..97dc1c6ec 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/kernel/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -8,15 +8,16 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 
bytes, from 2015-09-24 17:30:00) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -130,6 +131,10 @@ enum a3xx_tex_fmt { TFMT_I420_Y = 24, TFMT_I420_U = 26, TFMT_I420_V = 27, + TFMT_ATC_RGB = 32, + TFMT_ATC_RGBA_EXPLICIT = 33, + TFMT_ETC1 = 34, + TFMT_ATC_RGBA_INTERPOLATED = 35, TFMT_DXT1 = 36, TFMT_DXT3 = 37, TFMT_DXT5 = 38, @@ -178,10 +183,13 @@ enum a3xx_tex_fmt { TFMT_32_SINT = 92, TFMT_32_32_SINT = 93, TFMT_32_32_32_32_SINT = 95, - TFMT_RGTC2_SNORM = 112, - TFMT_RGTC2_UNORM = 113, - TFMT_RGTC1_SNORM = 114, - TFMT_RGTC1_UNORM = 115, + TFMT_ETC2_RG11_SNORM = 112, + TFMT_ETC2_RG11_UNORM = 113, + TFMT_ETC2_R11_SNORM = 114, + TFMT_ETC2_R11_UNORM = 115, + TFMT_ETC2_RGBA8 = 116, + TFMT_ETC2_RGB8A1 = 117, + TFMT_ETC2_RGB8 = 118, }; enum a3xx_tex_fetchsize { @@ -209,14 +217,24 @@ enum a3xx_color_fmt { RB_R10G10B10A2_UNORM = 16, RB_A8_UNORM = 20, RB_R8_UNORM = 21, + RB_R16_FLOAT = 24, + RB_R16G16_FLOAT = 25, RB_R16G16B16A16_FLOAT = 27, RB_R11G11B10_FLOAT = 28, + RB_R16_SNORM = 32, + RB_R16G16_SNORM = 33, + RB_R16G16B16A16_SNORM = 35, + RB_R16_UNORM = 36, + RB_R16G16_UNORM = 37, + RB_R16G16B16A16_UNORM = 39, RB_R16_SINT = 40, RB_R16G16_SINT = 41, RB_R16G16B16A16_SINT = 43, RB_R16_UINT = 44, RB_R16G16_UINT = 45, RB_R16G16B16A16_UINT = 47, + RB_R32_FLOAT = 48, + RB_R32G32_FLOAT = 49, RB_R32G32B32A32_FLOAT = 51, RB_R32_SINT = 52, RB_R32G32_SINT = 53, @@ -263,6 +281,14 @@ enum a3xx_rb_blend_opcode { enum a3xx_intp_mode { SMOOTH = 0, FLAT = 1, + ZERO = 2, + ONE = 3, +}; + +enum a3xx_repl_mode { + S = 1, + T = 2, + ONE_T = 3, }; enum a3xx_tex_filter { @@ -303,6 +329,13 @@ enum a3xx_tex_type { A3XX_TEX_3D = 3, }; +enum a3xx_tex_msaa { + A3XX_TPL1_MSAA1X = 0, + A3XX_TPL1_MSAA2X = 1, + A3XX_TPL1_MSAA4X = 2, + A3XX_TPL1_MSAA8X = 3, +}; + #define A3XX_INT0_RBBM_GPU_IDLE 0x00000001 #define A3XX_INT0_RBBM_AHB_ERROR 0x00000002 #define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004 @@ -650,9 +683,16 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 #define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 #define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 #define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000 #define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000 #define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000 #define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000 +#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000 +#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26 +static inline uint32_t A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val) +{ + return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK; +} #define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 #define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff @@ -743,7 +783,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val) #define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val) { - return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK; + return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK; } #define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d @@ -751,7 +791,7 @@ static inline uint32_t 
A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val) #define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) { - return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; + return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; } #define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 @@ -854,10 +894,19 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va { return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK; } +#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000 +#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12 +static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val) +{ + return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK; +} #define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000 #define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 #define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 +#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001 +#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002 +#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004 #define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008 #define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 #define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 @@ -871,6 +920,8 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) #define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000 #define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000 #define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000 +#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000 +#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 @@ -878,6 +929,8 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compar { return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK; } +#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000 +#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000 #define REG_A3XX_RB_MSAA_CONTROL 0x000020c2 #define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400 @@ -1246,9 +1299,21 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v #define REG_A3XX_RB_STENCIL_CLEAR 0x00002105 -#define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106 +#define REG_A3XX_RB_STENCIL_INFO 0x00002106 +#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800 +#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11 +static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) +{ + return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; +} -#define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107 +#define REG_A3XX_RB_STENCIL_PITCH 0x00002107 +#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff +#define A3XX_RB_STENCIL_PITCH__SHIFT 0 +static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val) +{ + return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK; +} #define REG_A3XX_RB_STENCILREFMASK 0x00002108 #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff @@ -1356,6 +1421,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_ { return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & 
A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; } +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000 #define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000 #define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 #define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 @@ -1805,6 +1871,102 @@ static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val) static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; } static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; } +#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c +#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & 
A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK; +} +#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000 +#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30 +static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val) +{ + return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK; +} #define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a @@ -2107,6 +2269,12 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9 #define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec +#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003 +#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0 +static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val) +{ + return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK; +} #define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 #define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 #define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 @@ -2508,6 +2676,7 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val) #define REG_A3XX_VGT_IMMED_DATA 0x000021fd #define REG_A3XX_TEX_SAMP_0 0x00000000 +#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c #define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 @@ -2551,6 +2720,7 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val { return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK; } +#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000 #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 #define REG_A3XX_TEX_SAMP_1 0x00000001 @@ -2606,6 +2776,12 @@ static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val) { return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK; } +#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000 +#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20 +static inline uint32_t 
A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val) +{ + return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK; +} #define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 #define A3XX_TEX_CONST_0_FMT__SHIFT 22 static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) @@ -2641,7 +2817,7 @@ static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val) } #define REG_A3XX_TEX_CONST_2 0x00000002 -#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff +#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff #define A3XX_TEX_CONST_2_INDX__SHIFT 0 static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val) { @@ -2661,7 +2837,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) } #define REG_A3XX_TEX_CONST_3 0x00000003 -#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0000000f +#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff #define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0 static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val) { diff --git a/kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b66c53bdc..fd266ed96 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -93,7 +93,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu) /* Set up AOOO: */ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); - + } else if (adreno_is_a306(adreno_gpu)) { + gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a); } else if (adreno_is_a320(adreno_gpu)) { /* Set up 16 deep read/write request queues: */ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); @@ -186,7 +189,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); /* Enable Clock gating: */ - if (adreno_is_a320(adreno_gpu)) + if (adreno_is_a306(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); + else if (adreno_is_a320(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); else if (adreno_is_a330v2(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); @@ -271,7 +276,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ - if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) { + if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) || + adreno_is_a320(adreno_gpu)) { gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | @@ -295,9 +301,12 @@ static int a3xx_hw_init(struct msm_gpu *gpu) static void a3xx_recover(struct msm_gpu *gpu) { + adreno_dump_info(gpu); + /* dump registers before resetting gpu, if enabled: */ if (hang_debug) a3xx_dump(gpu); + gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1); gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD); gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0); diff --git a/kernel/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/kernel/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 755723fd8..99de8271d 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/kernel/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -8,15 +8,16 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- 
/home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -43,10 +44,40 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
enum a4xx_color_fmt { RB4_A8_UNORM = 1, + RB4_R8_UNORM = 2, + RB4_R4G4B4A4_UNORM = 8, + RB4_R5G5B5A1_UNORM = 10, RB4_R5G6R5_UNORM = 14, - RB4_Z16_UNORM = 15, + RB4_R8G8_UNORM = 15, + RB4_R8G8_SNORM = 16, + RB4_R8G8_UINT = 17, + RB4_R8G8_SINT = 18, + RB4_R16_FLOAT = 21, + RB4_R16_UINT = 22, + RB4_R16_SINT = 23, RB4_R8G8B8_UNORM = 25, RB4_R8G8B8A8_UNORM = 26, + RB4_R8G8B8A8_SNORM = 28, + RB4_R8G8B8A8_UINT = 29, + RB4_R8G8B8A8_SINT = 30, + RB4_R10G10B10A2_UNORM = 31, + RB4_R10G10B10A2_UINT = 34, + RB4_R11G11B10_FLOAT = 39, + RB4_R16G16_FLOAT = 42, + RB4_R16G16_UINT = 43, + RB4_R16G16_SINT = 44, + RB4_R32_FLOAT = 45, + RB4_R32_UINT = 46, + RB4_R32_SINT = 47, + RB4_R16G16B16A16_FLOAT = 54, + RB4_R16G16B16A16_UINT = 55, + RB4_R16G16B16A16_SINT = 56, + RB4_R32G32_FLOAT = 57, + RB4_R32G32_UINT = 58, + RB4_R32G32_SINT = 59, + RB4_R32G32B32A32_FLOAT = 60, + RB4_R32G32B32A32_UINT = 61, + RB4_R32G32B32A32_SINT = 62, }; enum a4xx_tile_mode { @@ -91,7 +122,14 @@ enum a4xx_vtx_fmt { VFMT4_16_16_UNORM = 29, VFMT4_16_16_16_UNORM = 30, VFMT4_16_16_16_16_UNORM = 31, + VFMT4_32_UINT = 32, + VFMT4_32_32_UINT = 33, + VFMT4_32_32_32_UINT = 34, + VFMT4_32_32_32_32_UINT = 35, + VFMT4_32_SINT = 36, VFMT4_32_32_SINT = 37, + VFMT4_32_32_32_SINT = 38, + VFMT4_32_32_32_32_SINT = 39, VFMT4_8_UINT = 40, VFMT4_8_8_UINT = 41, VFMT4_8_8_8_UINT = 42, @@ -125,12 +163,60 @@ enum a4xx_tex_fmt { TFMT4_8_UNORM = 4, TFMT4_8_8_UNORM = 14, TFMT4_8_8_8_8_UNORM = 28, + TFMT4_8_SNORM = 5, + TFMT4_8_8_SNORM = 15, + TFMT4_8_8_8_8_SNORM = 29, + TFMT4_8_UINT = 6, + TFMT4_8_8_UINT = 16, + TFMT4_8_8_8_8_UINT = 30, + TFMT4_8_SINT = 7, + TFMT4_8_8_SINT = 17, + TFMT4_8_8_8_8_SINT = 31, + TFMT4_16_UINT = 21, + TFMT4_16_16_UINT = 41, + TFMT4_16_16_16_16_UINT = 54, + TFMT4_16_SINT = 22, + TFMT4_16_16_SINT = 42, + TFMT4_16_16_16_16_SINT = 55, + TFMT4_32_UINT = 44, + TFMT4_32_32_UINT = 57, + TFMT4_32_32_32_32_UINT = 64, + TFMT4_32_SINT = 45, + TFMT4_32_32_SINT = 58, + TFMT4_32_32_32_32_SINT = 65, TFMT4_16_FLOAT = 20, TFMT4_16_16_FLOAT = 40, TFMT4_16_16_16_16_FLOAT = 53, TFMT4_32_FLOAT = 43, TFMT4_32_32_FLOAT = 56, TFMT4_32_32_32_32_FLOAT = 63, + TFMT4_9_9_9_E5_FLOAT = 32, + TFMT4_11_11_10_FLOAT = 37, + TFMT4_ATC_RGB = 100, + TFMT4_ATC_RGBA_EXPLICIT = 101, + TFMT4_ATC_RGBA_INTERPOLATED = 102, + TFMT4_ETC2_RG11_UNORM = 103, + TFMT4_ETC2_RG11_SNORM = 104, + TFMT4_ETC2_R11_UNORM = 105, + TFMT4_ETC2_R11_SNORM = 106, + TFMT4_ETC1 = 107, + TFMT4_ETC2_RGB8 = 108, + TFMT4_ETC2_RGBA8 = 109, + TFMT4_ETC2_RGB8A1 = 110, + TFMT4_ASTC_4x4 = 111, + TFMT4_ASTC_5x4 = 112, + TFMT4_ASTC_5x5 = 113, + TFMT4_ASTC_6x5 = 114, + TFMT4_ASTC_6x6 = 115, + TFMT4_ASTC_8x5 = 116, + TFMT4_ASTC_8x6 = 117, + TFMT4_ASTC_8x8 = 118, + TFMT4_ASTC_10x5 = 119, + TFMT4_ASTC_10x6 = 120, + TFMT4_ASTC_10x8 = 121, + TFMT4_ASTC_10x10 = 122, + TFMT4_ASTC_12x10 = 123, + TFMT4_ASTC_12x12 = 124, }; enum a4xx_tex_fetchsize { @@ -145,18 +231,35 @@ enum a4xx_depth_format { DEPTH4_NONE = 0, DEPTH4_16 = 1, DEPTH4_24_8 = 2, + DEPTH4_32 = 3, +}; + +enum a4xx_tess_spacing { + EQUAL_SPACING = 0, + ODD_SPACING = 2, + EVEN_SPACING = 3, }; enum a4xx_tex_filter { A4XX_TEX_NEAREST = 0, A4XX_TEX_LINEAR = 1, + A4XX_TEX_ANISO = 2, }; enum a4xx_tex_clamp { A4XX_TEX_REPEAT = 0, A4XX_TEX_CLAMP_TO_EDGE = 1, A4XX_TEX_MIRROR_REPEAT = 2, - A4XX_TEX_CLAMP_NONE = 3, + A4XX_TEX_CLAMP_TO_BORDER = 3, + A4XX_TEX_MIRROR_CLAMP = 4, +}; + +enum a4xx_tex_aniso { + A4XX_TEX_ANISO_1 = 0, + A4XX_TEX_ANISO_2 = 1, + A4XX_TEX_ANISO_4 = 2, + A4XX_TEX_ANISO_8 = 3, + A4XX_TEX_ANISO_16 = 4, }; enum a4xx_tex_swiz { @@ -279,13 +382,16 @@ static 
inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val) #define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002 #define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004 #define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008 +#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010 #define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020 +#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040 #define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 #define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7 static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val) { return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK; } +#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800 #define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000 static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; } @@ -310,6 +416,12 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val { return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; } +#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0 +#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6 +static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val) +{ + return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; +} #define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600 #define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9 static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) @@ -322,7 +434,8 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) { return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; } -#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000 +#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000 +#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000 #define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14 static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) { @@ -332,7 +445,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; } static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; } -#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8 +#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8 #define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3 static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val) { @@ -449,7 +562,12 @@ static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare } #define REG_A4XX_RB_FS_OUTPUT 0x000020f9 -#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND 0x00000001 +#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff +#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0 +static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val) +{ + return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK; +} #define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 @@ -458,12 +576,63 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val) return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK; } -#define REG_A4XX_RB_RENDER_CONTROL3 0x000020fb -#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK 0x0000001f -#define 
A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT 0 -static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val) +#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa +#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 +#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc +#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2 +static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val) { - return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK; + return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK; +} + +#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb +#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f +#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0 +#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00 +#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000 +#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000 +#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000 +#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000 +#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000 +#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK; } #define REG_A4XX_RB_COPY_CONTROL 0x000020fc @@ -547,7 +716,12 @@ static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val) } #define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100 -#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE 0x00000001 +#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f +#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0 +static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val) +{ + return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK; +} #define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020 #define REG_A4XX_RB_DEPTH_CONTROL 0x00002101 @@ -652,6 +826,23 @@ static inline uint32_t 
A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v #define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107 #define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001 +#define REG_A4XX_RB_STENCIL_INFO 0x00002108 +#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 +#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000 +#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12 +static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) +{ + return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; +} + +#define REG_A4XX_RB_STENCIL_PITCH 0x00002109 +#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff +#define A4XX_RB_STENCIL_PITCH__SHIFT 0 +static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK; +} + #define REG_A4XX_RB_STENCILREFMASK 0x0000210b #define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff #define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 @@ -930,6 +1121,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) #define REG_A4XX_CP_IB2_BUFSZ 0x00000209 +#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c + +#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d + #define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217 #define REG_A4XX_CP_QUEUE_THRESH2 0x00000219 @@ -940,9 +1135,9 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) #define REG_A4XX_CP_ROQ_DATA 0x0000021d -#define REG_A4XX_CP_MEQ_ADDR 0x0000021e +#define REG_A4XX_CP_MEQ_ADDR 0x0000021e -#define REG_A4XX_CP_MEQ_DATA 0x0000021f +#define REG_A4XX_CP_MEQ_DATA 0x0000021f #define REG_A4XX_CP_MERCIU_ADDR 0x00000220 @@ -1004,12 +1199,17 @@ static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 #define REG_A4XX_SP_VS_STATUS 0x00000ec0 +#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3 + #define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf #define REG_A4XX_SP_SP_CTRL_REG 0x000022c0 #define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000 #define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1 +#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080 +#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100 +#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400 #define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4 #define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 @@ -1229,6 +1429,12 @@ static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef #define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0 +#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f +#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0 +static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val) +{ + return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK; +} #define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 #define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 #define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 @@ -1236,6 +1442,12 @@ static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val) { return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK; } +#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000 +#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24 +static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK; +} static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 
0x1*i0; } @@ -1253,6 +1465,21 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val) { return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK; } +#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000 + +#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300 + +#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301 + +#define REG_A4XX_SP_CS_OBJ_START 0x00002302 + +#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303 + +#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304 + +#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305 + +#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306 #define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d #define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 @@ -1268,6 +1495,84 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; } +#define REG_A4XX_SP_HS_OBJ_START 0x0000230e + +#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f + +#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310 + +#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312 + +#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a +#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff +#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0 +static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val) +{ + return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK; +} +#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000 +#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20 +static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val) +{ + return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK; +} + +static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; } +#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff +#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK; +} +#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00 +#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9 +static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK; +} +#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000 +#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK; +} +#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000 +#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25 +static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; } +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define 
A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK; +} + #define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 @@ -1282,6 +1587,90 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; } +#define REG_A4XX_SP_DS_OBJ_START 0x00002335 + +#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336 + +#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337 + +#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339 + +#define REG_A4XX_SP_GS_PARAM_REG 0x00002341 +#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff +#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0 +static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK; +} +#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00 +#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8 +static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK; +} +#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000 +#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20 +static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val) +{ + return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK; +} + +static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; } +#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff +#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK; +} +#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00 +#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9 +static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK; +} +#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000 +#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK; +} +#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000 +#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25 +static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t 
REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; } +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK; +} + #define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 @@ -1296,6 +1685,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; } +#define REG_A4XX_SP_GS_OBJ_START 0x0000235c + +#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d + +#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e + #define REG_A4XX_SP_GS_LENGTH_REG 0x00002360 #define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60 @@ -1418,6 +1813,10 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0 #define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a +#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0 + +#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9 + #define REG_A4XX_VFD_CONTROL_0 0x00002200 #define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff #define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0 @@ -1473,6 +1872,18 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val) { return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK; } +#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000 +#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16 +static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val) +{ + return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK; +} +#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000 +#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24 +static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) +{ + return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK; +} #define REG_A4XX_VFD_CONTROL_4 0x00002204 @@ -1554,10 +1965,54 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) #define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00 +#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03 + #define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b #define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380 +#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381 +#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff +#define 
A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0 +static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val) +{ + return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK; +} +#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00 +#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8 +static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val) +{ + return ((val) << A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK; +} +#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000 +#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16 +static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val) +{ + return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK; +} +#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000 +#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24 +static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val) +{ + return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK; +} + +#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384 + +#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387 + +#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a + +#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d + +#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0 + +#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1 + +#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4 + +#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5 + #define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6 #define REG_A4XX_GRAS_TSE_STATUS 0x00000c80 @@ -1676,6 +2131,14 @@ static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; } +#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076 +#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff +#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0 +static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val) +{ + return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK; +} + #define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 #define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 #define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 @@ -1828,6 +2291,8 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val) #define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04 +#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05 + #define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e #define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0 @@ -1867,7 +2332,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val) { return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK; } -#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000 +#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000 +#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24 +static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK; +} #define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2 #define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 @@ -1882,6 +2352,18 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) { return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; } +#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00 +#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10 +static inline 
uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK; +} +#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000 +#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18 +static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK; +} #define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3 #define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff @@ -1891,6 +2373,8 @@ static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val) return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK; } +#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4 + #define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5 #define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff #define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0 @@ -1904,6 +2388,7 @@ static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) { return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; } +#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000 #define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 #define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) @@ -1930,6 +2415,7 @@ static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) { return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; } +#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000 #define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 #define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) @@ -1956,6 +2442,7 @@ static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) { return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; } +#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000 #define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 #define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) @@ -1982,6 +2469,7 @@ static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) { return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; } +#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000 #define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 #define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) @@ -2008,6 +2496,7 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) { return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; } +#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000 #define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 #define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) @@ -2021,6 +2510,36 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) return ((val) << 
A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK; } +#define REG_A4XX_HLSQ_CS_CONTROL 0x000023ca + +#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd + +#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce + +#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf + +#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0 + +#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1 + +#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2 + +#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3 + +#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4 + +#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5 + +#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6 + +#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7 + +#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8 + +#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9 + +#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da + #define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db #define REG_A4XX_PC_BINNING_COMMAND 0x00000d00 @@ -2035,7 +2554,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_PC_BIN_BASE 0x000021c0 #define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4 -#define A4XX_PC_PRIM_VTX_CNTL_VAROUT 0x00000001 +#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f +#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0 +static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val) +{ + return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK; +} +#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000 #define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 #define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 @@ -2044,8 +2569,45 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_PC_RESTART_INDEX 0x000021c6 #define REG_A4XX_PC_GS_PARAM 0x000021e5 +#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff +#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0 +static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val) +{ + return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK; +} +#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800 +#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11 +static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val) +{ + return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK; +} +#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000 +#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23 +static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK; +} +#define A4XX_PC_GS_PARAM_LAYER 0x80000000 #define REG_A4XX_PC_HS_PARAM 0x000021e7 +#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f +#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0 +static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val) +{ + return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK; +} +#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000 +#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21 +static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val) +{ + return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK; +} +#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000 +#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23 +static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK; +} #define REG_A4XX_VBIF_VERSION 0x00003000 @@ -2074,16 
+2636,10 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_UNKNOWN_0D01 0x00000d01 -#define REG_A4XX_UNKNOWN_0E05 0x00000e05 - #define REG_A4XX_UNKNOWN_0E42 0x00000e42 #define REG_A4XX_UNKNOWN_0EC2 0x00000ec2 -#define REG_A4XX_UNKNOWN_0EC3 0x00000ec3 - -#define REG_A4XX_UNKNOWN_0F03 0x00000f03 - #define REG_A4XX_UNKNOWN_2001 0x00002001 #define REG_A4XX_UNKNOWN_209B 0x0000209b @@ -2124,9 +2680,7 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val) #define REG_A4XX_UNKNOWN_22D7 0x000022d7 -#define REG_A4XX_UNKNOWN_2381 0x00002381 - -#define REG_A4XX_UNKNOWN_23A0 0x000023a0 +#define REG_A4XX_UNKNOWN_2352 0x00002352 #define REG_A4XX_TEX_SAMP_0 0x00000000 #define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 @@ -2160,6 +2714,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val) { return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK; } +#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 +#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14 +static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val) +{ + return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK; +} #define REG_A4XX_TEX_SAMP_1 0x00000001 #define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e @@ -2185,6 +2745,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val) #define REG_A4XX_TEX_CONST_0 0x00000000 #define A4XX_TEX_CONST_0_TILED 0x00000001 +#define A4XX_TEX_CONST_0_SRGB 0x00000004 #define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 #define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4 static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val) diff --git a/kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 91221836c..a53f1be05 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -268,6 +268,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu) static void a4xx_recover(struct msm_gpu *gpu) { + adreno_dump_info(gpu); + /* dump registers before resetting gpu, if enabled: */ if (hang_debug) a4xx_dump(gpu); @@ -505,7 +507,6 @@ static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { static void a4xx_dump(struct msm_gpu *gpu) { - adreno_dump(gpu); printk("status: %08x\n", gpu_read(gpu, REG_A4XX_RBBM_STATUS)); adreno_dump(gpu); diff --git a/kernel/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/kernel/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index 8531beb98..c304468cf 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/kernel/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -8,15 +8,16 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 
15:51:54) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -85,6 +86,10 @@ enum adreno_rb_blend_factor { FACTOR_CONSTANT_ALPHA = 14, FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15, FACTOR_SRC_ALPHA_SATURATE = 16, + FACTOR_SRC1_COLOR = 20, + FACTOR_ONE_MINUS_SRC1_COLOR = 21, + FACTOR_SRC1_ALPHA = 22, + FACTOR_ONE_MINUS_SRC1_ALPHA = 23, }; enum adreno_rb_surface_endian { diff --git a/kernel/drivers/gpu/drm/msm/adreno/adreno_device.c b/kernel/drivers/gpu/drm/msm/adreno/adreno_device.c index be83dee83..1ea2df524 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/kernel/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -19,7 +19,7 @@ #include "adreno_gpu.h" -#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF) +#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF) # include <mach/kgsl.h> #endif @@ -42,6 +42,14 @@ static const struct adreno_info gpulist[] = { .gmem = SZ_256K, .init = a3xx_gpu_init, }, { + .rev = ADRENO_REV(3, 0, 6, 0), + .revn = 307, /* because a305c is revn==306 */ + .name = "A306", + .pm4fw = "a300_pm4.fw", + .pfpfw = "a300_pfp.fw", + .gmem = SZ_128K, + .init = a3xx_gpu_init, + }, { .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID), .revn = 320, .name = "A320", @@ -240,7 +248,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) config.rev = ADRENO_REV(3, 0, 5, 0); } -# ifdef CONFIG_MSM_BUS_SCALING +# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING config.bus_scale_table = pdata->bus_scale_table; # endif #endif diff --git a/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.c index bbdcab0a5..a3b54cc76 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -176,6 +176,17 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, OUT_PKT3(ring, CP_INTERRUPT, 1); OUT_RING(ring, 0x80000000); + /* Workaround for missing irq issue on 8x16/a306. 
Unsure if the + * root cause is a platform issue or some a306 quirk, but this + * keeps things humming along: + */ + if (adreno_is_a306(adreno_gpu)) { + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + OUT_PKT3(ring, CP_INTERRUPT, 1); + OUT_RING(ring, 0x80000000); + } + #if 0 if (adreno_is_a3xx(adreno_gpu)) { /* Dummy set-constant to trigger context rollover */ @@ -249,8 +260,13 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) } #endif -/* would be nice to not have to duplicate the _show() stuff with printk(): */ -void adreno_dump(struct msm_gpu *gpu) +/* Dump common gpu status and scratch registers on any hang, to make + * the hangcheck logs more useful. The scratch registers seem always + * safe to read when GPU has hung (unlike some other regs, depending + * on how the GPU hung), and they are useful to match up to cmdstream + * dumps when debugging hangs: + */ +void adreno_dump_info(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int i; @@ -266,6 +282,18 @@ void adreno_dump(struct msm_gpu *gpu) printk("wptr: %d\n", adreno_gpu->memptrs->wptr); printk("rb wptr: %d\n", get_wptr(gpu->rb)); + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); + } +} + +/* would be nice to not have to duplicate the _show() stuff with printk(): */ +void adreno_dump(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int i; + /* dump these out in a form that can be parsed by demsm: */ printk("IO:region %s 00000000 00020000\n", gpu->name); for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { @@ -317,7 +345,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->fast_rate = config->fast_rate; gpu->slow_rate = config->slow_rate; gpu->bus_freq = config->bus_freq; -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING gpu->bus_scale_table = config->bus_scale_table; #endif diff --git a/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.h index a0cc30977..0a312e9d3 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -167,7 +167,7 @@ struct adreno_gpu { struct adreno_platform_config { struct adreno_rev rev; uint32_t fast_rate, slow_rate, bus_freq; -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; #endif }; @@ -197,6 +197,12 @@ static inline bool adreno_is_a305(struct adreno_gpu *gpu) return gpu->revn == 305; } +static inline bool adreno_is_a306(struct adreno_gpu *gpu) +{ + /* yes, 307, because a305c is 306 */ + return gpu->revn == 307; +} + static inline bool adreno_is_a320(struct adreno_gpu *gpu) { return gpu->revn == 320; @@ -233,6 +239,7 @@ void adreno_idle(struct msm_gpu *gpu); #ifdef CONFIG_DEBUG_FS void adreno_show(struct msm_gpu *gpu, struct seq_file *m); #endif +void adreno_dump_info(struct msm_gpu *gpu); void adreno_dump(struct msm_gpu *gpu); void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords); diff --git a/kernel/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/kernel/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 6ffc4f6c8..a22fef569 100644 --- a/kernel/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/kernel/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -8,15 +8,16 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- 
/home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -67,7 +68,7 @@ enum vgt_event_type { enum pc_di_primtype { DI_PT_NONE = 0, - DI_PT_POINTLIST_A2XX = 1, + DI_PT_POINTLIST_PSIZE = 1, DI_PT_LINELIST = 2, DI_PT_LINESTRIP = 3, DI_PT_TRILIST = 4, @@ -75,17 +76,12 @@ enum pc_di_primtype { DI_PT_TRISTRIP = 6, DI_PT_LINELOOP = 7, DI_PT_RECTLIST = 8, - DI_PT_POINTLIST_A3XX = 9, - DI_PT_QUADLIST = 13, - DI_PT_QUADSTRIP = 14, - DI_PT_POLYGON = 15, - DI_PT_2D_COPY_RECT_LIST_V0 = 16, - DI_PT_2D_COPY_RECT_LIST_V1 = 17, - DI_PT_2D_COPY_RECT_LIST_V2 = 18, - DI_PT_2D_COPY_RECT_LIST_V3 = 19, - DI_PT_2D_FILL_RECT_LIST = 20, - DI_PT_2D_LINE_STRIP = 21, - DI_PT_2D_TRI_STRIP = 22, + DI_PT_POINTLIST = 9, + DI_PT_LINE_ADJ = 10, + DI_PT_LINESTRIP_ADJ = 11, + DI_PT_TRI_ADJ = 12, + DI_PT_TRISTRIP_ADJ = 13, + DI_PT_PATCHES = 34, }; enum pc_di_src_sel { @@ -192,6 +188,7 @@ enum adreno_state_block { SB_FRAG_TEX = 2, SB_FRAG_MIPADDR = 3, SB_VERT_SHADER = 4, + SB_GEOM_SHADER = 5, SB_FRAG_SHADER = 6, }; @@ -382,12 +379,19 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va { return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; } +#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10 static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val) { return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK; } +#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK 0x01f00000 +#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT 20 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_TESS_MODE(uint32_t val) +{ + return ((val) << 
CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT) & CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK; +} #define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 #define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi.c b/kernel/drivers/gpu/drm/msm/dsi/dsi.c index ad50b8022..6edcd6f57 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/dsi.c +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi.c @@ -15,20 +15,55 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) { - if (!msm_dsi || !msm_dsi->panel) + if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) return NULL; - return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ? + return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ? msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; } +static int dsi_get_phy(struct msm_dsi *msm_dsi) +{ + struct platform_device *pdev = msm_dsi->pdev; + struct platform_device *phy_pdev; + struct device_node *phy_node; + + phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0); + if (!phy_node) { + dev_err(&pdev->dev, "cannot find phy device\n"); + return -ENXIO; + } + + phy_pdev = of_find_device_by_node(phy_node); + if (phy_pdev) + msm_dsi->phy = platform_get_drvdata(phy_pdev); + + of_node_put(phy_node); + + if (!phy_pdev || !msm_dsi->phy) { + dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + + msm_dsi->phy_dev = get_device(&phy_pdev->dev); + + return 0; +} + static void dsi_destroy(struct msm_dsi *msm_dsi) { if (!msm_dsi) return; msm_dsi_manager_unregister(msm_dsi); + + if (msm_dsi->phy_dev) { + put_device(msm_dsi->phy_dev); + msm_dsi->phy = NULL; + msm_dsi->phy_dev = NULL; + } + if (msm_dsi->host) { msm_dsi_host_destroy(msm_dsi->host); msm_dsi->host = NULL; @@ -39,20 +74,15 @@ static void dsi_destroy(struct msm_dsi *msm_dsi) static struct msm_dsi *dsi_init(struct platform_device *pdev) { - struct msm_dsi *msm_dsi = NULL; + struct msm_dsi *msm_dsi; int ret; - if (!pdev) { - dev_err(&pdev->dev, "no dsi device\n"); - ret = -ENXIO; - goto fail; - } + if (!pdev) + return ERR_PTR(-ENXIO); msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL); - if (!msm_dsi) { - ret = -ENOMEM; - goto fail; - } + if (!msm_dsi) + return ERR_PTR(-ENOMEM); DBG("dsi probed=%p", msm_dsi); msm_dsi->pdev = pdev; @@ -61,19 +91,22 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev) /* Init dsi host */ ret = msm_dsi_host_init(msm_dsi); if (ret) - goto fail; + goto destroy_dsi; + + /* GET dsi PHY */ + ret = dsi_get_phy(msm_dsi); + if (ret) + goto destroy_dsi; /* Register to dsi manager */ ret = msm_dsi_manager_register(msm_dsi); if (ret) - goto fail; + goto destroy_dsi; return msm_dsi; -fail: - if (msm_dsi) - dsi_destroy(msm_dsi); - +destroy_dsi: + dsi_destroy(msm_dsi); return ERR_PTR(ret); } @@ -142,12 +175,14 @@ static struct platform_driver dsi_driver = { void __init msm_dsi_register(void) { DBG(""); + msm_dsi_phy_driver_register(); platform_driver_register(&dsi_driver); } void __exit msm_dsi_unregister(void) { DBG(""); + msm_dsi_phy_driver_unregister(); platform_driver_unregister(&dsi_driver); } @@ -155,6 +190,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) { struct msm_drm_private *priv = dev->dev_private; + struct drm_bridge *ext_bridge; int ret, i; if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || @@ -182,10 +218,25 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, msm_dsi->encoders[i] = encoders[i]; } 
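The dsi_get_phy() helper added in dsi.c above is the usual OF phandle lookup with probe deferral: resolve the "qcom,dsi-phy" phandle, map it to a platform device, and return -EPROBE_DEFER if the PHY driver has not populated its drvdata yet. A minimal sketch of that pattern, for reference only; the "qcom,dsi-phy" property and the drvdata/get_device() handling come from the hunk above, while the function and variable names here are illustrative and not part of the patch:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Illustrative sketch, not part of the patch. */
static int lookup_phy_sketch(struct platform_device *pdev,
			     void **phy_drvdata, struct device **phy_dev)
{
	struct device_node *phy_node;
	struct platform_device *phy_pdev;

	/* resolve the "qcom,dsi-phy" phandle in our own DT node */
	phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
	if (!phy_node)
		return -ENXIO;		/* missing DT link: hard error */

	phy_pdev = of_find_device_by_node(phy_node);
	of_node_put(phy_node);

	/* device not created, or its driver not probed yet: retry later */
	if (!phy_pdev || !platform_get_drvdata(phy_pdev))
		return -EPROBE_DEFER;

	*phy_drvdata = platform_get_drvdata(phy_pdev);
	*phy_dev = get_device(&phy_pdev->dev);	/* balanced by put_device() */

	return 0;
}

The matching put_device() and pointer clearing are what the patch adds to dsi_destroy(). The msm_dsi_modeset_init() hunk resumes below.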
- msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id); + /* + * check if the dsi encoder output is connected to a panel or an + * external bridge. We create a connector only if we're connected to a + * drm_panel device. When we're connected to an external bridge, we + * assume that the drm_bridge driver will create the connector itself. + */ + ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host); + + if (ext_bridge) + msm_dsi->connector = + msm_dsi_manager_ext_bridge_init(msm_dsi->id); + else + msm_dsi->connector = + msm_dsi_manager_connector_init(msm_dsi->id); + if (IS_ERR(msm_dsi->connector)) { ret = PTR_ERR(msm_dsi->connector); - dev_err(dev->dev, "failed to create dsi connector: %d\n", ret); + dev_err(dev->dev, + "failed to create dsi connector: %d\n", ret); msm_dsi->connector = NULL; goto fail; } @@ -201,10 +252,12 @@ fail: msm_dsi_manager_bridge_destroy(msm_dsi->bridge); msm_dsi->bridge = NULL; } - if (msm_dsi->connector) { + + /* don't destroy connector if we didn't make it */ + if (msm_dsi->connector && !msm_dsi->external_bridge) msm_dsi->connector->funcs->destroy(msm_dsi->connector); - msm_dsi->connector = NULL; - } + + msm_dsi->connector = NULL; } return ret; diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi.h b/kernel/drivers/gpu/drm/msm/dsi/dsi.h index 10f54d4e3..5f5a3732c 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/dsi.h +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi.h @@ -14,6 +14,7 @@ #ifndef __DSI_CONNECTOR_H__ #define __DSI_CONNECTOR_H__ +#include <linux/of_platform.h> #include <linux/platform_device.h> #include "drm_crtc.h" @@ -26,29 +27,50 @@ #define DSI_1 1 #define DSI_MAX 2 -#define DSI_CLOCK_MASTER DSI_0 -#define DSI_CLOCK_SLAVE DSI_1 +enum msm_dsi_phy_type { + MSM_DSI_PHY_28NM_HPM, + MSM_DSI_PHY_28NM_LP, + MSM_DSI_PHY_20NM, + MSM_DSI_PHY_MAX +}; -#define DSI_LEFT DSI_0 -#define DSI_RIGHT DSI_1 +#define DSI_DEV_REGULATOR_MAX 8 -/* According to the current drm framework sequence, take the encoder of - * DSI_1 as master encoder - */ -#define DSI_ENCODER_MASTER DSI_1 -#define DSI_ENCODER_SLAVE DSI_0 +/* Regulators for DSI devices */ +struct dsi_reg_entry { + char name[32]; + int min_voltage; + int max_voltage; + int enable_load; + int disable_load; +}; + +struct dsi_reg_config { + int num; + struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX]; +}; struct msm_dsi { struct drm_device *dev; struct platform_device *pdev; + /* connector managed by us when we're connected to a drm_panel */ struct drm_connector *connector; + /* internal dsi bridge attached to MDP interface */ struct drm_bridge *bridge; struct mipi_dsi_host *host; struct msm_dsi_phy *phy; + + /* + * panel/external_bridge connected to dsi bridge output, only one of the + * two can be valid at a time + */ struct drm_panel *panel; - unsigned long panel_flags; + struct drm_bridge *external_bridge; + unsigned long device_flags; + + struct device *phy_dev; bool phy_enabled; /* the encoders we are hooked to (outside of dsi block) */ @@ -61,6 +83,7 @@ struct msm_dsi { struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); struct drm_connector *msm_dsi_manager_connector_init(u8 id); +struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); int msm_dsi_manager_phy_enable(int id, const unsigned long bit_rate, const unsigned long esc_rate, u32 *clk_pre, u32 *clk_post); @@ -71,8 +94,45 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); /* msm dsi */ +static inline bool 
msm_dsi_device_connected(struct msm_dsi *msm_dsi) +{ + return msm_dsi->panel || msm_dsi->external_bridge; +} + struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi); +/* dsi pll */ +struct msm_dsi_pll; +#ifdef CONFIG_DRM_MSM_DSI_PLL +struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int dsi_id); +void msm_dsi_pll_destroy(struct msm_dsi_pll *pll); +int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, struct clk **pixel_clk_provider); +void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); +int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); +#else +static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) { + return ERR_PTR(-ENODEV); +} +static inline void msm_dsi_pll_destroy(struct msm_dsi_pll *pll) +{ +} +static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, struct clk **pixel_clk_provider) +{ + return -ENODEV; +} +static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll) +{ +} +static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) +{ + return 0; +} +#endif + /* dsi host */ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg); @@ -92,8 +152,11 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode); struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, unsigned long *panel_flags); +struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); void msm_dsi_host_unregister(struct mipi_dsi_host *host); +int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, + struct msm_dsi_pll *src_pll); void msm_dsi_host_destroy(struct mipi_dsi_host *host); int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, struct drm_device *dev); @@ -101,17 +164,14 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi); /* dsi phy */ struct msm_dsi_phy; -enum msm_dsi_phy_type { - MSM_DSI_PHY_UNKNOWN, - MSM_DSI_PHY_28NM, - MSM_DSI_PHY_MAX -}; -struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev, - enum msm_dsi_phy_type type, int id); -int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, +void msm_dsi_phy_driver_register(void); +void msm_dsi_phy_driver_unregister(void); +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, const unsigned long bit_rate, const unsigned long esc_rate); -int msm_dsi_phy_disable(struct msm_dsi_phy *phy); +void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, u32 *clk_pre, u32 *clk_post); +struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); + #endif /* __DSI_CONNECTOR_H__ */ diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi.xml.h b/kernel/drivers/gpu/drm/msm/dsi/dsi.xml.h index 1dcfae265..b2b5f3dd1 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,8 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31) -- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- 
/home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -373,6 +382,11 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val) #define REG_DSI_TRIG_DMA 0x0000008c #define REG_DSI_DLN0_PHY_ERR 0x000000b0 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000 #define REG_DSI_TIMEOUT_STATUS 0x000000bc @@ -394,6 +408,9 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) #define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001 #define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010 +#define REG_DSI_LANE_CTRL 0x000000a8 +#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000 + #define REG_DSI_LANE_SWAP_CTRL 0x000000ac #define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007 #define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0 @@ -423,6 +440,9 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) #define REG_DSI_PHY_RESET 0x00000128 #define DSI_PHY_RESET_RESET 0x00000001 +#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c +#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001 + #define REG_DSI_RDBK_DATA_CTRL 0x000001d0 #define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000 #define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16 @@ -547,114 +567,234 @@ static inline uint32_t DSI_VERSION_MAJOR(uint32_t val) #define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc #define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000 -static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; } +static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; } + +#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100 + +#define 
REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104 + +#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108 + +#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c + +#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114 + +#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118 + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140 +#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144 +#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148 +#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c -static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; } +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150 +#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154 +#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158 +#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c +#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160 +#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK; +} + +#define 
REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164 +#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 +#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK; +} +#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 +#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK; +} + +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168 +#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 +#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK; +} -static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; } +#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c +#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff +#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 +static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) +{ + return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; +} -static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; } +#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170 -static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; } +#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174 -static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; } +#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178 -static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; } +#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c -#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400 +#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180 -#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404 +#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184 -#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408 +#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188 -#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c +#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c -#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414 +#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190 -#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418 +#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194 -#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440 +#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198 -#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444 +#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c -#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448 +#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0 -#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000 -#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450 +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004 -#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454 +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008 -#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458 +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c -#define 
REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010 -#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460 +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014 -#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464 +#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018 -#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028 -#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c +#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c -#define REG_DSI_8960_PHY_CTRL_0 0x00000470 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030 -#define REG_DSI_8960_PHY_CTRL_1 0x00000474 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034 -#define REG_DSI_8960_PHY_CTRL_2 0x00000478 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038 -#define REG_DSI_8960_PHY_CTRL_3 0x0000047c +#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c -#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040 -#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044 -#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488 +#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048 -#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c +#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050 +#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010 -#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000 +#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001 -#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004 -#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008 -#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c -#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010 -#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014 -#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018 -#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c -#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020 -#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024 -#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028 -#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c -#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030 -#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034 -#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038 -#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c -#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040 -#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044 -#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048 -#define 
REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c -#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550 -#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010 +#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050 + +#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080 +#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001 static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } @@ -818,6 +958,7 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) #define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8 #define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4 +#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001 #define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc @@ -835,5 +976,332 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) #define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 +#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000 +#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001 + +#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 + +#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008 + +#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c + +#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010 +#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002 + +#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014 + +#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018 + +#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c + +#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020 +#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001 +#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002 +#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004 +#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008 + +#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024 + +#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028 + +#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c + +#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030 + +#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034 + +#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038 +#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f +#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK; +} +#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040 + +#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c +#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f +#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK; +} +#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040 +#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6 +static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK; +} + +#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040 +#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff +#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK; +} + +#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044 +#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 
0x000000ff +#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK; +} + +#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048 + +#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c + +#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050 + +#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054 + +#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058 + +#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c + +#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060 + +#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064 + +#define REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068 +#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094 + +#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098 + +#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c + +#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 + +#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4 + +#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8 + +#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac + +#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0 + +#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4 + +#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8 + +#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc + +#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0 +#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001 + +#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4 + +#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8 + +#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc + +#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0 + +#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4 + +static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; } + +#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100 + +#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104 + +#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108 + +#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c + +#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110 + +#define 
REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114 + +#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118 + +#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c + +#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120 + +#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140 +#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144 +#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148 +#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c +#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001 + +#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150 +#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154 +#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158 +#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c +#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160 +#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164 +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK; +} +#define 
DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168 +#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 +#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c +#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; +} + +#define REG_DSI_20nm_PHY_CTRL_0 0x00000170 + +#define REG_DSI_20nm_PHY_CTRL_1 0x00000174 + +#define REG_DSI_20nm_PHY_CTRL_2 0x00000178 + +#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c + +#define REG_DSI_20nm_PHY_CTRL_4 0x00000180 + +#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184 + +#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188 + +#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4 + +#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8 + +#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc + +#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0 + +#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4 + +#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8 + +#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4 +#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001 + +#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014 + +#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 + #endif /* DSI_XML */ diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/kernel/drivers/gpu/drm/msm/dsi/dsi_cfg.c new file mode 100644 index 000000000..5872d5e59 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi_cfg.h" + +/* DSI v2 has not been supported by now */ +static const struct msm_dsi_config dsi_v2_cfg = { + .io_offset = 0, +}; + +static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 3000000, 3000000, 150000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, +}; + +static const struct msm_dsi_config msm8916_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 2850000, 2850000, 100000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, +}; + +static const struct msm_dsi_config msm8994_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 7, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdda", 1250000, 1250000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + {"vcca", 1000000, 1000000, 10000, 100}, + {"vdd", 1800000, 1800000, 100000, 100}, + {"lab_reg", -1, -1, -1, -1}, + {"ibb_reg", -1, -1, -1, -1}, + }, + } +}; + +static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { + {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; + int i; + + for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) { + if ((dsi_cfg_handlers[i].major == major) && + (dsi_cfg_handlers[i].minor == minor)) { + cfg_hnd = &dsi_cfg_handlers[i]; + break; + } + } + + return cfg_hnd; +} + diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/kernel/drivers/gpu/drm/msm/dsi/dsi_cfg.h new file mode 100644 index 000000000..4cf887240 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MSM_DSI_CFG_H__ +#define __MSM_DSI_CFG_H__ + +#include "dsi.h" + +#define MSM_DSI_VER_MAJOR_V2 0x02 +#define MSM_DSI_VER_MAJOR_6G 0x03 +#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 +#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 +#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 +#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 +#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 +#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 + +#define DSI_6G_REG_SHIFT 4 + +struct msm_dsi_config { + u32 io_offset; + struct dsi_reg_config reg_cfg; +}; + +struct msm_dsi_cfg_handler { + u32 major; + u32 minor; + const struct msm_dsi_config *cfg; +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor); + +#endif /* __MSM_DSI_CFG_H__ */ + diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi_host.c b/kernel/drivers/gpu/drm/msm/dsi/dsi_host.c index 649d20d29..4c49868ef 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -15,127 +15,20 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_irq.h> +#include <linux/pinctrl/consumer.h> +#include <linux/of_graph.h> #include <linux/regulator/consumer.h> #include <linux/spinlock.h> #include <video/mipi_display.h> #include "dsi.h" #include "dsi.xml.h" - -#define MSM_DSI_VER_MAJOR_V2 0x02 -#define MSM_DSI_VER_MAJOR_6G 0x03 -#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 -#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 -#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 -#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 -#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 - -#define DSI_6G_REG_SHIFT 4 - -#define DSI_REGULATOR_MAX 8 -struct dsi_reg_entry { - char name[32]; - int min_voltage; - int max_voltage; - int enable_load; - int disable_load; -}; - -struct dsi_reg_config { - int num; - struct dsi_reg_entry regs[DSI_REGULATOR_MAX]; -}; - -struct dsi_config { - u32 major; - u32 minor; - u32 io_offset; - enum msm_dsi_phy_type phy_type; - struct dsi_reg_config reg_cfg; -}; - -static const struct dsi_config dsi_cfgs[] = { - {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN}, - { /* 8974 v1 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_0, - .io_offset = DSI_6G_REG_SHIFT, - .phy_type = MSM_DSI_PHY_28NM, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8974 v2 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_1, - .io_offset = DSI_6G_REG_SHIFT, - .phy_type = MSM_DSI_PHY_28NM, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8974 v3 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_1_1, - .io_offset = DSI_6G_REG_SHIFT, - .phy_type = MSM_DSI_PHY_28NM, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8084 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_2, - .io_offset = DSI_6G_REG_SHIFT, - .phy_type = MSM_DSI_PHY_28NM, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, 
-1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8916 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_3_1, - .io_offset = DSI_6G_REG_SHIFT, - .phy_type = MSM_DSI_PHY_28NM, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 2850000, 2850000, 100000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, -}; +#include "dsi_cfg.h" static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) { @@ -197,7 +90,7 @@ struct msm_dsi_host { int id; void __iomem *ctrl_base; - struct regulator_bulk_data supplies[DSI_REGULATOR_MAX]; + struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; struct clk *mdp_core_clk; struct clk *ahb_clk; struct clk *axi_clk; @@ -205,12 +98,15 @@ struct msm_dsi_host { struct clk *byte_clk; struct clk *esc_clk; struct clk *pixel_clk; + struct clk *byte_clk_src; + struct clk *pixel_clk_src; + u32 byte_clk_rate; struct gpio_desc *disp_en_gpio; struct gpio_desc *te_gpio; - const struct dsi_config *cfg; + const struct msm_dsi_cfg_handler *cfg_hnd; struct completion dma_comp; struct completion video_comp; @@ -228,8 +124,8 @@ struct msm_dsi_host { struct drm_display_mode *mode; - /* Panel info */ - struct device_node *panel_node; + /* connected device info */ + struct device_node *device_node; unsigned int channel; unsigned int lanes; enum mipi_dsi_pixel_format format; @@ -255,61 +151,58 @@ static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt) static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) { - return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg); + return msm_readl(msm_host->ctrl_base + reg); } static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) { - msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg); + msm_writel(data, msm_host->ctrl_base + reg); } static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); -static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host) +static const struct msm_dsi_cfg_handler *dsi_get_config( + struct msm_dsi_host *msm_host) { - const struct dsi_config *cfg; + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; struct regulator *gdsc_reg; - int i, ret; + int ret; u32 major = 0, minor = 0; gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc"); - if (IS_ERR_OR_NULL(gdsc_reg)) { + if (IS_ERR(gdsc_reg)) { pr_err("%s: cannot get gdsc\n", __func__); - goto fail; + goto exit; } ret = regulator_enable(gdsc_reg); if (ret) { pr_err("%s: unable to enable gdsc\n", __func__); - regulator_put(gdsc_reg); - goto fail; + goto put_gdsc; } ret = clk_prepare_enable(msm_host->ahb_clk); if (ret) { pr_err("%s: unable to enable ahb_clk\n", __func__); - regulator_disable(gdsc_reg); - regulator_put(gdsc_reg); - goto fail; + goto disable_gdsc; } ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); - - clk_disable_unprepare(msm_host->ahb_clk); - regulator_disable(gdsc_reg); - regulator_put(gdsc_reg); if (ret) { pr_err("%s: Invalid version\n", __func__); - goto fail; + goto disable_clks; } - for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) { - cfg = dsi_cfgs + i; - if ((cfg->major == major) && (cfg->minor == minor)) - return cfg; - } - pr_err("%s: Version %x:%x not support\n", __func__, major, minor); + cfg_hnd = msm_dsi_cfg_get(major, minor); -fail: - return NULL; 
+ DBG("%s: Version %x:%x\n", __func__, major, minor); + +disable_clks: + clk_disable_unprepare(msm_host->ahb_clk); +disable_gdsc: + regulator_disable(gdsc_reg); +put_gdsc: + regulator_put(gdsc_reg); +exit: + return cfg_hnd; } static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) @@ -320,8 +213,8 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) { struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; - int num = msm_host->cfg->reg_cfg.num; + const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; + int num = msm_host->cfg_hnd->cfg->reg_cfg.num; int i; DBG(""); @@ -336,8 +229,8 @@ static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) { struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; - int num = msm_host->cfg->reg_cfg.num; + const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; + int num = msm_host->cfg_hnd->cfg->reg_cfg.num; int ret, i; DBG(""); @@ -370,8 +263,8 @@ fail: static int dsi_regulator_init(struct msm_dsi_host *msm_host) { struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; - int num = msm_host->cfg->reg_cfg.num; + const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; + int num = msm_host->cfg_hnd->cfg->reg_cfg.num; int i, ret; for (i = 0; i < num; i++) @@ -385,7 +278,7 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host) } for (i = 0; i < num; i++) { - if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { + if (regulator_can_change_voltage(s[i].consumer)) { ret = regulator_set_voltage(s[i].consumer, regs[i].min_voltage, regs[i].max_voltage); if (ret < 0) { @@ -463,6 +356,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) goto exit; } + msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src"); + if (IS_ERR(msm_host->byte_clk_src)) { + ret = PTR_ERR(msm_host->byte_clk_src); + pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret); + msm_host->byte_clk_src = NULL; + goto exit; + } + + msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src"); + if (IS_ERR(msm_host->pixel_clk_src)) { + ret = PTR_ERR(msm_host->pixel_clk_src); + pr_err("%s: can't find pixel_clk_src. 
ret=%d\n", __func__, ret); + msm_host->pixel_clk_src = NULL; + goto exit; + } + exit: return ret; } @@ -697,6 +606,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, { u32 flags = msm_host->mode_flags; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; u32 data = 0; if (!enable) { @@ -750,8 +660,8 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); - if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && - (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); @@ -787,6 +697,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123)); } + + if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) + dsi_write(msm_host, REG_DSI_LANE_CTRL, + DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); + data |= DSI_CTRL_ENABLE; dsi_write(msm_host, REG_DSI_CTRL, data); @@ -1252,7 +1167,11 @@ static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host) status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); - if (status) { + if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC | + DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) { dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; } @@ -1345,36 +1264,20 @@ static irqreturn_t dsi_host_irq(int irq, void *ptr) static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host, struct device *panel_device) { - int ret; - - msm_host->disp_en_gpio = devm_gpiod_get(panel_device, - "disp-enable"); + msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device, + "disp-enable", + GPIOD_OUT_LOW); if (IS_ERR(msm_host->disp_en_gpio)) { DBG("cannot get disp-enable-gpios %ld", PTR_ERR(msm_host->disp_en_gpio)); - msm_host->disp_en_gpio = NULL; - } - if (msm_host->disp_en_gpio) { - ret = gpiod_direction_output(msm_host->disp_en_gpio, 0); - if (ret) { - pr_err("cannot set dir to disp-en-gpios %d\n", ret); - return ret; - } + return PTR_ERR(msm_host->disp_en_gpio); } - msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te"); + msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te", + GPIOD_IN); if (IS_ERR(msm_host->te_gpio)) { DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); - msm_host->te_gpio = NULL; - } - - if (msm_host->te_gpio) { - ret = gpiod_direction_input(msm_host->te_gpio); - if (ret) { - pr_err("%s: cannot set dir to disp-te-gpios, %d\n", - __func__, ret); - return ret; - } + return PTR_ERR(msm_host->te_gpio); } return 0; @@ -1391,7 +1294,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host, msm_host->format = dsi->format; msm_host->mode_flags = dsi->mode_flags; - msm_host->panel_node = dsi->dev.of_node; + WARN_ON(dsi->dev.of_node != msm_host->device_node); /* Some gpios defined in panel DT need to be controlled by host */ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); @@ -1410,7 +1313,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host, { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - msm_host->panel_node = NULL; + 
msm_host->device_node = NULL; DBG("id=%d", msm_host->id); if (msm_host->dev) @@ -1441,6 +1344,48 @@ static struct mipi_dsi_host_ops dsi_host_ops = { .transfer = dsi_host_transfer, }; +static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) +{ + struct device *dev = &msm_host->pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *endpoint, *device_node; + int ret; + + ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id); + if (ret) { + dev_err(dev, "%s: host index not specified, ret=%d\n", + __func__, ret); + return ret; + } + + /* + * Get the first endpoint node. In our case, dsi has one output port + * to which the panel is connected. Don't return an error if a port + * isn't defined. It's possible that there is nothing connected to + * the dsi output. + */ + endpoint = of_graph_get_next_endpoint(np, NULL); + if (!endpoint) { + dev_dbg(dev, "%s: no endpoint\n", __func__); + return 0; + } + + /* Get panel node from the output port's endpoint data */ + device_node = of_graph_get_remote_port_parent(endpoint); + if (!device_node) { + dev_err(dev, "%s: no valid device\n", __func__); + of_node_put(endpoint); + return -ENODEV; + } + + of_node_put(endpoint); + of_node_put(device_node); + + msm_host->device_node = device_node; + + return 0; +} + int msm_dsi_host_init(struct msm_dsi *msm_dsi) { struct msm_dsi_host *msm_host = NULL; @@ -1455,15 +1400,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) goto fail; } - ret = of_property_read_u32(pdev->dev.of_node, - "qcom,dsi-host-index", &msm_host->id); + msm_host->pdev = pdev; + + ret = dsi_host_parse_dt(msm_host); if (ret) { - dev_err(&pdev->dev, - "%s: host index not specified, ret=%d\n", - __func__, ret); + pr_err("%s: failed to parse dt\n", __func__); goto fail; } - msm_host->pdev = pdev; ret = dsi_clk_init(msm_host); if (ret) { @@ -1478,13 +1421,16 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) goto fail; } - msm_host->cfg = dsi_get_config(msm_host); - if (!msm_host->cfg) { + msm_host->cfg_hnd = dsi_get_config(msm_host); + if (!msm_host->cfg_hnd) { ret = -EINVAL; pr_err("%s: get config failed\n", __func__); goto fail; } + /* fixup base address by io offset */ + msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; + ret = dsi_regulator_init(msm_host); if (ret) { pr_err("%s: regulator init failed\n", __func__); @@ -1508,13 +1454,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); INIT_WORK(&msm_host->err_work, dsi_err_worker); - msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type, - msm_host->id); - if (!msm_dsi->phy) { - ret = -EINVAL; - pr_err("%s: phy init failed\n", __func__); - goto fail; - } msm_dsi->host = &msm_host->base; msm_dsi->id = msm_host->id; @@ -1578,7 +1517,6 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - struct device_node *node; int ret; /* Register mipi dsi host */ @@ -1596,14 +1534,13 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) * It makes sure panel is connected when fbcon detects * connector status and gets the proper display mode to * create framebuffer. 
+ * Don't try to defer if there is nothing connected to the dsi + * output */ - if (check_defer) { - node = of_get_child_by_name(msm_host->pdev->dev.of_node, - "panel"); - if (node) { - if (!of_drm_find_panel(node)) + if (check_defer && msm_host->device_node) { + if (!of_drm_find_panel(msm_host->device_node)) + if (!of_drm_find_bridge(msm_host->device_node)) return -EPROBE_DEFER; - } } } @@ -1682,6 +1619,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int data_byte, rx_byte, dlen, end; int short_response, diff, pkt_size, ret = 0; char cmd; @@ -1723,8 +1661,8 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, return -EINVAL; } - if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && - (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { /* Clear the RDBK_DATA registers */ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, DSI_RDBK_DATA_CTRL_CLR); @@ -1824,6 +1762,39 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len) wmb(); } +int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, + struct msm_dsi_pll *src_pll) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + struct clk *byte_clk_provider, *pixel_clk_provider; + int ret; + + ret = msm_dsi_pll_get_clk_provider(src_pll, + &byte_clk_provider, &pixel_clk_provider); + if (ret) { + pr_info("%s: can't get provider from pll, don't set parent\n", + __func__); + return 0; + } + + ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider); + if (ret) { + pr_err("%s: can't set parent to byte_clk_src. ret=%d\n", + __func__, ret); + goto exit; + } + + ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider); + if (ret) { + pr_err("%s: can't set parent to pixel_clk_src. 
ret=%d\n", + __func__, ret); + goto exit; + } + +exit: + return ret; +} + int msm_dsi_host_enable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); @@ -1905,6 +1876,13 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) goto fail_disable_reg; } + ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev); + if (ret) { + pr_err("%s: failed to set pinctrl default state, %d\n", + __func__, ret); + goto fail_disable_clk; + } + dsi_timing_setup(msm_host); dsi_sw_reset(msm_host); dsi_ctrl_config(msm_host, true, clk_pre, clk_post); @@ -1917,6 +1895,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) return 0; +fail_disable_clk: + dsi_clk_ctrl(msm_host, 0); fail_disable_reg: dsi_host_regulator_disable(msm_host); unlock_ret: @@ -1939,6 +1919,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 0); + pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); + msm_dsi_manager_phy_disable(msm_host->id); dsi_clk_ctrl(msm_host, 0); @@ -1979,10 +1961,16 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, struct msm_dsi_host *msm_host = to_msm_dsi_host(host); struct drm_panel *panel; - panel = of_drm_find_panel(msm_host->panel_node); + panel = of_drm_find_panel(msm_host->device_node); if (panel_flags) *panel_flags = msm_host->mode_flags; return panel; } +struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + return of_drm_find_bridge(msm_host->device_node); +} diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi_manager.c b/kernel/drivers/gpu/drm/msm/dsi/dsi_manager.c index 0a40f3c64..0455ff750 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/kernel/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -14,19 +14,31 @@ #include "msm_kms.h" #include "dsi.h" +#define DSI_CLOCK_MASTER DSI_0 +#define DSI_CLOCK_SLAVE DSI_1 + +#define DSI_LEFT DSI_0 +#define DSI_RIGHT DSI_1 + +/* According to the current drm framework sequence, take the encoder of + * DSI_1 as master encoder + */ +#define DSI_ENCODER_MASTER DSI_1 +#define DSI_ENCODER_SLAVE DSI_0 + struct msm_dsi_manager { struct msm_dsi *dsi[DSI_MAX]; - bool is_dual_panel; + bool is_dual_dsi; bool is_sync_needed; - int master_panel_id; + int master_dsi_link_id; }; static struct msm_dsi_manager msm_dsim_glb; -#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel) +#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi) #define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) -#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id) +#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id) static inline struct msm_dsi *dsi_mgr_get_dsi(int id) { @@ -38,28 +50,75 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id) return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; } -static int dsi_mgr_parse_dual_panel(struct device_node *np, int id) +static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) { struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; - /* We assume 2 dsi nodes have the same information of dual-panel and + /* We assume 2 dsi nodes have the same information of dual-dsi and * sync-mode, and only one node specifies master in case of dual mode. 
 */
-	if (!msm_dsim->is_dual_panel)
-		msm_dsim->is_dual_panel = of_property_read_bool(
-				np, "qcom,dual-panel-mode");
+	if (!msm_dsim->is_dual_dsi)
+		msm_dsim->is_dual_dsi = of_property_read_bool(
+				np, "qcom,dual-dsi-mode");
 
-	if (msm_dsim->is_dual_panel) {
-		if (of_property_read_bool(np, "qcom,master-panel"))
-			msm_dsim->master_panel_id = id;
+	if (msm_dsim->is_dual_dsi) {
+		if (of_property_read_bool(np, "qcom,master-dsi"))
+			msm_dsim->master_dsi_link_id = id;
 		if (!msm_dsim->is_sync_needed)
 			msm_dsim->is_sync_needed = of_property_read_bool(
-				np, "qcom,sync-dual-panel");
+				np, "qcom,sync-dual-dsi");
 	}
 
 	return 0;
 }
 
+static int dsi_mgr_host_register(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi_pll *src_pll;
+	int ret;
+
+	if (!IS_DUAL_DSI()) {
+		ret = msm_dsi_host_register(msm_dsi->host, true);
+		if (ret)
+			return ret;
+
+		src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+	} else if (!other_dsi) {
+		ret = 0;
+	} else {
+		struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
+					msm_dsi : other_dsi;
+		struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
+					other_dsi : msm_dsi;
+		/* Register the slave host first, so that the slave DSI device
+		 * has a chance to probe and does not block the master
+		 * DSI device's probe.
+		 * Also, do not check defer for the slave host, because only
+		 * the master DSI device adds the panel to the global panel
+		 * list. The panel's device is the master DSI device.
+		 */
+		ret = msm_dsi_host_register(sdsi->host, false);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_register(mdsi->host, true);
+		if (ret)
+			return ret;
+
+		/* In dual DSI mode, PLL0 drives both DSI link clocks. */
+		src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
+		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_set_src_pll(other_dsi->host, src_pll);
+	}
+
+	return ret;
+}
+
 struct dsi_connector {
 	struct drm_connector base;
 	int id;
@@ -97,28 +156,28 @@ static enum drm_connector_status dsi_mgr_connector_detect(
 	DBG("id=%d", id);
 	if (!msm_dsi->panel) {
 		msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
-						&msm_dsi->panel_flags);
+						&msm_dsi->device_flags);
 
 		/* There is only 1 panel in the global panel list
-		 * for dual panel mode. Therefore slave dsi should get
+		 * for dual DSI mode. Therefore slave dsi should get
 		 * the drm_panel instance from master dsi, and
 		 * keep using the panel flags got from the current DSI link.
 		 */
-		if (!msm_dsi->panel && IS_DUAL_PANEL() &&
-			!IS_MASTER_PANEL(id) && other_dsi)
+		if (!msm_dsi->panel && IS_DUAL_DSI() &&
+			!IS_MASTER_DSI_LINK(id) && other_dsi)
 			msm_dsi->panel = msm_dsi_host_get_panel(
 					other_dsi->host, NULL);
 
-		if (msm_dsi->panel && IS_DUAL_PANEL())
+		if (msm_dsi->panel && IS_DUAL_DSI())
 			drm_object_attach_property(&connector->base,
 				connector->dev->mode_config.tile_property, 0);
 
-		/* Set split display info to kms once dual panel is connected
-		 * to both hosts
+		/* Set split display info to kms once dual DSI panel is
+		 * connected to both hosts.
*/ - if (msm_dsi->panel && IS_DUAL_PANEL() && + if (msm_dsi->panel && IS_DUAL_DSI() && other_dsi && other_dsi->panel) { - bool cmd_mode = !(msm_dsi->panel_flags & + bool cmd_mode = !(msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO); struct drm_encoder *encoder = msm_dsi_get_encoder( dsi_mgr_get_dsi(DSI_ENCODER_MASTER)); @@ -129,7 +188,7 @@ static enum drm_connector_status dsi_mgr_connector_detect( kms->funcs->set_split_display(kms, encoder, slave_enc, cmd_mode); else - pr_err("mdp does not support dual panel\n"); + pr_err("mdp does not support dual DSI\n"); } } @@ -226,7 +285,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) if (!num) return 0; - if (IS_DUAL_PANEL()) { + if (IS_DUAL_DSI()) { /* report half resolution to user */ dsi_dual_connector_fix_modes(connector); ret = dsi_dual_connector_tile_init(connector, id); @@ -281,11 +340,12 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; - bool is_dual_panel = IS_DUAL_PANEL(); + bool is_dual_dsi = IS_DUAL_DSI(); int ret; DBG("id=%d", id); - if (!panel || (is_dual_panel && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi) || + (is_dual_dsi && (DSI_1 == id))) return; ret = msm_dsi_host_power_on(host); @@ -294,7 +354,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) goto host_on_fail; } - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_power_on(msm_dsi1->host); if (ret) { pr_err("%s: power on host1 failed, %d\n", @@ -306,10 +366,13 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) /* Always call panel functions once, because even for dual panels, * there is only one drm_panel instance. 
*/ - ret = drm_panel_prepare(panel); - if (ret) { - pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret); - goto panel_prep_fail; + if (panel) { + ret = drm_panel_prepare(panel); + if (ret) { + pr_err("%s: prepare panel %d failed, %d\n", __func__, + id, ret); + goto panel_prep_fail; + } } ret = msm_dsi_host_enable(host); @@ -318,7 +381,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) goto host_en_fail; } - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_enable(msm_dsi1->host); if (ret) { pr_err("%s: enable host1 failed, %d\n", __func__, ret); @@ -326,23 +389,27 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) } } - ret = drm_panel_enable(panel); - if (ret) { - pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret); - goto panel_en_fail; + if (panel) { + ret = drm_panel_enable(panel); + if (ret) { + pr_err("%s: enable panel %d failed, %d\n", __func__, id, + ret); + goto panel_en_fail; + } } return; panel_en_fail: - if (is_dual_panel && msm_dsi1) + if (is_dual_dsi && msm_dsi1) msm_dsi_host_disable(msm_dsi1->host); host1_en_fail: msm_dsi_host_disable(host); host_en_fail: - drm_panel_unprepare(panel); + if (panel) + drm_panel_unprepare(panel); panel_prep_fail: - if (is_dual_panel && msm_dsi1) + if (is_dual_dsi && msm_dsi1) msm_dsi_host_power_off(msm_dsi1->host); host1_on_fail: msm_dsi_host_power_off(host); @@ -367,37 +434,44 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; - bool is_dual_panel = IS_DUAL_PANEL(); + bool is_dual_dsi = IS_DUAL_DSI(); int ret; DBG("id=%d", id); - if (!panel || (is_dual_panel && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi) || + (is_dual_dsi && (DSI_1 == id))) return; - ret = drm_panel_disable(panel); - if (ret) - pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret); + if (panel) { + ret = drm_panel_disable(panel); + if (ret) + pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, + ret); + } ret = msm_dsi_host_disable(host); if (ret) pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_disable(msm_dsi1->host); if (ret) pr_err("%s: host1 disable failed, %d\n", __func__, ret); } - ret = drm_panel_unprepare(panel); - if (ret) - pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret); + if (panel) { + ret = drm_panel_unprepare(panel); + if (ret) + pr_err("%s: Panel %d unprepare failed,%d\n", __func__, + id, ret); + } ret = msm_dsi_host_power_off(host); if (ret) pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_power_off(msm_dsi1->host); if (ret) pr_err("%s: host1 power off failed, %d\n", @@ -413,7 +487,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); struct mipi_dsi_host *host = msm_dsi->host; - bool is_dual_panel = IS_DUAL_PANEL(); + bool is_dual_dsi = IS_DUAL_DSI(); DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", mode->base.id, mode->name, @@ -424,11 +498,11 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - if (is_dual_panel && (DSI_1 == id)) + if (is_dual_dsi && (DSI_1 == id)) return; 
msm_dsi_host_set_display_mode(host, adjusted_mode); - if (is_dual_panel && other_dsi) + if (is_dual_dsi && other_dsi) msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); } @@ -456,7 +530,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = { .mode_set = dsi_mgr_bridge_mode_set, }; -/* initialize connector */ +/* initialize connector when we're connected to a drm_panel */ struct drm_connector *msm_dsi_manager_connector_init(u8 id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); @@ -541,6 +615,53 @@ fail: return ERR_PTR(ret); } +struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_device *dev = msm_dsi->dev; + struct drm_encoder *encoder; + struct drm_bridge *int_bridge, *ext_bridge; + struct drm_connector *connector; + struct list_head *connector_list; + + int_bridge = msm_dsi->bridge; + ext_bridge = msm_dsi->external_bridge = + msm_dsi_host_get_bridge(msm_dsi->host); + + /* + * HACK: we may not know the external DSI bridge device's mode + * flags here. We'll get to know them only when the device + * attaches to the dsi host. For now, assume the bridge supports + * DSI video mode + */ + encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + + /* link the internal dsi bridge to the external bridge */ + int_bridge->next = ext_bridge; + /* set the external bridge's encoder as dsi's encoder */ + ext_bridge->encoder = encoder; + + drm_bridge_attach(dev, ext_bridge); + + /* + * we need the drm_connector created by the external bridge + * driver (or someone else) to feed it to our driver's + * priv->connector[] list, mainly for msm_fbdev_init() + */ + connector_list = &dev->mode_config.connector_list; + + list_for_each_entry(connector, connector_list, head) { + int i; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == encoder->base.id) + return connector; + } + } + + return ERR_PTR(-ENODEV); +} + void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) { } @@ -551,12 +672,29 @@ int msm_dsi_manager_phy_enable(int id, { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi_phy *phy = msm_dsi->phy; + int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; + struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); int ret; - ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate); + ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate); if (ret) return ret; + /* + * Reset DSI PHY silently changes its PLL registers to reset status, + * which will confuse clock driver and result in wrong output rate of + * link clocks. Restore PLL status if its PLL is being used as clock + * source. + */ + if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) { + ret = msm_dsi_pll_restore_state(pll); + if (ret) { + pr_err("%s: failed to restore pll state\n", __func__); + msm_dsi_phy_disable(phy); + return ret; + } + } + msm_dsi->phy_enabled = true; msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); @@ -569,13 +707,18 @@ void msm_dsi_manager_phy_disable(int id) struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); struct msm_dsi_phy *phy = msm_dsi->phy; + struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); + + /* Save PLL status if it is a clock source */ + if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) + msm_dsi_pll_save_state(pll); /* disable DSI phy * In dual-dsi configuration, the phy should be disabled for the * first controller only when the second controller is disabled. 
*/ msm_dsi->phy_enabled = false; - if (IS_DUAL_PANEL() && mdsi && sdsi) { + if (IS_DUAL_DSI() && mdsi && sdsi) { if (!mdsi->phy_enabled && !sdsi->phy_enabled) { msm_dsi_phy_disable(sdsi->phy); msm_dsi_phy_disable(mdsi->phy); @@ -652,7 +795,6 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) { struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; int id = msm_dsi->id; - struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); int ret; if (id > DSI_MAX) { @@ -667,34 +809,23 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) msm_dsim->dsi[id] = msm_dsi; - ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id); + ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id); if (ret) { - pr_err("%s: failed to parse dual panel info\n", __func__); - return ret; + pr_err("%s: failed to parse dual DSI info\n", __func__); + goto fail; } - if (!IS_DUAL_PANEL()) { - ret = msm_dsi_host_register(msm_dsi->host, true); - } else if (!other_dsi) { - return 0; - } else { - struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ? - msm_dsi : other_dsi; - struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ? - other_dsi : msm_dsi; - /* Register slave host first, so that slave DSI device - * has a chance to probe, and do not block the master - * DSI device's probe. - * Also, do not check defer for the slave host, - * because only master DSI device adds the panel to global - * panel list. The panel's device is the master DSI device. - */ - ret = msm_dsi_host_register(sdsi->host, false); - if (ret) - return ret; - ret = msm_dsi_host_register(mdsi->host, true); + ret = dsi_mgr_host_register(id); + if (ret) { + pr_err("%s: failed to register mipi dsi host for DSI %d\n", + __func__, id); + goto fail; } + return 0; + +fail: + msm_dsim->dsi[id] = NULL; return ret; } diff --git a/kernel/drivers/gpu/drm/msm/dsi/dsi_phy.c b/kernel/drivers/gpu/drm/msm/dsi/dsi_phy.c deleted file mode 100644 index f0cea8927..000000000 --- a/kernel/drivers/gpu/drm/msm/dsi/dsi_phy.c +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "dsi.h" -#include "dsi.xml.h" - -#define dsi_phy_read(offset) msm_readl((offset)) -#define dsi_phy_write(offset, data) msm_writel((data), (offset)) - -struct dsi_dphy_timing { - u32 clk_pre; - u32 clk_post; - u32 clk_zero; - u32 clk_trail; - u32 clk_prepare; - u32 hs_exit; - u32 hs_zero; - u32 hs_prepare; - u32 hs_trail; - u32 hs_rqst; - u32 ta_go; - u32 ta_sure; - u32 ta_get; -}; - -struct msm_dsi_phy { - void __iomem *base; - void __iomem *reg_base; - int id; - struct dsi_dphy_timing timing; - int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel, - const unsigned long bit_rate, const unsigned long esc_rate); - int (*disable)(struct msm_dsi_phy *phy); -}; - -#define S_DIV_ROUND_UP(n, d) \ - (((n) >= 0) ? 
(((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) - -static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent, - s32 min_result, bool even) -{ - s32 v; - v = (tmax - tmin) * percent; - v = S_DIV_ROUND_UP(v, 100) + tmin; - if (even && (v & 0x1)) - return max_t(s32, min_result, v - 1); - else - return max_t(s32, min_result, v); -} - -static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing, - s32 ui, s32 coeff, s32 pcnt) -{ - s32 tmax, tmin, clk_z; - s32 temp; - - /* reset */ - temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui; - tmin = S_DIV_ROUND_UP(temp, ui) - 2; - if (tmin > 255) { - tmax = 511; - clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true); - } else { - tmax = 255; - clk_z = linear_inter(tmax, tmin, pcnt, 0, true); - } - - /* adjust */ - temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7; - timing->clk_zero = clk_z + 8 - temp; -} - -static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing, - const unsigned long bit_rate, const unsigned long esc_rate) -{ - s32 ui, lpx; - s32 tmax, tmin; - s32 pcnt0 = 10; - s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10; - s32 pcnt2 = 10; - s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40; - s32 coeff = 1000; /* Precision, should avoid overflow */ - s32 temp; - - if (!bit_rate || !esc_rate) - return -EINVAL; - - ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); - - tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2; - tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2; - timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true); - - temp = lpx / ui; - if (temp & 0x1) - timing->hs_rqst = temp; - else - timing->hs_rqst = max_t(s32, 0, temp - 2); - - /* Calculate clk_zero after clk_prepare and hs_rqst */ - dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2); - - temp = 105 * coeff + 12 * ui - 20 * coeff; - tmax = S_DIV_ROUND_UP(temp, ui) - 2; - tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2; - timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true); - - temp = 85 * coeff + 6 * ui; - tmax = S_DIV_ROUND_UP(temp, ui) - 2; - temp = 40 * coeff + 4 * ui; - tmin = S_DIV_ROUND_UP(temp, ui) - 2; - timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true); - - tmax = 255; - temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui; - temp = 145 * coeff + 10 * ui - temp; - tmin = S_DIV_ROUND_UP(temp, ui) - 2; - timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true); - - temp = 105 * coeff + 12 * ui - 20 * coeff; - tmax = S_DIV_ROUND_UP(temp, ui) - 2; - temp = 60 * coeff + 4 * ui; - tmin = DIV_ROUND_UP(temp, ui) - 2; - timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true); - - tmax = 255; - tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2; - timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true); - - tmax = 63; - temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; - temp = 60 * coeff + 52 * ui - 24 * ui - temp; - tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; - timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); - - tmax = 63; - temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; - temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; - temp += 8 * ui + lpx; - tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; - if (tmin > tmax) { - temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1; - timing->clk_pre = temp >> 1; - temp = (2 * tmax - tmin) * pcnt2; - } else { - timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); - } - - timing->ta_go = 3; - timing->ta_sure = 0; - timing->ta_get = 4; - - DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", - 
timing->clk_pre, timing->clk_post, timing->clk_zero, - timing->clk_trail, timing->clk_prepare, timing->hs_exit, - timing->hs_zero, timing->hs_prepare, timing->hs_trail, - timing->hs_rqst); - - return 0; -} - -static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) -{ - void __iomem *base = phy->reg_base; - - if (!enable) { - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); - return; - } - - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); -} - -static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, - const unsigned long bit_rate, const unsigned long esc_rate) -{ - struct dsi_dphy_timing *timing = &phy->timing; - int i; - void __iomem *base = phy->base; - - DBG(""); - - if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { - pr_err("%s: D-PHY timing calculation failed\n", __func__); - return -EINVAL; - } - - dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); - - dsi_28nm_phy_regulator_ctrl(phy, true); - - dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); - - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, - DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, - DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, - DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); - if (timing->clk_zero & BIT(8)) - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3, - DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4, - DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5, - DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6, - DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7, - DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8, - DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9, - DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | - DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10, - DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11, - DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); - - dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); - dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); - - dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6); - - for (i = 0; i < 4; i++) { - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1); - 
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97); - } - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf); - - dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); - dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb); - - dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); - - if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER)) - dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00); - else - dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01); - - return 0; -} - -static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy) -{ - dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0); - dsi_28nm_phy_regulator_ctrl(phy, false); - - /* - * Wait for the registers writes to complete in order to - * ensure that the phy is completely disabled - */ - wmb(); - - return 0; -} - -#define dsi_phy_func_init(name) \ - do { \ - phy->enable = dsi_##name##_phy_enable; \ - phy->disable = dsi_##name##_phy_disable; \ - } while (0) - -struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev, - enum msm_dsi_phy_type type, int id) -{ - struct msm_dsi_phy *phy; - - phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); - if (!phy) - return NULL; - - phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); - if (IS_ERR_OR_NULL(phy->base)) { - pr_err("%s: failed to map phy base\n", __func__); - return NULL; - } - phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG"); - if (IS_ERR_OR_NULL(phy->reg_base)) { - pr_err("%s: failed to map phy regulator base\n", __func__); - return NULL; - } - - switch (type) { - case MSM_DSI_PHY_28NM: - dsi_phy_func_init(28nm); - break; - default: - pr_err("%s: unsupported type, %d\n", __func__, type); - return NULL; - } - - phy->id = id; - - return phy; -} - -int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, - const unsigned long bit_rate, const unsigned long esc_rate) -{ - if (!phy || !phy->enable) - return -EINVAL; - return phy->enable(phy, is_dual_panel, bit_rate, esc_rate); -} - -int msm_dsi_phy_disable(struct msm_dsi_phy *phy) -{ - if (!phy || !phy->disable) - return -EINVAL; - return phy->disable(phy); -} - -void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, - u32 *clk_pre, u32 *clk_post) -{ - if (!phy) - return; - if (clk_pre) - *clk_pre = phy->timing.clk_pre; - if (clk_post) - *clk_post = phy->timing.clk_post; -} - diff --git a/kernel/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/kernel/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index 695f99d4b..80ec65e47 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/kernel/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- 
/home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c new file mode 100644 index 000000000..f1f955f57 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/platform_device.h> + +#include "dsi_phy.h" + +#define S_DIV_ROUND_UP(n, d) \ + (((n) >= 0) ? 
(((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) + +static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent, + s32 min_result, bool even) +{ + s32 v; + + v = (tmax - tmin) * percent; + v = S_DIV_ROUND_UP(v, 100) + tmin; + if (even && (v & 0x1)) + return max_t(s32, min_result, v - 1); + else + return max_t(s32, min_result, v); +} + +static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing, + s32 ui, s32 coeff, s32 pcnt) +{ + s32 tmax, tmin, clk_z; + s32 temp; + + /* reset */ + temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + if (tmin > 255) { + tmax = 511; + clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true); + } else { + tmax = 255; + clk_z = linear_inter(tmax, tmin, pcnt, 0, true); + } + + /* adjust */ + temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7; + timing->clk_zero = clk_z + 8 - temp; +} + +int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + s32 ui, lpx; + s32 tmax, tmin; + s32 pcnt0 = 10; + s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10; + s32 pcnt2 = 10; + s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2; + tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2; + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true); + + temp = lpx / ui; + if (temp & 0x1) + timing->hs_rqst = temp; + else + timing->hs_rqst = max_t(s32, 0, temp - 2); + + /* Calculate clk_zero after clk_prepare and hs_rqst */ + dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2); + + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true); + + temp = 85 * coeff + 6 * ui; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + temp = 40 * coeff + 4 * ui; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true); + + tmax = 255; + temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui; + temp = 145 * coeff + 10 * ui - temp; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true); + + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + temp = 60 * coeff + 4 * ui; + tmin = DIV_ROUND_UP(temp, ui) - 2; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true); + + tmax = 255; + tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true); + + tmax = 63; + temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; + temp = 60 * coeff + 52 * ui - 24 * ui - temp; + tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; + timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); + + tmax = 63; + temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; + temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; + temp += 8 * ui + lpx; + tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; + if (tmin > tmax) { + temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); + timing->clk_pre = temp >> 1; + } else { + timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->clk_pre, timing->clk_post, 
timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst); + + return 0; +} + +void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, + u32 bit_mask) +{ + int phy_id = phy->id; + u32 val; + + if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX)) + return; + + val = dsi_phy_read(phy->base + reg); + + if (phy->cfg->src_pll_truthtable[phy_id][pll_id]) + dsi_phy_write(phy->base + reg, val | bit_mask); + else + dsi_phy_write(phy->base + reg, val & (~bit_mask)); +} + +static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) +{ + struct regulator_bulk_data *s = phy->supplies; + const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; + struct device *dev = &phy->pdev->dev; + int num = phy->cfg->reg_cfg.num; + int i, ret; + + for (i = 0; i < num; i++) + s[i].supply = regs[i].name; + + ret = devm_regulator_bulk_get(dev, num, s); + if (ret < 0) { + dev_err(dev, "%s: failed to init regulator, ret=%d\n", + __func__, ret); + return ret; + } + + for (i = 0; i < num; i++) { + if (regulator_can_change_voltage(s[i].consumer)) { + ret = regulator_set_voltage(s[i].consumer, + regs[i].min_voltage, regs[i].max_voltage); + if (ret < 0) { + dev_err(dev, + "regulator %d set voltage failed, %d\n", + i, ret); + return ret; + } + } + } + + return 0; +} + +static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy) +{ + struct regulator_bulk_data *s = phy->supplies; + const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; + int num = phy->cfg->reg_cfg.num; + int i; + + DBG(""); + for (i = num - 1; i >= 0; i--) + if (regs[i].disable_load >= 0) + regulator_set_load(s[i].consumer, regs[i].disable_load); + + regulator_bulk_disable(num, s); +} + +static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy) +{ + struct regulator_bulk_data *s = phy->supplies; + const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; + struct device *dev = &phy->pdev->dev; + int num = phy->cfg->reg_cfg.num; + int ret, i; + + DBG(""); + for (i = 0; i < num; i++) { + if (regs[i].enable_load >= 0) { + ret = regulator_set_load(s[i].consumer, + regs[i].enable_load); + if (ret < 0) { + dev_err(dev, + "regulator %d set op mode failed, %d\n", + i, ret); + goto fail; + } + } + } + + ret = regulator_bulk_enable(num, s); + if (ret < 0) { + dev_err(dev, "regulator enable failed, %d\n", ret); + goto fail; + } + + return 0; + +fail: + for (i--; i >= 0; i--) + regulator_set_load(s[i].consumer, regs[i].disable_load); + return ret; +} + +static int dsi_phy_enable_resource(struct msm_dsi_phy *phy) +{ + struct device *dev = &phy->pdev->dev; + int ret; + + pm_runtime_get_sync(dev); + + ret = clk_prepare_enable(phy->ahb_clk); + if (ret) { + dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret); + pm_runtime_put_sync(dev); + } + + return ret; +} + +static void dsi_phy_disable_resource(struct msm_dsi_phy *phy) +{ + clk_disable_unprepare(phy->ahb_clk); + pm_runtime_put_sync(&phy->pdev->dev); +} + +static const struct of_device_id dsi_phy_dt_match[] = { +#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY + { .compatible = "qcom,dsi-phy-28nm-hpm", + .data = &dsi_phy_28nm_hpm_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-lp", + .data = &dsi_phy_28nm_lp_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY + { .compatible = "qcom,dsi-phy-20nm", + .data = &dsi_phy_20nm_cfgs }, +#endif + {} +}; + +static int dsi_phy_driver_probe(struct platform_device *pdev) +{ + struct msm_dsi_phy *phy; + struct device *dev = &pdev->dev; + const struct of_device_id *match; + 
int ret; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + match = of_match_node(dsi_phy_dt_match, dev->of_node); + if (!match) + return -ENODEV; + + phy->cfg = match->data; + phy->pdev = pdev; + + ret = of_property_read_u32(dev->of_node, + "qcom,dsi-phy-index", &phy->id); + if (ret) { + dev_err(dev, "%s: PHY index not specified, %d\n", + __func__, ret); + goto fail; + } + + phy->regulator_ldo_mode = of_property_read_bool(dev->of_node, + "qcom,dsi-phy-regulator-ldo-mode"); + + phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR(phy->base)) { + dev_err(dev, "%s: failed to map phy base\n", __func__); + ret = -ENOMEM; + goto fail; + } + + phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", + "DSI_PHY_REG"); + if (IS_ERR(phy->reg_base)) { + dev_err(dev, "%s: failed to map phy regulator base\n", + __func__); + ret = -ENOMEM; + goto fail; + } + + ret = dsi_phy_regulator_init(phy); + if (ret) { + dev_err(dev, "%s: failed to init regulator\n", __func__); + goto fail; + } + + phy->ahb_clk = devm_clk_get(dev, "iface_clk"); + if (IS_ERR(phy->ahb_clk)) { + dev_err(dev, "%s: Unable to get ahb clk\n", __func__); + ret = PTR_ERR(phy->ahb_clk); + goto fail; + } + + /* PLL init will call into clk_register which requires + * register access, so we need to enable power and ahb clock. + */ + ret = dsi_phy_enable_resource(phy); + if (ret) + goto fail; + + phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id); + if (!phy->pll) + dev_info(dev, + "%s: pll init failed, need separate pll clk driver\n", + __func__); + + dsi_phy_disable_resource(phy); + + platform_set_drvdata(pdev, phy); + + return 0; + +fail: + return ret; +} + +static int dsi_phy_driver_remove(struct platform_device *pdev) +{ + struct msm_dsi_phy *phy = platform_get_drvdata(pdev); + + if (phy && phy->pll) { + msm_dsi_pll_destroy(phy->pll); + phy->pll = NULL; + } + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver dsi_phy_platform_driver = { + .probe = dsi_phy_driver_probe, + .remove = dsi_phy_driver_remove, + .driver = { + .name = "msm_dsi_phy", + .of_match_table = dsi_phy_dt_match, + }, +}; + +void __init msm_dsi_phy_driver_register(void) +{ + platform_driver_register(&dsi_phy_platform_driver); +} + +void __exit msm_dsi_phy_driver_unregister(void) +{ + platform_driver_unregister(&dsi_phy_platform_driver); +} + +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + struct device *dev = &phy->pdev->dev; + int ret; + + if (!phy || !phy->cfg->ops.enable) + return -EINVAL; + + ret = dsi_phy_regulator_enable(phy); + if (ret) { + dev_err(dev, "%s: regulator enable failed, %d\n", + __func__, ret); + return ret; + } + + ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate); + if (ret) { + dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); + dsi_phy_regulator_disable(phy); + return ret; + } + + return 0; +} + +void msm_dsi_phy_disable(struct msm_dsi_phy *phy) +{ + if (!phy || !phy->cfg->ops.disable) + return; + + phy->cfg->ops.disable(phy); + + dsi_phy_regulator_disable(phy); +} + +void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, + u32 *clk_pre, u32 *clk_post) +{ + if (!phy) + return; + + if (clk_pre) + *clk_pre = phy->timing.clk_pre; + if (clk_post) + *clk_post = phy->timing.clk_post; +} + +struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) +{ + if (!phy) + return NULL; + + return phy->pll; +} + diff --git 
a/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h new file mode 100644 index 000000000..0456b2532 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DSI_PHY_H__ +#define __DSI_PHY_H__ + +#include <linux/regulator/consumer.h> + +#include "dsi.h" + +#define dsi_phy_read(offset) msm_readl((offset)) +#define dsi_phy_write(offset, data) msm_writel((data), (offset)) + +struct msm_dsi_phy_ops { + int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate); + void (*disable)(struct msm_dsi_phy *phy); +}; + +struct msm_dsi_phy_cfg { + enum msm_dsi_phy_type type; + struct dsi_reg_config reg_cfg; + struct msm_dsi_phy_ops ops; + + /* + * Each cell {phy_id, pll_id} of the truth table indicates + * if the source PLL selection bit should be set for each PHY. + * Fill default H/W values in illegal cells, eg. cell {0, 1}. + */ + bool src_pll_truthtable[DSI_MAX][DSI_MAX]; +}; + +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; + +struct msm_dsi_dphy_timing { + u32 clk_pre; + u32 clk_post; + u32 clk_zero; + u32 clk_trail; + u32 clk_prepare; + u32 hs_exit; + u32 hs_zero; + u32 hs_prepare; + u32 hs_trail; + u32 hs_rqst; + u32 ta_go; + u32 ta_sure; + u32 ta_get; +}; + +struct msm_dsi_phy { + struct platform_device *pdev; + void __iomem *base; + void __iomem *reg_base; + int id; + + struct clk *ahb_clk; + struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; + + struct msm_dsi_dphy_timing timing; + const struct msm_dsi_phy_cfg *cfg; + + bool regulator_ldo_mode; + + struct msm_dsi_pll *pll; +}; + +/* + * PHY internal functions + */ +int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, + const unsigned long bit_rate, const unsigned long esc_rate); +void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, + u32 bit_mask); + +#endif /* __DSI_PHY_H__ */ + diff --git a/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c new file mode 100644 index 000000000..2e9ba118d --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0, + DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1, + DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2, + DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + if (timing->clk_zero & BIT(8)) + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3, + DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4, + DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5, + DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6, + DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7, + DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8, + DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9, + DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10, + DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11, + DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); +} + +static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *base = phy->reg_base; + + if (!enable) { + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + if (phy->regulator_ldo_mode) { + dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d); + return; + } + + /* non LDO mode */ + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01); + dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03); +} + +static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00}; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + dsi_20nm_phy_regulator_ctrl(phy, true); + + dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, + DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + for (i = 0; i < 4; i++) { + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i), + (i >> 1) * 0x40); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]); + } + + 
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00); + + dsi_20nm_dphy_set_timing(phy, timing); + + dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00); + + dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06); + + /* make sure everything is written before enable */ + wmb(); + dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f); + + return 0; +} + +static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0); + dsi_20nm_phy_regulator_ctrl(phy, false); +} + +const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { + .type = MSM_DSI_PHY_20NM, + .src_pll_truthtable = { {false, true}, {false, true} }, + .reg_cfg = { + .num = 2, + .regs = { + {"vddio", 1800000, 1800000, 100000, 100}, + {"vcca", 1000000, 1000000, 10000, 100}, + }, + }, + .ops = { + .enable = dsi_20nm_phy_enable, + .disable = dsi_20nm_phy_disable, + } +}; + diff --git a/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c new file mode 100644 index 000000000..edf74110c --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, + DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, + DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, + DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + if (timing->clk_zero & BIT(8)) + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3, + DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4, + DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5, + DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6, + DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7, + DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8, + DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9, + DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10, + DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11, + DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *base = phy->reg_base; + + if (!enable) { + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); +} + +static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); + + dsi_28nm_phy_regulator_ctrl(phy, true); + + dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); + + dsi_28nm_dphy_set_timing(phy, timing); + + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); + + dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6); + + for (i = 0; i < 4; i++) { + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0); + dsi_phy_write(base + 
REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97); + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb); + + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, + DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + return 0; +} + +static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0); + dsi_28nm_phy_regulator_ctrl(phy, false); + + /* + * Wait for the registers writes to complete in order to + * ensure that the phy is completely disabled + */ + wmb(); +} + +const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { + .type = MSM_DSI_PHY_28NM_HPM, + .src_pll_truthtable = { {true, true}, {false, true} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + }, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { + .type = MSM_DSI_PHY_28NM_LP, + .src_pll_truthtable = { {true, true}, {true, true} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + }, +}; + diff --git a/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c new file mode 100644 index 000000000..5104fc9f9 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "dsi_pll.h" + +static int dsi_pll_enable(struct msm_dsi_pll *pll) +{ + int i, ret = 0; + + /* + * Certain PLLs do not allow VCO rate update when it is on. + * Keep track of their status to turn on/off after set rate success. + */ + if (unlikely(pll->pll_on)) + return 0; + + /* Try all enable sequences until one succeeds */ + for (i = 0; i < pll->en_seq_cnt; i++) { + ret = pll->enable_seqs[i](pll); + DBG("DSI PLL %s after sequence #%d", + ret ? 
"unlocked" : "locked", i + 1); + if (!ret) + break; + } + + if (ret) { + DRM_ERROR("DSI PLL failed to lock\n"); + return ret; + } + + pll->pll_on = true; + + return 0; +} + +static void dsi_pll_disable(struct msm_dsi_pll *pll) +{ + if (unlikely(!pll->pll_on)) + return; + + pll->disable_seq(pll); + + pll->pll_on = false; +} + +/* + * DSI PLL Helper functions + */ +long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw, + unsigned long rate, unsigned long *parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + + if (rate < pll->min_rate) + return pll->min_rate; + else if (rate > pll->max_rate) + return pll->max_rate; + else + return rate; +} + +int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + + return dsi_pll_enable(pll); +} + +void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + + dsi_pll_disable(pll); +} + +void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev, + struct clk **clks, u32 num_clks) +{ + of_clk_del_provider(pdev->dev.of_node); + + if (!num_clks || !clks) + return; + + do { + clk_unregister(clks[--num_clks]); + clks[num_clks] = NULL; + } while (num_clks); +} + +/* + * DSI PLL API + */ +int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, struct clk **pixel_clk_provider) +{ + if (pll->get_provider) + return pll->get_provider(pll, + byte_clk_provider, + pixel_clk_provider); + + return -EINVAL; +} + +void msm_dsi_pll_destroy(struct msm_dsi_pll *pll) +{ + if (pll->destroy) + pll->destroy(pll); +} + +void msm_dsi_pll_save_state(struct msm_dsi_pll *pll) +{ + if (pll->save_state) { + pll->save_state(pll); + pll->state_saved = true; + } +} + +int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) +{ + int ret; + + if (pll->restore_state && pll->state_saved) { + ret = pll->restore_state(pll); + if (ret) + return ret; + + pll->state_saved = false; + } + + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) +{ + struct device *dev = &pdev->dev; + struct msm_dsi_pll *pll; + + switch (type) { + case MSM_DSI_PHY_28NM_HPM: + case MSM_DSI_PHY_28NM_LP: + pll = msm_dsi_pll_28nm_init(pdev, type, id); + break; + default: + pll = ERR_PTR(-ENXIO); + break; + } + + if (IS_ERR(pll)) { + dev_err(dev, "%s: failed to init DSI PLL\n", __func__); + return NULL; + } + + pll->type = type; + + DBG("DSI:%d PLL registered", id); + + return pll; +} + diff --git a/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h new file mode 100644 index 000000000..063caa2c5 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __DSI_PLL_H__ +#define __DSI_PLL_H__ + +#include <linux/clk.h> +#include <linux/clk-provider.h> + +#include "dsi.h" + +#define NUM_DSI_CLOCKS_MAX 6 +#define MAX_DSI_PLL_EN_SEQS 10 + +struct msm_dsi_pll { + enum msm_dsi_phy_type type; + + struct clk_hw clk_hw; + bool pll_on; + bool state_saved; + + unsigned long min_rate; + unsigned long max_rate; + u32 en_seq_cnt; + + int (*enable_seqs[MAX_DSI_PLL_EN_SEQS])(struct msm_dsi_pll *pll); + void (*disable_seq)(struct msm_dsi_pll *pll); + int (*get_provider)(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider); + void (*destroy)(struct msm_dsi_pll *pll); + void (*save_state)(struct msm_dsi_pll *pll); + int (*restore_state)(struct msm_dsi_pll *pll); +}; + +#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) + +static inline void pll_write(void __iomem *reg, u32 data) +{ + msm_writel(data, reg); +} + +static inline u32 pll_read(const void __iomem *reg) +{ + return msm_readl(reg); +} + +static inline void pll_write_udelay(void __iomem *reg, u32 data, u32 delay_us) +{ + pll_write(reg, data); + udelay(delay_us); +} + +static inline void pll_write_ndelay(void __iomem *reg, u32 data, u32 delay_ns) +{ + pll_write((reg), data); + ndelay(delay_ns); +} + +/* + * DSI PLL Helper functions + */ + +/* clock callbacks */ +long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw, + unsigned long rate, unsigned long *parent_rate); +int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw); +void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw); +/* misc */ +void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev, + struct clk **clks, u32 num_clks); + +/* + * Initialization for Each PLL Type + */ +#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY +struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id); +#else +static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init( + struct platform_device *pdev, enum msm_dsi_phy_type type, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#endif /* __DSI_PLL_H__ */ + diff --git a/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c new file mode 100644 index 000000000..598fdaff0 --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c @@ -0,0 +1,647 @@ +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 28nm - clock diagram (eg: DSI0): + * + * dsi0analog_postdiv_clk + * | dsi0indirect_path_div2_clk + * | | + * +------+ | +----+ | |\ dsi0byte_mux + * dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \ | + * | +------+ +----+ | m| | +----+ + * | | u|--o--| /4 |-- dsi0pllbyte + * | | x| +----+ + * o--------------------------| / + * | |/ + * | +------+ + * o----------| DIV3 |------------------------- dsi0pll + * +------+ + */ + +#define POLL_MAX_READS 10 +#define POLL_TIMEOUT_US 50 + +#define NUM_PROVIDED_CLKS 2 + +#define VCO_REF_CLK_RATE 19200000 +#define VCO_MIN_RATE 350000000 +#define VCO_MAX_RATE 750000000 + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 + +#define LPFR_LUT_SIZE 10 +struct lpfr_cfg { + unsigned long vco_rate; + u32 resistance; +}; + +/* Loop filter resistance: */ +static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = { + { 479500000, 8 }, + { 480000000, 11 }, + { 575500000, 8 }, + { 576000000, 12 }, + { 610500000, 8 }, + { 659500000, 9 }, + { 671500000, 10 }, + { 672000000, 14 }, + { 708500000, 10 }, + { 750000000, 11 }, +}; + +struct pll_28nm_cached_state { + unsigned long vco_rate; + u8 postdiv3; + u8 postdiv1; + u8 byte_mux; +}; + +struct dsi_pll_28nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + void __iomem *mmio; + + int vco_delay; + + /* private clocks: */ + struct clk *clks[NUM_DSI_CLOCKS_MAX]; + u32 num_clks; + + /* clock-provider: */ + struct clk *provided_clks[NUM_PROVIDED_CLKS]; + struct clk_onecell_data clk_data; + + struct pll_28nm_cached_state cached_state; +}; + +#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base) + +static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm, + u32 nb_tries, u32 timeout_us) +{ + bool pll_locked = false; + u32 val; + + while (nb_tries--) { + val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS); + pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY); + + if (pll_locked) + break; + + udelay(timeout_us); + } + DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); + + return pll_locked; +} + +static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm) +{ + void __iomem *base = pll_28nm->mmio; + + /* + * Add HW recommended delays after toggling the software + * reset bit off and back on. + */ + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, + DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1); + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1); +} + +/* + * Clock Callbacks + */ +static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + struct device *dev = &pll_28nm->pdev->dev; + void __iomem *base = pll_28nm->mmio; + unsigned long div_fbx1000, gen_vco_clk; + u32 refclk_cfg, frac_n_mode, frac_n_value; + u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3; + u32 cal_cfg10, cal_cfg11; + u32 rem; + int i; + + VERB("rate=%lu, parent's=%lu", rate, parent_rate); + + /* Force postdiv2 to be div-4 */ + pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3); + + /* Configure the Loop filter resistance */ + for (i = 0; i < LPFR_LUT_SIZE; i++) + if (rate <= lpfr_lut[i].vco_rate) + break; + if (i == LPFR_LUT_SIZE) { + dev_err(dev, "unable to get loop filter resistance. 
vco=%lu\n", + rate); + return -EINVAL; + } + pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance); + + /* Loop filter capacitance values : c1 and c2 */ + pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70); + pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15); + + rem = rate % VCO_REF_CLK_RATE; + if (rem) { + refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR; + frac_n_mode = 1; + div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500); + gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500); + } else { + refclk_cfg = 0x0; + frac_n_mode = 0; + div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000); + gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000); + } + + DBG("refclk_cfg = %d", refclk_cfg); + + rem = div_fbx1000 % 1000; + frac_n_value = (rem << 16) / 1000; + + DBG("div_fb = %lu", div_fbx1000); + DBG("frac_n_value = %d", frac_n_value); + + DBG("Generated VCO Clock: %lu", gen_vco_clk); + rem = 0; + sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1); + sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK; + if (frac_n_mode) { + sdm_cfg0 = 0x0; + sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0); + sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET( + (u32)(((div_fbx1000 / 1000) & 0x3f) - 1)); + sdm_cfg3 = frac_n_value >> 8; + sdm_cfg2 = frac_n_value & 0xff; + } else { + sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP; + sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV( + (u32)(((div_fbx1000 / 1000) & 0x3f) - 1)); + sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0); + sdm_cfg2 = 0; + sdm_cfg3 = 0; + } + + DBG("sdm_cfg0=%d", sdm_cfg0); + DBG("sdm_cfg1=%d", sdm_cfg1); + DBG("sdm_cfg2=%d", sdm_cfg2); + DBG("sdm_cfg3=%d", sdm_cfg3); + + cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000)); + cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000); + DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11); + + pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06); + pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d); + + pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1); + pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2, + DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2)); + pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3, + DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3)); + pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00); + + /* Add hardware recommended delay for correct PLL configuration */ + if (pll_28nm->vco_delay) + udelay(pll_28nm->vco_delay); + + pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg); + pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00); + pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31); + pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff); + pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff); + pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20); + + return 0; +} + +static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + + return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS, + POLL_TIMEOUT_US); +} + +static unsigned long 
dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + void __iomem *base = pll_28nm->mmio; + u32 sdm0, doubler, sdm_byp_div; + u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3; + u32 ref_clk = VCO_REF_CLK_RATE; + unsigned long vco_rate; + + VERB("parent_rate=%lu", parent_rate); + + /* Check to see if the ref clk doubler is enabled */ + doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) & + DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR; + ref_clk += (doubler * VCO_REF_CLK_RATE); + + /* see if it is integer mode or sdm mode */ + sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0); + if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) { + /* integer mode */ + sdm_byp_div = FIELD( + pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0), + DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1; + vco_rate = ref_clk * sdm_byp_div; + } else { + /* sdm mode */ + sdm_dc_off = FIELD( + pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1), + DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET); + DBG("sdm_dc_off = %d", sdm_dc_off); + sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2), + DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0); + sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3), + DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8); + sdm_freq_seed = (sdm3 << 8) | sdm2; + DBG("sdm_freq_seed = %d", sdm_freq_seed); + + vco_rate = (ref_clk * (sdm_dc_off + 1)) + + mult_frac(ref_clk, sdm_freq_seed, BIT(16)); + DBG("vco rate = %lu", vco_rate); + } + + DBG("returning vco rate = %lu", vco_rate); + + return vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_28nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_28nm_clk_set_rate, + .recalc_rate = dsi_pll_28nm_clk_recalc_rate, + .prepare = msm_dsi_pll_helper_clk_prepare, + .unprepare = msm_dsi_pll_helper_clk_unprepare, + .is_enabled = dsi_pll_28nm_clk_is_enabled, +}; + +/* + * PLL Callbacks + */ +static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + struct device *dev = &pll_28nm->pdev->dev; + void __iomem *base = pll_28nm->mmio; + u32 max_reads = 5, timeout_us = 100; + bool locked; + u32 val; + int i; + + DBG("id=%d", pll_28nm->id); + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. + */ + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600); + + for (i = 0; i < 2; i++) { + /* DSI Uniphy lock detect setting */ + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, + 0x0c, 100); + pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d); + + /* poll for PLL ready status */ + locked = pll_28nm_poll_for_ready(pll_28nm, + max_reads, timeout_us); + if (locked) + break; + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. 
+ */ + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250); + + val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600); + } + + if (unlikely(!locked)) + dev_err(dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL Lock success"); + + return locked ? 0 : -EINVAL; +} + +static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + struct device *dev = &pll_28nm->pdev->dev; + void __iomem *base = pll_28nm->mmio; + bool locked; + u32 max_reads = 10, timeout_us = 50; + u32 val; + + DBG("id=%d", pll_28nm->id); + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. + */ + pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500); + + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B | + DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500); + + /* DSI PLL toggle lock detect setting */ + pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500); + pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512); + + locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us); + + if (unlikely(!locked)) + dev_err(dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL lock success"); + + return locked ? 0 : -EINVAL; +} + +static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + + DBG("id=%d", pll_28nm->id); + pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00); +} + +static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state; + void __iomem *base = pll_28nm->mmio; + + cached_state->postdiv3 = + pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG); + cached_state->postdiv1 = + pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG); + cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG); + cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); +} + +static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state; + void __iomem *base = pll_28nm->mmio; + int ret; + + ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw, + cached_state->vco_rate, 0); + if (ret) { + dev_err(&pll_28nm->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + + pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, + cached_state->postdiv3); + pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, + cached_state->postdiv1); + pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG, + cached_state->byte_mux); + + return 0; +} + +static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + + if (byte_clk_provider) + *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK]; + if (pixel_clk_provider) + *pixel_clk_provider = + pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK]; + + return 0; +} + +static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll); + int i; + + msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev, + pll_28nm->clks, pll_28nm->num_clks); + + for (i = 0; i < NUM_PROVIDED_CLKS; i++) + pll_28nm->provided_clks[i] = NULL; + + pll_28nm->num_clks = 0; + pll_28nm->clk_data.clks = NULL; + pll_28nm->clk_data.clk_num = 0; +} + +static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) +{ + char clk_name[32], parent1[32], parent2[32], vco_name[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .name = vco_name, + .ops = &clk_ops_dsi_pll_28nm_vco, + }; + struct device *dev = &pll_28nm->pdev->dev; + struct clk **clks = pll_28nm->clks; + struct clk **provided_clks = pll_28nm->provided_clks; + int num = 0; + int ret; + + DBG("%d", pll_28nm->id); + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id); + pll_28nm->base.clk_hw.init = &vco_init; + clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw); + + snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id); + snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id); + clks[num++] = clk_register_divider(dev, clk_name, + parent1, CLK_SET_RATE_PARENT, + pll_28nm->mmio + + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, + 0, 4, 0, NULL); + + snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id); + snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id); + clks[num++] = clk_register_fixed_factor(dev, clk_name, + parent1, CLK_SET_RATE_PARENT, + 1, 2); + + snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id); + snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id); + clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] = + clk_register_divider(dev, clk_name, + parent1, 0, pll_28nm->mmio + + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, + 0, 8, 0, NULL); + + snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id); + snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id); + snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id); + clks[num++] = clk_register_mux(dev, clk_name, + (const char *[]){ + parent1, parent2 + }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio + + REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL); + + snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id); + snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id); + clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] = + clk_register_fixed_factor(dev, clk_name, + parent1, CLK_SET_RATE_PARENT, 1, 4); + + pll_28nm->num_clks = num; + + pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS; + pll_28nm->clk_data.clks = provided_clks; + + ret = of_clk_add_provider(dev->of_node, + of_clk_src_onecell_get, &pll_28nm->clk_data); + if (ret) { + dev_err(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev, + enum 
msm_dsi_phy_type type, int id) +{ + struct dsi_pll_28nm *pll_28nm; + struct msm_dsi_pll *pll; + int ret; + + if (!pdev) + return ERR_PTR(-ENODEV); + + pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL); + if (!pll_28nm) + return ERR_PTR(-ENOMEM); + + pll_28nm->pdev = pdev; + pll_28nm->id = id; + + pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_28nm->mmio)) { + dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__); + return ERR_PTR(-ENOMEM); + } + + pll = &pll_28nm->base; + pll->min_rate = VCO_MIN_RATE; + pll->max_rate = VCO_MAX_RATE; + pll->get_provider = dsi_pll_28nm_get_provider; + pll->destroy = dsi_pll_28nm_destroy; + pll->disable_seq = dsi_pll_28nm_disable_seq; + pll->save_state = dsi_pll_28nm_save_state; + pll->restore_state = dsi_pll_28nm_restore_state; + + if (type == MSM_DSI_PHY_28NM_HPM) { + pll_28nm->vco_delay = 1; + + pll->en_seq_cnt = 3; + pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm; + pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm; + pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm; + } else if (type == MSM_DSI_PHY_28NM_LP) { + pll_28nm->vco_delay = 1000; + + pll->en_seq_cnt = 1; + pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp; + } else { + dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type); + return ERR_PTR(-EINVAL); + } + + ret = pll_28nm_register(pll_28nm); + if (ret) { + dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + return pll; +} + diff --git a/kernel/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/kernel/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 50ff9851d..7d7662e69 100644 --- a/kernel/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/kernel/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 
37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -45,7 +45,18 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#define REG_SFPB_CFG 0x00000058 +enum sfpb_ahb_arb_master_port_en { + SFPB_MASTER_PORT_ENABLE = 3, + SFPB_MASTER_PORT_DISABLE = 0, +}; + +#define REG_SFPB_GPREG 0x00000058 +#define SFPB_GPREG_MASTER_PORT_EN__MASK 0x00001800 +#define SFPB_GPREG_MASTER_PORT_EN__SHIFT 11 +static inline uint32_t SFPB_GPREG_MASTER_PORT_EN(enum sfpb_ahb_arb_master_port_en val) +{ + return ((val) << SFPB_GPREG_MASTER_PORT_EN__SHIFT) & SFPB_GPREG_MASTER_PORT_EN__MASK; +} #endif /* SFPB_XML */ diff --git a/kernel/drivers/gpu/drm/msm/edp/edp.xml.h b/kernel/drivers/gpu/drm/msm/edp/edp.xml.h index a29f1df15..90bf5ed46 100644 --- a/kernel/drivers/gpu/drm/msm/edp/edp.xml.h +++ b/kernel/drivers/gpu/drm/msm/edp/edp.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, 
from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -288,5 +288,92 @@ static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + #define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598 +#define REG_EDP_28nm_PHY_PLL_REFCLK_CFG 0x00000000 + +#define REG_EDP_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 + +#define REG_EDP_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008 + +#define REG_EDP_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c + +#define REG_EDP_28nm_PHY_PLL_VREG_CFG 0x00000010 + +#define REG_EDP_28nm_PHY_PLL_PWRGEN_CFG 0x00000014 + +#define REG_EDP_28nm_PHY_PLL_DMUX_CFG 0x00000018 + +#define REG_EDP_28nm_PHY_PLL_AMUX_CFG 0x0000001c + +#define REG_EDP_28nm_PHY_PLL_GLB_CFG 0x00000020 +#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001 +#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002 +#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004 +#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008 + +#define REG_EDP_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024 + +#define REG_EDP_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028 + +#define REG_EDP_28nm_PHY_PLL_LPFR_CFG 0x0000002c + +#define REG_EDP_28nm_PHY_PLL_LPFC1_CFG 0x00000030 + +#define REG_EDP_28nm_PHY_PLL_LPFC2_CFG 0x00000034 + +#define REG_EDP_28nm_PHY_PLL_SDM_CFG0 0x00000038 + +#define REG_EDP_28nm_PHY_PLL_SDM_CFG1 0x0000003c + +#define REG_EDP_28nm_PHY_PLL_SDM_CFG2 0x00000040 + +#define REG_EDP_28nm_PHY_PLL_SDM_CFG3 0x00000044 + +#define REG_EDP_28nm_PHY_PLL_SDM_CFG4 0x00000048 + +#define REG_EDP_28nm_PHY_PLL_SSC_CFG0 0x0000004c + +#define REG_EDP_28nm_PHY_PLL_SSC_CFG1 0x00000050 + +#define REG_EDP_28nm_PHY_PLL_SSC_CFG2 0x00000054 + +#define REG_EDP_28nm_PHY_PLL_SSC_CFG3 0x00000058 + +#define REG_EDP_28nm_PHY_PLL_LKDET_CFG0 0x0000005c + +#define REG_EDP_28nm_PHY_PLL_LKDET_CFG1 0x00000060 + +#define REG_EDP_28nm_PHY_PLL_LKDET_CFG2 0x00000064 + +#define REG_EDP_28nm_PHY_PLL_TEST_CFG 0x00000068 +#define EDP_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG0 0x0000006c + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG1 0x00000070 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG2 0x00000074 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG3 0x00000078 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG4 0x0000007c + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG5 0x00000080 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG6 0x00000084 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG7 0x00000088 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG8 0x0000008c + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG9 0x00000090 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG10 0x00000094 + +#define REG_EDP_28nm_PHY_PLL_CAL_CFG11 0x00000098 + +#define REG_EDP_28nm_PHY_PLL_EFUSE_CFG 0x0000009c + +#define REG_EDP_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 + #endif /* EDP_XML */ diff --git a/kernel/drivers/gpu/drm/msm/edp/edp_aux.c b/kernel/drivers/gpu/drm/msm/edp/edp_aux.c index 208f9d47f..82789dd24 100644 --- a/kernel/drivers/gpu/drm/msm/edp/edp_aux.c +++ 
b/kernel/drivers/gpu/drm/msm/edp/edp_aux.c @@ -115,10 +115,12 @@ static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) * msm_edp_aux_ctrl() running concurrently in other threads, i.e. * start transaction only when AUX channel is fully enabled. */ -ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg) +static ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) { struct edp_aux *aux = to_edp_aux(drm_aux); ssize_t ret; + unsigned long time_left; bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); @@ -147,15 +149,17 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg) goto unlock_exit; DBG("wait_for_completion"); - ret = wait_for_completion_timeout(&aux->msg_comp, 300); - if (ret <= 0) { + time_left = wait_for_completion_timeout(&aux->msg_comp, + msecs_to_jiffies(300)); + if (!time_left) { /* * Clear GO and reset AUX channel * to cancel the current transaction. */ edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); msm_edp_aux_ctrl(aux, 1); - pr_err("%s: aux timeout, %zd\n", __func__, ret); + pr_err("%s: aux timeout,\n", __func__); + ret = -ETIMEDOUT; goto unlock_exit; } DBG("completion"); diff --git a/kernel/drivers/gpu/drm/msm/edp/edp_ctrl.c b/kernel/drivers/gpu/drm/msm/edp/edp_ctrl.c index 29e52d7c6..81200e9be 100644 --- a/kernel/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/kernel/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -373,7 +373,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl) struct device *dev = &ctrl->pdev->dev; int ret; - ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd"); + ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN); if (IS_ERR(ctrl->panel_hpd_gpio)) { ret = PTR_ERR(ctrl->panel_hpd_gpio); ctrl->panel_hpd_gpio = NULL; @@ -381,13 +381,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl) return ret; } - ret = gpiod_direction_input(ctrl->panel_hpd_gpio); - if (ret) { - pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret); - return ret; - } - - ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en"); + ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW); if (IS_ERR(ctrl->panel_en_gpio)) { ret = PTR_ERR(ctrl->panel_en_gpio); ctrl->panel_en_gpio = NULL; @@ -395,13 +389,6 @@ static int edp_gpio_config(struct edp_ctrl *ctrl) return ret; } - ret = gpiod_direction_output(ctrl->panel_en_gpio, 0); - if (ret) { - pr_err("%s: Set direction for panel_en failed, %d\n", - __func__, ret); - return ret; - } - DBG("gpio on"); return 0; @@ -1018,7 +1005,7 @@ static void edp_ctrl_off_worker(struct work_struct *work) { struct edp_ctrl *ctrl = container_of( work, struct edp_ctrl, off_work); - int ret; + unsigned long time_left; mutex_lock(&ctrl->dev_mutex); @@ -1030,11 +1017,10 @@ static void edp_ctrl_off_worker(struct work_struct *work) reinit_completion(&ctrl->idle_comp); edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE); - ret = wait_for_completion_timeout(&ctrl->idle_comp, + time_left = wait_for_completion_timeout(&ctrl->idle_comp, msecs_to_jiffies(500)); - if (ret <= 0) - DBG("%s: idle pattern timedout, %d\n", - __func__, ret); + if (!time_left) + DBG("%s: idle pattern timedout\n", __func__); edp_state_ctrl(ctrl, 0); diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi.c index 814536202..1f4a95eeb 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -22,7 +22,9 @@ void 
hdmi_set_mode(struct hdmi *hdmi, bool power_on) { uint32_t ctrl = 0; + unsigned long flags; + spin_lock_irqsave(&hdmi->reg_lock, flags); if (power_on) { ctrl |= HDMI_CTRL_ENABLE; if (!hdmi->hdmi_mode) { @@ -37,6 +39,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) } hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); DBG("HDMI Core: %s, HDMI_CTRL=0x%08x", power_on ? "Enable" : "Disable", ctrl); } @@ -51,6 +54,10 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id) /* Process DDC: */ hdmi_i2c_irq(hdmi->i2c); + /* Process HDCP: */ + if (hdmi->hdcp_ctrl) + hdmi_hdcp_irq(hdmi->hdcp_ctrl); + /* TODO audio.. */ return IRQ_HANDLED; @@ -60,6 +67,15 @@ static void hdmi_destroy(struct hdmi *hdmi) { struct hdmi_phy *phy = hdmi->phy; + /* + * at this point, hpd has been disabled, + * after flush workq, it's safe to deinit hdcp + */ + if (hdmi->workq) { + flush_workqueue(hdmi->workq); + destroy_workqueue(hdmi->workq); + } + hdmi_hdcp_destroy(hdmi); if (phy) phy->funcs->destroy(phy); @@ -77,6 +93,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) { struct hdmi_platform_config *config = pdev->dev.platform_data; struct hdmi *hdmi = NULL; + struct resource *res; int i, ret; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); @@ -87,18 +104,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->pdev = pdev; hdmi->config = config; + spin_lock_init(&hdmi->reg_lock); /* not sure about which phy maps to which msm.. probably I miss some */ - if (config->phy_init) + if (config->phy_init) { hdmi->phy = config->phy_init(hdmi); - else - hdmi->phy = ERR_PTR(-ENXIO); - if (IS_ERR(hdmi->phy)) { - ret = PTR_ERR(hdmi->phy); - dev_err(&pdev->dev, "failed to load phy: %d\n", ret); - hdmi->phy = NULL; - goto fail; + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + dev_err(&pdev->dev, "failed to load phy: %d\n", ret); + hdmi->phy = NULL; + goto fail; + } } hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); @@ -107,6 +124,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } + /* HDCP needs physical address of hdmi register */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + config->mmio_name); + hdmi->mmio_phy_addr = res->start; + + hdmi->qfprom_mmio = msm_ioremap(pdev, + config->qfprom_mmio_name, "HDMI_QFPROM"); + if (IS_ERR(hdmi->qfprom_mmio)) { + dev_info(&pdev->dev, "can't find qfprom resource\n"); + hdmi->qfprom_mmio = NULL; + } + hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) * config->hpd_reg_cnt, GFP_KERNEL); if (!hdmi->hpd_regs) { @@ -189,6 +218,8 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->pwr_clks[i] = clk; } + hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); + hdmi->i2c = hdmi_i2c_init(hdmi); if (IS_ERR(hdmi->i2c)) { ret = PTR_ERR(hdmi->i2c); @@ -197,6 +228,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } + hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi); + if (IS_ERR(hdmi->hdcp_ctrl)) { + dev_warn(&pdev->dev, "failed to init hdcp: disabled\n"); + hdmi->hdcp_ctrl = NULL; + } + return hdmi; fail: @@ -291,6 +328,9 @@ fail: .item ## _names = item ##_names_ ## entry, \ .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) +static const char *pwr_reg_names_none[] = {}; +static const char *hpd_reg_names_none[] = {}; + static struct hdmi_platform_config hdmi_tx_8660_config = { .phy_init = hdmi_phy_8x60_init, }; @@ -310,7 +350,7 @@ static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"}; static const 
char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"}; static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; -static struct hdmi_platform_config hdmi_tx_8074_config = { +static struct hdmi_platform_config hdmi_tx_8974_config = { .phy_init = hdmi_phy_8x74_init, HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, 8x74), @@ -330,9 +370,29 @@ static struct hdmi_platform_config hdmi_tx_8084_config = { .hpd_freq = hpd_clk_freq_8x74, }; +static struct hdmi_platform_config hdmi_tx_8994_config = { + .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */ + HDMI_CFG(pwr_reg, 8x74), + HDMI_CFG(hpd_reg, none), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + +static struct hdmi_platform_config hdmi_tx_8996_config = { + .phy_init = NULL, + HDMI_CFG(pwr_reg, none), + HDMI_CFG(hpd_reg, none), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + static const struct of_device_id dt_match[] = { + { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config }, + { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config }, { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, - { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config }, + { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config }, { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config }, { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config }, {} @@ -347,8 +407,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char snprintf(name2, sizeof(name2), "%s-gpio", name); gpio = of_get_named_gpio(of_node, name2, 0); if (gpio < 0) { - dev_err(dev, "failed to get gpio: %s (%d)\n", - name, gpio); + DBG("failed to get gpio: %s (%d)", name, gpio); gpio = -1; } } @@ -376,6 +435,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) } hdmi_cfg->mmio_name = "core_physical"; + hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); @@ -391,7 +451,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) if (cpu_is_apq8064()) { static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; config.phy_init = hdmi_phy_8960_init; - config.mmio_name = "hdmi_msm_hdmi_addr"; config.hpd_reg_names = hpd_reg_names; config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); config.hpd_clk_names = hpd_clk_names; @@ -404,7 +463,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) { static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; config.phy_init = hdmi_phy_8960_init; - config.mmio_name = "hdmi_msm_hdmi_addr"; config.hpd_reg_names = hpd_reg_names; config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); config.hpd_clk_names = hpd_clk_names; @@ -419,7 +477,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) "8901_hdmi_mvs", "8901_mpp0" }; config.phy_init = hdmi_phy_8x60_init; - config.mmio_name = "hdmi_msm_hdmi_addr"; config.hpd_reg_names = hpd_reg_names; config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); config.hpd_clk_names = hpd_clk_names; @@ -430,6 +487,9 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) config.mux_en_gpio = -1; config.mux_sel_gpio = -1; } + config.mmio_name = "hdmi_msm_hdmi_addr"; + 
config.qfprom_mmio_name = "hdmi_msm_qfprom_addr"; + hdmi_cfg = &config; #endif dev->platform_data = hdmi_cfg; diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi.h b/kernel/drivers/gpu/drm/msm/hdmi/hdmi.h index 68fdfb362..d0e663192 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -37,6 +37,8 @@ struct hdmi_audio { int rate; }; +struct hdmi_hdcp_ctrl; + struct hdmi { struct drm_device *dev; struct platform_device *pdev; @@ -51,6 +53,8 @@ struct hdmi { unsigned long int pixclock; void __iomem *mmio; + void __iomem *qfprom_mmio; + phys_addr_t mmio_phy_addr; struct regulator **hpd_regs; struct regulator **pwr_regs; @@ -68,12 +72,25 @@ struct hdmi { bool hdmi_mode; /* are we in hdmi mode? */ int irq; + struct workqueue_struct *workq; + + struct hdmi_hdcp_ctrl *hdcp_ctrl; + + /* + * spinlock to protect registers shared by different execution + * REG_HDMI_CTRL + * REG_HDMI_DDC_ARBITRATION + * REG_HDMI_HDCP_INT_CTRL + * REG_HDMI_HPD_CTRL + */ + spinlock_t reg_lock; }; /* platform config data (ie. from DT, or pdata) */ struct hdmi_platform_config { struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); const char *mmio_name; + const char *qfprom_mmio_name; /* regulators that need to be on for hpd: */ const char **hpd_reg_names; @@ -109,6 +126,11 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) return msm_readl(hdmi->mmio + reg); } +static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg) +{ + return msm_readl(hdmi->qfprom_mmio + reg); +} + /* * The phy appears to be different, for example between 8960 and 8x60, * so split the phy related functions out and load the correct one at @@ -117,7 +139,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) struct hdmi_phy_funcs { void (*destroy)(struct hdmi_phy *phy); - void (*reset)(struct hdmi_phy *phy); void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock); void (*powerdown)(struct hdmi_phy *phy); }; @@ -163,4 +184,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c); void hdmi_i2c_destroy(struct i2c_adapter *i2c); struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi); +/* + * hdcp + */ +struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi); +void hdmi_hdcp_destroy(struct hdmi *hdmi); +void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl); +void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl); +void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl); + #endif /* __HDMI_CONNECTOR_H__ */ diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/kernel/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 350988740..10c45700a 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 
19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -441,6 +441,12 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) #define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288 +#define REG_HDMI_CEC_CTRL 0x0000028c + +#define REG_HDMI_CEC_WR_DATA 0x00000290 + +#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294 + #define REG_HDMI_CEC_STATUS 0x00000298 #define REG_HDMI_CEC_INT 0x0000029c @@ -750,5 +756,92 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_8x74_BIST_PATN3 0x00000048 +#define REG_HDMI_28nm_PHY_PLL_REFCLK_CFG 0x00000000 + +#define REG_HDMI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 + +#define REG_HDMI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008 + +#define REG_HDMI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c + +#define REG_HDMI_28nm_PHY_PLL_VREG_CFG 0x00000010 + +#define REG_HDMI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014 + +#define REG_HDMI_28nm_PHY_PLL_DMUX_CFG 0x00000018 + +#define REG_HDMI_28nm_PHY_PLL_AMUX_CFG 0x0000001c + +#define REG_HDMI_28nm_PHY_PLL_GLB_CFG 0x00000020 +#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001 +#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002 +#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004 +#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008 + +#define REG_HDMI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024 + +#define REG_HDMI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028 + +#define REG_HDMI_28nm_PHY_PLL_LPFR_CFG 0x0000002c + +#define REG_HDMI_28nm_PHY_PLL_LPFC1_CFG 0x00000030 + +#define REG_HDMI_28nm_PHY_PLL_LPFC2_CFG 0x00000034 + +#define REG_HDMI_28nm_PHY_PLL_SDM_CFG0 0x00000038 + +#define REG_HDMI_28nm_PHY_PLL_SDM_CFG1 0x0000003c + +#define REG_HDMI_28nm_PHY_PLL_SDM_CFG2 0x00000040 + +#define REG_HDMI_28nm_PHY_PLL_SDM_CFG3 0x00000044 + +#define REG_HDMI_28nm_PHY_PLL_SDM_CFG4 0x00000048 + +#define REG_HDMI_28nm_PHY_PLL_SSC_CFG0 0x0000004c + +#define REG_HDMI_28nm_PHY_PLL_SSC_CFG1 0x00000050 + +#define REG_HDMI_28nm_PHY_PLL_SSC_CFG2 0x00000054 + 
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG3 0x00000058 + +#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c + +#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG1 0x00000060 + +#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG2 0x00000064 + +#define REG_HDMI_28nm_PHY_PLL_TEST_CFG 0x00000068 +#define HDMI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG0 0x0000006c + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG1 0x00000070 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG2 0x00000074 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG3 0x00000078 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG4 0x0000007c + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG5 0x00000080 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG6 0x00000084 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG7 0x00000088 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG8 0x0000008c + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG9 0x00000090 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG10 0x00000094 + +#define REG_HDMI_28nm_PHY_PLL_CAL_CFG11 0x00000098 + +#define REG_HDMI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c + +#define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 + #endif /* HDMI_XML */ diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index 872485f60..df232e20c 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -203,7 +203,6 @@ int hdmi_audio_update(struct hdmi *hdmi) audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4); audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE; } else { - hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE); acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT; acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND; vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE; diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index a7a1d8267..92b69ae8c 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -100,8 +100,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) hdmi_audio_update(hdmi); } - phy->funcs->powerup(phy, hdmi->pixclock); + if (phy) + phy->funcs->powerup(phy, hdmi->pixclock); + hdmi_set_mode(hdmi, true); + + if (hdmi->hdcp_ctrl) + hdmi_hdcp_on(hdmi->hdcp_ctrl); } static void hdmi_bridge_enable(struct drm_bridge *bridge) @@ -118,9 +123,14 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge) struct hdmi *hdmi = hdmi_bridge->hdmi; struct hdmi_phy *phy = hdmi->phy; + if (hdmi->hdcp_ctrl) + hdmi_hdcp_off(hdmi->hdcp_ctrl); + DBG("power down"); hdmi_set_mode(hdmi, false); - phy->funcs->powerdown(phy); + + if (phy) + phy->funcs->powerdown(phy); if (hdmi->power_on) { power_off(bridge); @@ -142,8 +152,6 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge, hdmi->pixclock = mode->clock * 1000; - hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1; - hstart = mode->htotal - mode->hsync_start; hend = mode->htotal - mode->hsync_start + mode->hdisplay; diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index b62cdb968..a3b05ae52 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -16,6 +16,7 @@ */ #include <linux/gpio.h> +#include <linux/pinctrl/consumer.h> #include "msm_kms.h" #include "hdmi.h" @@ -27,32 +28,85 @@ struct hdmi_connector { }; #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) +static void hdmi_phy_reset(struct hdmi *hdmi) +{ + unsigned int val; + + val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); + + 
if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } + + if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET_PLL); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET_PLL); + } + + msleep(100); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } + + if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET_PLL); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET_PLL); + } +} + static int gpio_config(struct hdmi *hdmi, bool on) { - struct drm_device *dev = hdmi->dev; + struct device *dev = &hdmi->pdev->dev; const struct hdmi_platform_config *config = hdmi->config; int ret; if (on) { - ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); - if (ret) { - dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); - goto error1; + if (config->ddc_clk_gpio != -1) { + ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); + if (ret) { + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); + goto error1; + } + gpio_set_value_cansleep(config->ddc_clk_gpio, 1); } - gpio_set_value_cansleep(config->ddc_clk_gpio, 1); - ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); - if (ret) { - dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_DATA", config->ddc_data_gpio, ret); - goto error2; + if (config->ddc_data_gpio != -1) { + ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); + if (ret) { + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_DDC_DATA", config->ddc_data_gpio, ret); + goto error2; + } + gpio_set_value_cansleep(config->ddc_data_gpio, 1); } - gpio_set_value_cansleep(config->ddc_data_gpio, 1); ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); if (ret) { - dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", "HDMI_HPD", config->hpd_gpio, ret); goto error3; } @@ -62,7 +116,7 @@ static int gpio_config(struct hdmi *hdmi, bool on) if (config->mux_en_gpio != -1) { ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN"); if (ret) { - dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", "HDMI_MUX_EN", config->mux_en_gpio, ret); goto error4; } @@ -72,7 +126,7 @@ static int gpio_config(struct hdmi *hdmi, bool on) if (config->mux_sel_gpio != -1) { ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL"); if (ret) { - dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", "HDMI_MUX_SEL", config->mux_sel_gpio, ret); goto error5; } @@ -83,7 +137,7 @@ static int gpio_config(struct hdmi *hdmi, bool on) ret = gpio_request(config->mux_lpm_gpio, "HDMI_MUX_LPM"); if (ret) { - dev_err(dev->dev, + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", "HDMI_MUX_LPM", config->mux_lpm_gpio, ret); @@ -93,8 +147,12 @@ static int gpio_config(struct hdmi *hdmi, bool on) } DBG("gpio on"); } else { - gpio_free(config->ddc_clk_gpio); - 
gpio_free(config->ddc_data_gpio); + if (config->ddc_clk_gpio != -1) + gpio_free(config->ddc_clk_gpio); + + if (config->ddc_data_gpio != -1) + gpio_free(config->ddc_data_gpio); + gpio_free(config->hpd_gpio); if (config->mux_en_gpio != -1) { @@ -125,9 +183,11 @@ error5: error4: gpio_free(config->hpd_gpio); error3: - gpio_free(config->ddc_data_gpio); + if (config->ddc_data_gpio != -1) + gpio_free(config->ddc_data_gpio); error2: - gpio_free(config->ddc_clk_gpio); + if (config->ddc_clk_gpio != -1) + gpio_free(config->ddc_clk_gpio); error1: return ret; } @@ -136,23 +196,29 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) { struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; - struct drm_device *dev = hdmi_connector->base.dev; - struct hdmi_phy *phy = hdmi->phy; + struct device *dev = &hdmi->pdev->dev; uint32_t hpd_ctrl; int i, ret; + unsigned long flags; for (i = 0; i < config->hpd_reg_cnt; i++) { ret = regulator_enable(hdmi->hpd_regs[i]); if (ret) { - dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n", + dev_err(dev, "failed to enable hpd regulator: %s (%d)\n", config->hpd_reg_names[i], ret); goto fail; } } + ret = pinctrl_pm_select_default_state(dev); + if (ret) { + dev_err(dev, "pinctrl state chg failed: %d\n", ret); + goto fail; + } + ret = gpio_config(hdmi, true); if (ret) { - dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret); + dev_err(dev, "failed to configure GPIOs: %d\n", ret); goto fail; } @@ -161,20 +227,20 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) ret = clk_set_rate(hdmi->hpd_clks[i], config->hpd_freq[i]); if (ret) - dev_warn(dev->dev, "failed to set clk %s (%d)\n", + dev_warn(dev, "failed to set clk %s (%d)\n", config->hpd_clk_names[i], ret); } ret = clk_prepare_enable(hdmi->hpd_clks[i]); if (ret) { - dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n", + dev_err(dev, "failed to enable hpd clk: %s (%d)\n", config->hpd_clk_names[i], ret); goto fail; } } hdmi_set_mode(hdmi, false); - phy->funcs->reset(phy); + hdmi_phy_reset(hdmi); hdmi_set_mode(hdmi, true); hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); @@ -185,6 +251,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) HDMI_HPD_INT_CTRL_INT_EN); /* set timeout to 4.1ms (max) for hardware debounce */ + spin_lock_irqsave(&hdmi->reg_lock, flags); hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff); @@ -193,6 +260,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl); hdmi_write(hdmi, REG_HDMI_HPD_CTRL, HDMI_HPD_CTRL_ENABLE | hpd_ctrl); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); return 0; @@ -204,7 +272,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) { struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; - struct drm_device *dev = hdmi_connector->base.dev; + struct device *dev = &hdmi->pdev->dev; int i, ret = 0; /* Disable HPD interrupt */ @@ -217,12 +285,16 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) ret = gpio_config(hdmi, false); if (ret) - dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret); + dev_warn(dev, "failed to unconfigure GPIOs: %d\n", ret); + + ret = pinctrl_pm_select_sleep_state(dev); + if (ret) + dev_warn(dev, "pinctrl state chg failed: %d\n", ret); for (i = 0; i < config->hpd_reg_cnt; i++) { ret = regulator_disable(hdmi->hpd_regs[i]); if (ret) - dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n", + 
dev_warn(dev, "failed to disable hpd regulator: %s (%d)\n", config->hpd_reg_names[i], ret); } } @@ -239,7 +311,6 @@ hotplug_work(struct work_struct *work) void hdmi_connector_irq(struct drm_connector *connector) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct msm_drm_private *priv = connector->dev->dev_private; struct hdmi *hdmi = hdmi_connector->hdmi; uint32_t hpd_int_status, hpd_int_ctrl; @@ -263,7 +334,7 @@ void hdmi_connector_irq(struct drm_connector *connector) hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); - queue_work(priv->wq, &hdmi_connector->hpd_work); + queue_work(hdmi->workq, &hdmi_connector->hpd_work); } } @@ -339,6 +410,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector) hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); + hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); drm_mode_connector_update_edid_property(connector, edid); if (edid) { @@ -433,7 +505,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) ret = hpd_enable(hdmi_connector); if (ret) { - dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret); + dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); goto fail; } diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c new file mode 100644 index 000000000..1dc9c34eb --- /dev/null +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -0,0 +1,1437 @@ +/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "hdmi.h" +#include <linux/qcom_scm.h> + +#define HDCP_REG_ENABLE 0x01 +#define HDCP_REG_DISABLE 0x00 +#define HDCP_PORT_ADDR 0x74 + +#define HDCP_INT_STATUS_MASK ( \ + HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \ + HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \ + HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \ + HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT) + +#define AUTH_WORK_RETRIES_TIME 100 +#define AUTH_RETRIES_TIME 30 + +/* QFPROM Registers for HDMI/HDCP */ +#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB 0x000000F8 +#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB 0x000000FC +#define HDCP_KSV_LSB 0x000060D8 +#define HDCP_KSV_MSB 0x000060DC + +enum DS_TYPE { /* type of downstream device */ + DS_UNKNOWN, + DS_RECEIVER, + DS_REPEATER, +}; + +enum hdmi_hdcp_state { + HDCP_STATE_NO_AKSV, + HDCP_STATE_INACTIVE, + HDCP_STATE_AUTHENTICATING, + HDCP_STATE_AUTHENTICATED, + HDCP_STATE_AUTH_FAILED +}; + +struct hdmi_hdcp_reg_data { + u32 reg_id; + u32 off; + char *name; + u32 reg_val; +}; + +struct hdmi_hdcp_ctrl { + struct hdmi *hdmi; + u32 auth_retries; + bool tz_hdcp; + enum hdmi_hdcp_state hdcp_state; + struct work_struct hdcp_auth_work; + struct work_struct hdcp_reauth_work; + +#define AUTH_ABORT_EV 1 +#define AUTH_RESULT_RDY_EV 2 + unsigned long auth_event; + wait_queue_head_t auth_event_queue; + + u32 ksv_fifo_w_index; + /* + * store aksv from qfprom + */ + u32 aksv_lsb; + u32 aksv_msb; + bool aksv_valid; + u32 ds_type; + u32 bksv_lsb; + u32 bksv_msb; + u8 dev_count; + u8 depth; + u8 ksv_list[5 * 127]; + bool max_cascade_exceeded; + bool max_dev_exceeded; +}; + +static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len) +{ + int rc; + int retry = 5; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + .buf = &offset, + }, { + .addr = addr >> 1, + .flags = I2C_M_RD, + .len = data_len, + .buf = data, + } + }; + + DBG("Start DDC read"); +retry: + rc = i2c_transfer(hdmi->i2c, msgs, 2); + + retry--; + if (rc == 2) + rc = 0; + else if (retry > 0) + goto retry; + else + rc = -EIO; + + DBG("End DDC read %d", rc); + + return rc; +} + +#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32 + +static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len) +{ + int rc; + int retry = 10; + u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM]; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + } + }; + + DBG("Start DDC write"); + if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) { + pr_err("%s: write size too big\n", __func__); + return -ERANGE; + } + + buf[0] = offset; + memcpy(&buf[1], data, data_len); + msgs[0].buf = buf; + msgs[0].len = data_len + 1; +retry: + rc = i2c_transfer(hdmi->i2c, msgs, 1); + + retry--; + if (rc == 1) + rc = 0; + else if (retry > 0) + goto retry; + else + rc = -EIO; + + DBG("End DDC write %d", rc); + + return rc; +} + +static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg, + u32 *pdata, u32 count) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT]; + u32 resp, phy_addr, idx = 0; + int i, ret = 0; + + WARN_ON(!pdata || !preg || (count == 0)); + + if (hdcp_ctrl->tz_hdcp) { + phy_addr = (u32)hdmi->mmio_phy_addr; + + while (count) { + memset(scm_buf, 0, sizeof(scm_buf)); + for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT; + i++) { + scm_buf[i].addr = phy_addr + preg[idx]; + scm_buf[i].val = pdata[idx]; + idx++; + } + ret = qcom_scm_hdcp_req(scm_buf, i, &resp); + + if (ret || resp) { + pr_err("%s: error: scm_call ret=%d resp=%u\n", + 
__func__, ret, resp); + ret = -EINVAL; + break; + } + + count -= i; + } + } else { + for (i = 0; i < count; i++) + hdmi_write(hdmi, preg[i], pdata[i]); + } + + return ret; +} + +void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val, hdcp_int_status; + unsigned long flags; + + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL); + hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK; + if (!hdcp_int_status) { + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + return; + } + /* Clear Interrupts */ + reg_val |= hdcp_int_status << 1; + /* Clear AUTH_FAIL_INFO as well */ + if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) + reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK; + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + DBG("hdcp irq %x", hdcp_int_status); + + if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) { + pr_info("%s:AUTH_SUCCESS_INT received\n", __func__); + if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) { + set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event); + wake_up_all(&hdcp_ctrl->auth_event_queue); + } + } + + if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) { + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n", + __func__, reg_val); + if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state) + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work); + else if (HDCP_STATE_AUTHENTICATING == + hdcp_ctrl->hdcp_state) { + set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event); + wake_up_all(&hdcp_ctrl->auth_event_queue); + } + } +} + +static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev) +{ + int rc; + + rc = wait_event_timeout(hdcp_ctrl->auth_event_queue, + !!test_bit(ev, &hdcp_ctrl->auth_event), + msecs_to_jiffies(ms)); + if (rc) { + pr_info("%s: msleep is canceled by event %d\n", + __func__, ev); + clear_bit(ev, &hdcp_ctrl->auth_event); + return -ECANCELED; + } + + return 0; +} + +static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + + /* Fetch aksv from QFPROM, this info should be public. */ + hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB); + hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB); + + /* check there are 20 ones in AKSV */ + if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb)) + != 20) { + pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n", + __func__); + pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n", + __func__, hdcp_ctrl->aksv_msb, + hdcp_ctrl->aksv_lsb); + return -EINVAL; + } + DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb); + + return 0; +} + +static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val, failure, nack0; + int rc = 0; + + /* Check for any DDC transfer failures */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS); + failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED; + nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0; + DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d", + reg_val, failure, nack0); + + if (failure) { + /* + * Indicates that the last HDCP HW DDC transfer failed. + * This occurs when a transfer is attempted with HDCP DDC + * disabled (HDCP_DDC_DISABLE=1) or the number of retries + * matches HDCP_DDC_RETRY_CNT. + * Failure occurred, let's clear it. 
+ */ + DBG("DDC failure detected"); + + /* First, Disable DDC */ + hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, + HDMI_HDCP_DDC_CTRL_0_DISABLE); + + /* ACK the Failure to Clear it */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1); + reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK; + hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val); + + /* Check if the FAILURE got Cleared */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS); + if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED) + pr_info("%s: Unable to clear HDCP DDC Failure\n", + __func__); + + /* Re-Enable HDCP DDC */ + hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0); + } + + if (nack0) { + DBG("Before: HDMI_DDC_SW_STATUS=0x%08x", + hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS)); + /* Reset HDMI DDC software status */ + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + + /* Reset HDMI DDC Controller */ + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val |= HDMI_DDC_CTRL_SOFT_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + + /* If previous msleep is aborted, skip this msleep */ + if (!rc) + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + DBG("After: HDMI_DDC_SW_STATUS=0x%08x", + hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS)); + } + + return rc; +} + +static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + u32 hdcp_ddc_status, ddc_hw_status; + u32 xfer_done, xfer_req, hw_done; + bool hw_not_ready; + u32 timeout_count; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + + if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0) + return 0; + + /* Wait to be clean on DDC HW engine */ + timeout_count = 100; + do { + hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS); + ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS); + + xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE; + xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ; + hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE; + hw_not_ready = !xfer_done || xfer_req || !hw_done; + + if (hw_not_ready) + break; + + timeout_count--; + if (!timeout_count) { + pr_warn("%s: hw_ddc_clean failed\n", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + return 0; +} + +static void hdmi_hdcp_reauth_work(struct work_struct *work) +{ + struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, + struct hdmi_hdcp_ctrl, hdcp_reauth_work); + struct hdmi *hdmi = hdcp_ctrl->hdmi; + unsigned long flags; + u32 reg_val; + + DBG("HDCP REAUTH WORK"); + /* + * Disable HPD circuitry. 
+ * This is needed to reset the HDCP cipher engine so that when we + * attempt a re-authentication, HW would clear the AN0_READY and + * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register + */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val &= ~HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + + /* Disable HDCP interrupts */ + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdmi_write(hdmi, REG_HDMI_HDCP_RESET, + HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE); + + /* Wait to be clean on DDC HW engine */ + if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) { + pr_info("%s: reauth work aborted\n", __func__); + return; + } + + /* Disable encryption and disable the HDCP block */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0); + + /* Enable HPD circuitry */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val |= HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* + * Only retry defined times then abort current authenticating process + */ + if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) { + hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; + hdcp_ctrl->auth_retries = 0; + pr_info("%s: abort reauthentication!\n", __func__); + + return; + } + + DBG("Queue AUTH WORK"); + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING; + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work); +} + +static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status; + u32 reg_val; + unsigned long flags; + int rc; + + if (!hdcp_ctrl->aksv_valid) { + rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl); + if (rc) { + pr_err("%s: ASKV validation failed\n", __func__); + hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV; + return -ENOTSUPP; + } + hdcp_ctrl->aksv_valid = true; + } + + spin_lock_irqsave(&hdmi->reg_lock, flags); + /* disable HDMI Encrypt */ + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + + /* Enabling Software DDC */ + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION); + reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION; + hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* + * Write AKSV read from QFPROM to the HDCP registers. + * This step is needed for HDCP authentication and must be + * written before enabling HDCP. + */ + hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb); + hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb); + + /* + * HDCP setup prior to enabling HDCP_CTRL. + * Setup seed values for random number An. 
+ */ + hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF); + hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE); + + /* Disable the RngCipher state */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL); + reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER; + hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val); + DBG("HDCP_DEBUG_CTRL=0x%08x", + hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL)); + + /* + * Ensure that all register writes are completed before + * enabling HDCP cipher + */ + wmb(); + + /* + * Enable HDCP + * This needs to be done as early as possible in order for the + * hardware to make An available to read + */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE); + + /* + * If we had stale values for the An ready bit, it should most + * likely be cleared now after enabling HDCP cipher + */ + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + DBG("After enabling HDCP Link0_Status=0x%08x", link0_status); + if (!(link0_status & + (HDMI_HDCP_LINK0_STATUS_AN_0_READY | + HDMI_HDCP_LINK0_STATUS_AN_1_READY))) + DBG("An not ready after enabling HDCP"); + + /* Clear any DDC failures from previous tries before enable HDCP*/ + rc = reset_hdcp_ddc_failures(hdcp_ctrl); + + return rc; +} + +static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val; + unsigned long flags; + + DBG("hdcp auth failed, queue reauth work"); + /* clear HDMI Encrypt */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED; + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work); +} + +static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val; + unsigned long flags; + + /* + * Disable software DDC before going into part3 to make sure + * there is no Arbitration between software and hardware for DDC + */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION); + reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION; + hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* enable HDMI Encrypt */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val |= HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED; + hdcp_ctrl->auth_retries = 0; +} + +/* + * hdcp authenticating part 1 + * Wait Key/An ready + * Read BCAPS from sink + * Write BCAPS and AKSV into HDCP engine + * Write An and AKSV to sink + * Read BKSV from sink and write into HDCP engine + */ +static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status, keys_state; + u32 timeout_count; + bool an_ready; + + /* Wait for HDCP keys to be checked and validated */ + timeout_count = 100; + do { + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + keys_state = (link0_status >> 28) & 0x7; + if (keys_state == HDCP_KEYS_STATE_VALID) + break; + + DBG("Keys not ready(%d). 
s=%d, l0=%0x08x", + timeout_count, keys_state, link0_status); + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Wait key state timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + timeout_count = 100; + do { + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY) + && (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY); + if (an_ready) + break; + + DBG("An not ready(%d). l0_status=0x%08x", + timeout_count, link0_status); + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Wait An timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + return 0; +} + +static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_aksv_0, link0_aksv_1; + u32 link0_an[2]; + u8 aksv[5]; + + /* Read An0 and An1 */ + link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5); + link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6); + + /* Read AKSV */ + link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3); + link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4); + + DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1); + /* Copy An and AKSV to byte arrays for transmission */ + aksv[0] = link0_aksv_0 & 0xFF; + aksv[1] = (link0_aksv_0 >> 8) & 0xFF; + aksv[2] = (link0_aksv_0 >> 16) & 0xFF; + aksv[3] = (link0_aksv_0 >> 24) & 0xFF; + aksv[4] = link0_aksv_1 & 0xFF; + + /* Write An to offset 0x18 */ + rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an, + (u16)sizeof(link0_an)); + if (rc) { + pr_err("%s:An write failed\n", __func__); + return rc; + } + DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]); + + /* Write AKSV to offset 0x10 */ + rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5); + if (rc) { + pr_err("%s:AKSV write failed\n", __func__); + return rc; + } + DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0); + + return 0; +} + +static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u8 bksv[5]; + u32 reg[2], data[2]; + + /* Read BKSV at offset 0x00 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5); + if (rc) { + pr_err("%s:BKSV read failed\n", __func__); + return rc; + } + + hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) | + (bksv[2] << 16) | (bksv[3] << 24); + hdcp_ctrl->bksv_msb = bksv[4]; + DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb); + + /* check there are 20 ones in BKSV */ + if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb)) + != 20) { + pr_err(": BKSV doesn't have 20 1's and 20 0's\n"); + pr_err(": BKSV chk fail. 
BKSV=%02x%02x%02x%02x%02x\n", + bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]); + return -EINVAL; + } + + /* Write BKSV read from sink to HDCP registers */ + reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0; + data[0] = hdcp_ctrl->bksv_lsb; + reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1; + data[1] = hdcp_ctrl->bksv_msb; + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); + + return rc; +} + +static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg, data; + u8 bcaps; + + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); + if (rc) { + pr_err("%s:BCAPS read failed\n", __func__); + return rc; + } + DBG("BCAPS=%02x", bcaps); + + /* receiver (0), repeater (1) */ + hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER; + + /* Write BCAPS to the hardware */ + reg = REG_HDMI_HDCP_RCVPORT_DATA12; + data = (u32)bcaps; + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, ®, &data, 1); + + return rc; +} + +static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + unsigned long flags; + int rc; + + /* Wait for AKSV key and An ready */ + rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl); + if (rc) { + pr_err("%s: wait key and an ready failed\n", __func__); + return rc; + }; + + /* Read BCAPS and send to HDCP engine */ + rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl); + if (rc) { + pr_err("%s: read bcaps error, abort\n", __func__); + return rc; + } + + /* + * 1.1_Features turned off by default. + * No need to write AInfo since 1.1_Features is disabled. + */ + hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0); + + /* Send AKSV and An to sink */ + rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl); + if (rc) { + pr_err("%s:An/Aksv write failed\n", __func__); + return rc; + } + + /* Read BKSV and send to HDCP engine*/ + rc = hdmi_hdcp_recv_bksv(hdcp_ctrl); + if (rc) { + pr_err("%s:BKSV Process failed\n", __func__); + return rc; + } + + /* Enable HDCP interrupts and ack/clear any stale interrupts */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, + HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK | + HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK | + HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK | + HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK | + HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + return 0; +} + +/* read R0' from sink and pass it to HDCP engine */ +static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + int rc = 0; + u8 buf[2]; + + /* + * HDCP Compliance Test case 1A-01: + * Wait here at least 100ms before reading R0' + */ + rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV); + if (rc) + return rc; + + /* Read R0' at offset 0x08 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2); + if (rc) { + pr_err("%s:R0' read failed\n", __func__); + return rc; + } + DBG("R0'=%02x%02x", buf[1], buf[0]); + + /* Write R0' to HDCP registers and check to see if it is a match */ + hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0, + (((u32)buf[1]) << 8) | buf[0]); + + return 0; +} + +/* Wait for authenticating result: R0/R0' are matched or not */ +static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status; + int rc; + + /* wait for hdcp irq, 10 sec should be long enough */ + rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV); + if (!rc) { + pr_err("%s: Wait Auth IRQ timeout\n", __func__); + return -ETIMEDOUT; + } + + link0_status = 
hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) { + pr_err("%s: Authentication Part I failed\n", __func__); + return -EINVAL; + } + + /* Enable HDCP Encryption */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, + HDMI_HDCP_CTRL_ENABLE | + HDMI_HDCP_CTRL_ENCRYPTION_ENABLE); + + return 0; +} + +static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl, + u16 *pbstatus) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + bool max_devs_exceeded = false, max_cascade_exceeded = false; + u32 repeater_cascade_depth = 0, down_stream_devices = 0; + u16 bstatus; + u8 buf[2]; + + /* Read BSTATUS at offset 0x41 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2); + if (rc) { + pr_err("%s: BSTATUS read failed\n", __func__); + goto error; + } + *pbstatus = bstatus = (buf[1] << 8) | buf[0]; + + + down_stream_devices = bstatus & 0x7F; + repeater_cascade_depth = (bstatus >> 8) & 0x7; + max_devs_exceeded = (bstatus & BIT(7)) ? true : false; + max_cascade_exceeded = (bstatus & BIT(11)) ? true : false; + + if (down_stream_devices == 0) { + /* + * If no downstream devices are attached to the repeater + * then part II fails. + * todo: The other approach would be to continue PART II. + */ + pr_err("%s: No downstream devices\n", __func__); + rc = -EINVAL; + goto error; + } + + /* + * HDCP Compliance 1B-05: + * Check if no. of devices connected to repeater + * exceed max_devices_connected from bit 7 of Bstatus. + */ + if (max_devs_exceeded) { + pr_err("%s: no. of devs connected exceeds max allowed", + __func__); + rc = -EINVAL; + goto error; + } + + /* + * HDCP Compliance 1B-06: + * Check if no. of cascade connected to repeater + * exceed max_cascade_connected from bit 11 of Bstatus. + */ + if (max_cascade_exceeded) { + pr_err("%s: no. of cascade conn exceeds max allowed", + __func__); + rc = -EINVAL; + goto error; + } + +error: + hdcp_ctrl->dev_count = down_stream_devices; + hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded; + hdcp_ctrl->max_dev_exceeded = max_devs_exceeded; + hdcp_ctrl->depth = repeater_cascade_depth; + return rc; +} + +static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( + struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg, data; + u32 timeout_count; + u16 bstatus; + u8 bcaps; + + /* + * Wait until READY bit is set in BCAPS, as per HDCP specifications + * maximum permitted time to check for READY bit is five seconds. 
+ */ + timeout_count = 100; + do { + /* Read BCAPS at offset 0x40 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); + if (rc) { + pr_err("%s: BCAPS read failed\n", __func__); + return rc; + } + + if (bcaps & BIT(5)) + break; + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Wait KSV fifo ready timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus); + if (rc) { + pr_err("%s: bstatus error\n", __func__); + return rc; + } + + /* Write BSTATUS and BCAPS to HDCP registers */ + reg = REG_HDMI_HDCP_RCVPORT_DATA12; + data = bcaps | (bstatus << 8); + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, ®, &data, 1); + if (rc) { + pr_err("%s: BSTATUS write failed\n", __func__); + return rc; + } + + return 0; +} + +/* + * hdcp authenticating part 2: 2nd + * read ksv fifo from sink + * transfer V' from sink to HDCP engine + * reset SHA engine + */ +static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + int rc = 0; + struct hdmi_hdcp_reg_data reg_data[] = { + {REG_HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"}, + {REG_HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"}, + {REG_HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"}, + {REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"}, + {REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"}, + }; + struct hdmi_hdcp_reg_data *rd; + u32 size = ARRAY_SIZE(reg_data); + u32 reg[ARRAY_SIZE(reg_data)]; + u32 data[ARRAY_SIZE(reg_data)]; + int i; + + for (i = 0; i < size; i++) { + rd = ®_data[i]; + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, + rd->off, (u8 *)&data[i], (u16)sizeof(data[i])); + if (rc) { + pr_err("%s: Read %s failed\n", __func__, rd->name); + goto error; + } + + DBG("%s =%x", rd->name, data[i]); + reg[i] = reg_data[i].reg_id; + } + + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size); + +error: + return rc; +} + +static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 ksv_bytes; + + ksv_bytes = 5 * hdcp_ctrl->dev_count; + + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43, + hdcp_ctrl->ksv_list, ksv_bytes); + if (rc) + pr_err("%s: KSV FIFO read failed\n", __func__); + + return rc; +} + +static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + u32 reg[2], data[2]; + u32 rc = 0; + + reg[0] = REG_HDMI_HDCP_SHA_CTRL; + data[0] = HDCP_REG_ENABLE; + reg[1] = REG_HDMI_HDCP_SHA_CTRL; + data[1] = HDCP_REG_DISABLE; + + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); + + return rc; +} + +static int hdmi_hdcp_auth_part2_recv_ksv_fifo( + struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + u32 timeout_count; + + /* + * Read KSV FIFO over DDC + * Key Selection vector FIFO Used to pull downstream KSVs + * from HDCP Repeaters. + * All bytes (DEVICE_COUNT * 5) must be read in a single, + * auto incrementing access. + * All bytes read as 0x00 for HDCP Receivers that are not + * HDCP Repeaters (REPEATER == 0). 
+ */ + timeout_count = 100; + do { + rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl); + if (!rc) + break; + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Recv ksv fifo timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl); + if (rc) { + pr_err("%s: transfer V failed\n", __func__); + return rc; + } + + /* reset SHA engine before write ksv fifo */ + rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl); + if (rc) { + pr_err("%s: fail to reset sha engine\n", __func__); + return rc; + } + + return 0; +} + +/* + * Write KSV FIFO to HDCP_SHA_DATA. + * This is done 1 byte at time starting with the LSB. + * Once 64 bytes have been written, we need to poll for + * HDCP_SHA_BLOCK_DONE before writing any further + * If the last byte is written, we need to poll for + * HDCP_SHA_COMP_DONE to wait until HW finish + */ +static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int i; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 ksv_bytes, last_byte = 0; + u8 *ksv_fifo = NULL; + u32 reg_val, data, reg; + u32 rc = 0; + + ksv_bytes = 5 * hdcp_ctrl->dev_count; + + /* Check if need to wait for HW completion */ + if (hdcp_ctrl->ksv_fifo_w_index) { + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS); + DBG("HDCP_SHA_STATUS=%08x", reg_val); + if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) { + /* check COMP_DONE if last write */ + if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) { + DBG("COMP_DONE"); + return 0; + } else { + return -EAGAIN; + } + } else { + /* check BLOCK_DONE if not last write */ + if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE)) + return -EAGAIN; + + DBG("BLOCK_DONE"); + } + } + + ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index; + if (ksv_bytes <= 64) + last_byte = 1; + else + ksv_bytes = 64; + + ksv_fifo = hdcp_ctrl->ksv_list; + ksv_fifo += hdcp_ctrl->ksv_fifo_w_index; + + for (i = 0; i < ksv_bytes; i++) { + /* Write KSV byte and set DONE bit[0] for last byte*/ + reg_val = ksv_fifo[i] << 16; + if ((i == (ksv_bytes - 1)) && last_byte) + reg_val |= HDMI_HDCP_SHA_DATA_DONE; + + reg = REG_HDMI_HDCP_SHA_DATA; + data = reg_val; + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, ®, &data, 1); + + if (rc) + return rc; + } + + hdcp_ctrl->ksv_fifo_w_index += ksv_bytes; + + /* + *return -EAGAIN to notify caller to wait for COMP_DONE or BLOCK_DONE + */ + return -EAGAIN; +} + +/* write ksv fifo into HDCP engine */ +static int hdmi_hdcp_auth_part2_write_ksv_fifo( + struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + u32 timeout_count; + + hdcp_ctrl->ksv_fifo_w_index = 0; + timeout_count = 100; + do { + rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl); + if (!rc) + break; + + if (rc != -EAGAIN) + return rc; + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Write KSV fifo timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + return 0; +} + +static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status; + u32 timeout_count = 100; + + do { + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES) + break; + + timeout_count--; + if (!timeout_count) { + pr_err("%s: HDCP V Match timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + return 0; +} + +static void 
hdmi_hdcp_auth_work(struct work_struct *work) +{ + struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, + struct hdmi_hdcp_ctrl, hdcp_auth_work); + int rc; + + rc = hdmi_hdcp_auth_prepare(hdcp_ctrl); + if (rc) { + pr_err("%s: auth prepare failed %d\n", __func__, rc); + goto end; + } + + /* HDCP PartI */ + rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl); + if (rc) { + pr_err("%s: key exchange failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl); + if (rc) { + pr_err("%s: receive r0 failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl); + if (rc) { + pr_err("%s: verify r0 failed %d\n", __func__, rc); + goto end; + } + pr_info("%s: Authentication Part I successful\n", __func__); + if (hdcp_ctrl->ds_type == DS_RECEIVER) + goto end; + + /* HDCP PartII */ + rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl); + if (rc) { + pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl); + if (rc) { + pr_err("%s: recv ksv fifo failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl); + if (rc) { + pr_err("%s: write ksv fifo failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl); + if (rc) + pr_err("%s: check v match failed %d\n", __func__, rc); + +end: + if (rc == -ECANCELED) { + pr_info("%s: hdcp authentication canceled\n", __func__); + } else if (rc == -ENOTSUPP) { + pr_info("%s: hdcp is not supported\n", __func__); + } else if (rc) { + pr_err("%s: hdcp authentication failed\n", __func__); + hdmi_hdcp_auth_fail(hdcp_ctrl); + } else { + hdmi_hdcp_auth_done(hdcp_ctrl); + } +} + +void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val; + unsigned long flags; + + if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) || + (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) { + DBG("still active or activating or no askv. returning"); + return; + } + + /* clear HDMI Encrypt */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->auth_event = 0; + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING; + hdcp_ctrl->auth_retries = 0; + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work); +} + +void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + unsigned long flags; + u32 reg_val; + + if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) || + (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) { + DBG("hdcp inactive or no aksv. returning"); + return; + } + + /* + * Disable HPD circuitry. + * This is needed to reset the HDCP cipher engine so that when we + * attempt a re-authentication, HW would clear the AN0_READY and + * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register + */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val &= ~HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + + /* + * Disable HDCP interrupts. + * Also, need to set the state to inactive here so that any ongoing + * reauth works will know that the HDCP session has been turned off. + */ + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* + * Cancel any pending auth/reauth attempts. 
+ * If one is ongoing, this will wait for it to finish. + * No more reauthentication attempts will be scheduled since we + * set the current state to inactive. + */ + set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event); + wake_up_all(&hdcp_ctrl->auth_event_queue); + cancel_work_sync(&hdcp_ctrl->hdcp_auth_work); + cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work); + + hdmi_write(hdmi, REG_HDMI_HDCP_RESET, + HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE); + + /* Disable encryption and disable the HDCP block */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0); + + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + + /* Enable HPD circuitry */ + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val |= HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; + + DBG("HDCP: Off"); +} + +struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi) +{ + struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL; + + if (!hdmi->qfprom_mmio) { + pr_err("%s: HDCP is not supported without qfprom\n", + __func__); + return ERR_PTR(-EINVAL); + } + + hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL); + if (!hdcp_ctrl) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work); + INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work); + init_waitqueue_head(&hdcp_ctrl->auth_event_queue); + hdcp_ctrl->hdmi = hdmi; + hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; + hdcp_ctrl->aksv_valid = false; + + if (qcom_scm_hdcp_available()) + hdcp_ctrl->tz_hdcp = true; + else + hdcp_ctrl->tz_hdcp = false; + + return hdcp_ctrl; +} + +void hdmi_hdcp_destroy(struct hdmi *hdmi) +{ + if (hdmi && hdmi->hdcp_ctrl) { + kfree(hdmi->hdcp_ctrl); + hdmi->hdcp_ctrl = NULL; + } +} diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index 6997ec636..3a01cb505 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -426,57 +426,6 @@ static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) kfree(phy_8960); } -static void hdmi_phy_8960_reset(struct hdmi_phy *phy) -{ - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - struct hdmi *hdmi = phy_8960->hdmi; - unsigned int val; - - val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } - - msleep(100); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } -} - static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { @@ -511,7 +460,6 
@@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy) static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = { .destroy = hdmi_phy_8960_destroy, - .reset = hdmi_phy_8960_reset, .powerup = hdmi_phy_8960_powerup, .powerdown = hdmi_phy_8960_powerdown, }; diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c index 391433c1a..cb01421ae 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c @@ -29,37 +29,6 @@ static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy) kfree(phy_8x60); } -static void hdmi_phy_8x60_reset(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - struct hdmi *hdmi = phy_8x60->hdmi; - unsigned int val; - - val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } - - msleep(100); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } -} - static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { @@ -182,7 +151,6 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy) static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = { .destroy = hdmi_phy_8x60_destroy, - .reset = hdmi_phy_8x60_reset, .powerup = hdmi_phy_8x60_powerup, .powerdown = hdmi_phy_8x60_powerdown, }; diff --git a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c index 59fa6cdac..56ab8917e 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c +++ b/kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c @@ -19,7 +19,6 @@ struct hdmi_phy_8x74 { struct hdmi_phy base; - struct hdmi *hdmi; void __iomem *mmio; }; #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base) @@ -41,59 +40,6 @@ static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy) kfree(phy_8x74); } -static void hdmi_phy_8x74_reset(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - struct hdmi *hdmi = phy_8x74->hdmi; - unsigned int val; - - /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */ - - val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } - - msleep(100); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } -} - static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, 
unsigned long int pixclock) { @@ -117,7 +63,6 @@ static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy) static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = { .destroy = hdmi_phy_8x74_destroy, - .reset = hdmi_phy_8x74_reset, .powerup = hdmi_phy_8x74_powerup, .powerdown = hdmi_phy_8x74_powerdown, }; @@ -138,8 +83,6 @@ struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi) phy->funcs = &hdmi_phy_8x74_funcs; - phy_8x74->hdmi = hdmi; - /* for 8x74, the phy mmio is mapped separately: */ phy_8x74->mmio = msm_ioremap(hdmi->pdev, "phy_physical", "HDMI_8x74"); diff --git a/kernel/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/kernel/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index 43bb54a9a..dbd9cc4da 100644 --- a/kernel/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/kernel/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h 
b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index 1d39174d9..d5d94575f 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -680,18 +680,18 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val) return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; } -static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; } -#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val) +static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; } +#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t 
MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val) { - return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK; + return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK; } -#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val) +#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val) { - return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK; + return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK; } static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 73afa2182..6ac9aa165 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -51,6 +51,11 @@ struct mdp4_crtc { /* if there is a pending flip, these will be non-null: */ struct drm_pending_vblank_event *event; + /* Bits have been flushed at the last commit, + * used to decide if a vsync has happened since last commit. + */ + u32 flushed_mask; + #define PENDING_CURSOR 0x1 #define PENDING_FLIP 0x2 atomic_t pending; @@ -93,6 +98,8 @@ static void crtc_flush(struct drm_crtc *crtc) DBG("%s: flush=%08x", mdp4_crtc->name, flush); + mdp4_crtc->flushed_mask = flush; + mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); } @@ -327,13 +334,15 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, return 0; } -static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc) +static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); DBG("%s: begin", mdp4_crtc->name); } -static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc) +static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -537,6 +546,29 @@ static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) crtc_flush(crtc); } +static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + int ret; + + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) & + mdp4_crtc->flushed_mask), + msecs_to_jiffies(50)); + if (ret <= 0) + dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id); + + mdp4_crtc->flushed_mask = 0; + + drm_crtc_vblank_put(crtc); +} + uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); @@ -600,6 +632,15 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer) mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); } +void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc) +{ + /* wait_for_flush_done is the only case for now. + * Later we will have command mode CRTC to wait for + * other event. 
+ */ + mdp4_crtc_wait_for_flush_done(crtc); +} + static const char *dma_names[] = { "DMA_P", "DMA_S", "DMA_E", }; @@ -641,7 +682,5 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); plane->crtc = crtc; - mdp4_plane_install_properties(plane, &crtc->base); - return crtc; } diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index 7896323b2..89614c6a6 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c @@ -38,7 +38,7 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING #include <mach/board.h> /* not ironically named at all.. no, really.. */ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index 7369ee7f0..5ed38cf54 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c @@ -19,8 +19,11 @@ #include "msm_drv.h" #include "mdp4_kms.h" -void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) { + mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR, + irqmask ^ (irqmask & old_irqmask)); mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); } @@ -68,9 +71,10 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) struct drm_device *dev = mdp4_kms->dev; struct msm_drm_private *priv = dev->dev_private; unsigned int id; - uint32_t status; + uint32_t status, enable; - status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS); + enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE); + status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable; mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); VERB("status=%08x", status); @@ -86,13 +90,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp4_crtc_vblank(crtc), true); + mdp4_disable(mdp4_kms); + return 0; } void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp4_crtc_vblank(crtc), false); + mdp4_disable(mdp4_kms); } diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index d847b9436..077f7521a 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -119,6 +119,8 @@ static int mdp4_hw_init(struct msm_kms *kms) if (mdp4_kms->rev > 1) mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); + dev->mode_config.allow_fb_modifiers = true; + out: pm_runtime_put_sync(dev->dev); @@ -157,6 +159,12 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s mdp4_disable(mdp4_kms); } +static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms, + struct drm_crtc *crtc) +{ + mdp4_crtc_wait_for_commit_done(crtc); +} + static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -195,6 +203,7 @@ static const struct mdp_kms_funcs kms_funcs = { .disable_vblank = 
mdp4_disable_vblank, .prepare_commit = mdp4_prepare_commit, .complete_commit = mdp4_complete_commit, + .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done, .get_format = mdp_get_format, .round_pixclk = mdp4_round_pixclk, .preclose = mdp4_preclose, @@ -232,22 +241,37 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms) } #ifdef CONFIG_OF -static struct drm_panel *detect_panel(struct drm_device *dev, const char *name) +static struct drm_panel *detect_panel(struct drm_device *dev) { - struct device_node *n; + struct device_node *endpoint, *panel_node; + struct device_node *np = dev->dev->of_node; struct drm_panel *panel = NULL; - n = of_parse_phandle(dev->dev->of_node, name, 0); - if (n) { - panel = of_drm_find_panel(n); - if (!panel) - panel = ERR_PTR(-EPROBE_DEFER); + endpoint = of_graph_get_next_endpoint(np, NULL); + if (!endpoint) { + dev_err(dev->dev, "no valid endpoint\n"); + return ERR_PTR(-ENODEV); + } + + panel_node = of_graph_get_remote_port_parent(endpoint); + if (!panel_node) { + dev_err(dev->dev, "no valid panel node\n"); + of_node_put(endpoint); + return ERR_PTR(-ENODEV); + } + + of_node_put(endpoint); + + panel = of_drm_find_panel(panel_node); + if (!panel) { + of_node_put(panel_node); + return ERR_PTR(-EPROBE_DEFER); } return panel; } #else -static struct drm_panel *detect_panel(struct drm_device *dev, const char *name) +static struct drm_panel *detect_panel(struct drm_device *dev) { // ??? maybe use a module param to specify which panel is attached? } @@ -285,7 +309,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS: */ - panel = detect_panel(dev, "qcom,lvds-panel"); + panel = detect_panel(dev); if (IS_ERR(panel)) { ret = PTR_ERR(panel); dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret); @@ -518,6 +542,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + return kms; fail: diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 0a5c58bde..8a7f6e1e2 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -167,7 +167,8 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer, int mdp4_disable(struct mdp4_kms *mdp4_kms); int mdp4_enable(struct mdp4_kms *mdp4_kms); -void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); void mdp4_irq_preinstall(struct msm_kms *kms); int mdp4_irq_postinstall(struct msm_kms *kms); void mdp4_irq_uninstall(struct msm_kms *kms); @@ -175,29 +176,24 @@ irqreturn_t mdp4_irq(struct msm_kms *kms); int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -static inline bool pipe_supports_yuv(enum mdp4_pipe pipe) +static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe) { switch (pipe) { case VG1: case VG2: case VG3: case VG4: - return true; + return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; + case RGB1: + case RGB2: + case RGB3: + return MDP_PIPE_CAP_SCALE; default: - return false; + return 0; } } -static inline -uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, - uint32_t max_formats) -{ - return mdp_get_formats(pixel_formats, max_formats, - 
!pipe_supports_yuv(pipe_id)); -} - -void mdp4_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj); enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp4_plane_init(struct drm_device *dev, enum mdp4_pipe pipe_id, bool private_plane); @@ -206,6 +202,7 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer); +void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc); struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, struct drm_plane *plane, int id, int ovlp_id, enum mdp4_dma dma_id); @@ -229,7 +226,7 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) } #endif -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING static inline int match_dev_name(struct device *dev, void *data) { return !strcmp(dev_name(dev), data); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c index 60ec8222c..4cd6e721a 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c @@ -38,7 +38,7 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING #include <mach/board.h> static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) { @@ -346,8 +346,10 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - if (panel) + if (panel) { drm_panel_disable(panel); + drm_panel_unprepare(panel); + } /* * Wait for a vsync so we know the ENABLE=0 latched before @@ -412,8 +414,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) if (ret) dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); - if (panel) + if (panel) { + drm_panel_prepare(panel); drm_panel_enable(panel); + } setup_phy(encoder); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index dbc068988..30d57e74c 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -26,6 +26,7 @@ struct mdp4_plane { enum mdp4_pipe pipe; + uint32_t caps; uint32_t nformats; uint32_t formats[32]; @@ -33,6 +34,21 @@ struct mdp4_plane { }; #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) +/* MDP format helper functions */ +static inline +enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) +{ + bool is_tile = false; + + if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) + is_tile = true; + + if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile) + return FRAME_TILE_YCBCR_420; + + return FRAME_LINEAR; +} + static void mdp4_plane_set_scanout(struct drm_plane *plane, struct drm_framebuffer *fb); static int mdp4_plane_mode_set(struct drm_plane *plane, @@ -59,7 +75,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane) } /* helper to install properties which are common to planes and crtcs */ -void mdp4_plane_install_properties(struct drm_plane *plane, +static void mdp4_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) { // XXX @@ -83,22 +99,28 @@ static const struct drm_plane_funcs mdp4_plane_funcs = { }; static int 
mdp4_plane_prepare_fb(struct drm_plane *plane, - struct drm_framebuffer *fb, const struct drm_plane_state *new_state) { struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); + struct drm_framebuffer *fb = new_state->fb; + + if (!fb) + return 0; DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); return msm_framebuffer_prepare(fb, mdp4_kms->id); } static void mdp4_plane_cleanup_fb(struct drm_plane *plane, - struct drm_framebuffer *fb, const struct drm_plane_state *old_state) { struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); + struct drm_framebuffer *fb = old_state->fb; + + if (!fb) + return; DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); msm_framebuffer_cleanup(fb, mdp4_kms->id); @@ -205,12 +227,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t op_mode = 0; uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; + enum mdp4_frame_format frame_type; if (!(crtc && fb)) { DBG("%s: disabled!", mdp4_plane->name); return 0; } + frame_type = mdp4_get_frame_format(fb); + /* src values are in Q16 fixed point, convert to integer: */ src_x = src_x >> 16; src_y = src_y >> 16; @@ -304,6 +329,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | + MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) | COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), @@ -324,6 +350,11 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); + if (frame_type != FRAME_LINEAR) + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe), + MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) | + MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h)); + return 0; } @@ -358,9 +389,11 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev, mdp4_plane->pipe = pipe_id; mdp4_plane->name = pipe_names[pipe_id]; + mdp4_plane->caps = mdp4_pipe_caps(pipe_id); - mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats, - ARRAY_SIZE(mdp4_plane->formats)); + mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats, + ARRAY_SIZE(mdp4_plane->formats), + !pipe_supports_yuv(mdp4_plane->caps)); type = private_plane ? 
DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index b9a4ded6e..c37da9c61 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48) -- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) -- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -120,6 +128,21 @@ enum mdp5_data_format { DATA_FORMAT_YUV = 1, }; +enum mdp5_block_size { + BLOCK_SIZE_64 = 0, + BLOCK_SIZE_128 = 1, +}; + +enum mdp5_rotate_mode { + ROTATE_0 = 0, + ROTATE_90 = 1, +}; + +enum mdp5_chroma_downsample_method { + DS_MTHD_NO_PIXEL_DROP = 0, + DS_MTHD_PIXEL_DROP = 1, +}; + #define MDP5_IRQ_WB_0_DONE 0x00000001 #define MDP5_IRQ_WB_1_DONE 0x00000002 #define MDP5_IRQ_WB_2_DONE 0x00000010 @@ -314,19 +337,19 @@ static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val) #define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 #define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 -#define REG_MDP5_SPLIT_DPL_EN 0x000003f4 +static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); } -#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8 -#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 -#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 -#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 +static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); } +#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 +#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 +#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 -#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0 -#define 
MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 -#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 -#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 +static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); } +#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 +#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 +#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 static inline uint32_t __offset_CTL(uint32_t idx) { @@ -358,49 +381,49 @@ static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x0 static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } #define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; } #define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 #define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; } #define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 #define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; } #define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 #define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; } #define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 #define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; } #define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 #define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; } #define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 #define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; } #define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 #define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; } @@ -408,13 +431,13 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) #define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 #define 
MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 #define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; } #define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000 #define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK; } @@ -476,6 +499,44 @@ static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __o static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); } +static inline uint32_t __offset_LAYER_EXT(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000040; + case 1: return 0x00000044; + case 2: return 0x00000048; + case 3: return 0x0000004c; + case 4: return 0x00000050; + case 5: return 0x00000054; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } + +static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } +#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001 +#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004 +#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010 +#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040 +#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100 +#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400 +#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000 +#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000 +#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000 +#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20 +static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK; +} +#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26 +static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK; +} + static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) { switch (idx) { @@ -780,11 +841,11 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) } #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000 -#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_sspp_fetch_type val) +#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000 +#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val) { - return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK; + return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK; } #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 @@ -834,6 +895,7 @@ static inline uint32_t 
MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val) #define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000 #define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 #define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 +#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000 static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); } @@ -871,44 +933,121 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val) return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; } +static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx) +{ + switch (idx) { + case COMP_0: return 0x00000100; + case COMP_1_2: return 0x00000110; + case COMP_3: return 0x00000120; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } + +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00 +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000 +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000 +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00 +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000 +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000 +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val) +{ + return ((val) << 
MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000 +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK; +} + static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000 +#define 
MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK; } static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); } @@ -961,9 +1100,22 @@ static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x000000 static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t __offset_BLEND(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000020; + case 1: return 0x00000050; + case 2: return 0x00000080; + case 3: return 0x000000b0; + case 4: return 0x00000230; + case 5: return 0x00000260; + case 6: return 0x00000290; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) @@ -985,25 +1137,25 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val) #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 -static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { 
return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); } static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } #define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff @@ -1234,6 +1386,358 @@ static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x000000 static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); } +static inline uint32_t __offset_WB(uint32_t idx) +{ + switch (idx) { +#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */ + case 0: return (mdp5_cfg->wb.base[0]); + case 1: return (mdp5_cfg->wb.base[1]); + case 2: return (mdp5_cfg->wb.base[2]); + case 3: return (mdp5_cfg->wb.base[3]); + case 4: return (mdp5_cfg->wb.base[4]); +#endif + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } +#define 
MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003 +#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c +#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030 +#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0 +#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100 +#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600 +#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9 +static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK; +} +#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000 +#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12 +static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK; +} +#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000 +#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000 +#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000 +#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000 +#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19 +static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK; +} +#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000 +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000 +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23 +static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK; +} +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000 +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26 +static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK; +} +#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000 +#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30 +static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); } +#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001 +#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006 +#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1 +static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK; +} +#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010 +#define 
MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4 +static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK; +} +#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020 +#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5 +static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK; +} +#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040 +#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100 +#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200 +#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9 +static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; +} +#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400 +#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10 +static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK; +} +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12 +static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK; +} +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13 +static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK; +} +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14 +static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); } +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val) +{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK; +} +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val) +{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK; +} +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val) +{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK; +} +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val) +{ + return ((val) << 
MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); } +#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff +#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0 +static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK; +} +#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000 +#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16 +static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); } +#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff +#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0 +static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK; +} +#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000 +#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16 +static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); } +#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff +#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0 +static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val) +{ + return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK; +} +#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000 +#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16 +static 
inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val) +{ + return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK; +} + +static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK; +} + +static 
inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK; +} +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK; +} +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK; +} + static inline uint32_t __offset_INTF(uint32_t idx) { switch (idx) { diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 8b9a7931b..bb1225aa2 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -22,11 +22,84 @@ struct mdp5_cfg_handler { /* mdp5_cfg must be exposed (used in mdp5.xml.h) */ const struct mdp5_cfg_hw *mdp5_cfg = NULL; -const struct mdp5_cfg_hw msm8x74_config = { +const struct mdp5_cfg_hw msm8x74v1_config = { + .name = "msm8x74v1", + .mdp = { + .count = 1, + .base = { 0x00100 }, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 22, + 
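/*
 * Illustrative note (not part of the patch): the MDP5_WB_* definitions added
 * above follow the usual rnndb-generated pattern -- a __MASK/__SHIFT pair plus
 * an inline packing helper per field, OR'ed together to build a register
 * value.  A minimal usage sketch is below; the field values are arbitrary
 * examples, not taken from hardware documentation.
 */
#if 0 /* example only */
static uint32_t example_wb_dst_format(void)
{
	/* OR together packed fields to build a WB_DST_FORMAT value: */
	return MDP5_WB_DST_FORMAT_DSTC0_OUT(1) |
	       MDP5_WB_DST_FORMAT_DSTC1_OUT(1) |
	       MDP5_WB_DST_FORMAT_DSTC2_OUT(1) |
	       MDP5_WB_DST_FORMAT_DST_BPP(2) |
	       MDP5_WB_DST_FORMAT_PACK_COUNT(2);
}
#endif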
.mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .flush_hw_mask = 0x0003ffff, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01200, 0x01600, 0x01a00 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + 0, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01e00, 0x02200, 0x02600 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02a00, 0x02e00 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + 0, + }, + .lm = { + .count = 5, + .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, + .nb_stages = 5, + }, + .dspp = { + .count = 3, + .base = { 0x04600, 0x04a00, 0x04e00 }, + }, + .pp = { + .count = 3, + .base = { 0x21b00, 0x21c00, 0x21d00 }, + }, + .intf = { + .base = { 0x21100, 0x21300, 0x21500, 0x21700 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 200000000, +}; + +const struct mdp5_cfg_hw msm8x74v2_config = { .name = "msm8x74", .mdp = { .count = 1, .base = { 0x00100 }, + .caps = MDP_CAP_SMP | + 0, }, .smp = { .mmb_count = 22, @@ -45,19 +118,27 @@ const struct mdp5_cfg_hw msm8x74_config = { .pipe_vig = { .count = 3, .base = { 0x01200, 0x01600, 0x01a00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 3, .base = { 0x01e00, 0x02200, 0x02600 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, .base = { 0x02a00, 0x02e00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 5, .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 3, @@ -65,7 +146,7 @@ const struct mdp5_cfg_hw msm8x74_config = { }, .ad = { .count = 2, - .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ + .base = { 0x13100, 0x13300 }, }, .pp = { .count = 3, @@ -88,6 +169,8 @@ const struct mdp5_cfg_hw apq8084_config = { .mdp = { .count = 1, .base = { 0x00100 }, + .caps = MDP_CAP_SMP | + 0, }, .smp = { .mmb_count = 44, @@ -113,19 +196,27 @@ const struct mdp5_cfg_hw apq8084_config = { .pipe_vig = { .count = 4, .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 4, .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, .base = { 0x03200, 0x03600 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 6, .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 4, @@ -157,6 +248,8 @@ const struct mdp5_cfg_hw msm8x16_config = { .mdp = { .count = 1, .base = { 0x01000 }, + .caps = MDP_CAP_SMP | + 0, }, .smp = { .mmb_count = 8, @@ -174,19 +267,27 @@ const struct mdp5_cfg_hw msm8x16_config = { .pipe_vig = { .count = 1, .base = { 0x05000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, 
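/*
 * Illustrative note (not part of the patch): with the per-pipe .caps field
 * (struct mdp5_pipe_block, added further below in mdp5_cfg.h), callers can
 * test pipe features at runtime instead of hard-coding per-SoC knowledge.
 * The helper below is hypothetical.
 */
#if 0 /* example only */
static bool example_pipe_can_scale(const struct mdp5_pipe_block *pipes)
{
	return !!(pipes->caps & MDP_PIPE_CAP_SCALE);
}
#endif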
}, .pipe_rgb = { .count = 2, .base = { 0x15000, 0x17000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 1, .base = { 0x25000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 2, /* LM0 and LM3 */ .base = { 0x45000, 0x48000 }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 1, @@ -203,14 +304,176 @@ const struct mdp5_cfg_hw msm8x16_config = { .max_clk = 320000000, }; +const struct mdp5_cfg_hw msm8x94_config = { + .name = "msm8x94", + .mdp = { + .count = 1, + .base = { 0x01000 }, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, + .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */ + .reserved = { + [1] = 1, [4] = 1, [7] = 1, [19] = 1, + [16] = 5, [17] = 5, [18] = 5, [22] = 5, + }, + }, + .ctl = { + .count = 5, + .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .flush_hw_mask = 0xf0ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x25000, 0x27000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 6, + .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 4, + .base = { 0x55000, 0x57000, 0x59000, 0x5b000 }, + + }, + .ad = { + .count = 3, + .base = { 0x79000, 0x79800, 0x7a000 }, + }, + .pp = { + .count = 4, + .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, + }, + .intf = { + .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 400000000, +}; + +const struct mdp5_cfg_hw msm8x96_config = { + .name = "msm8x96", + .mdp = { + .count = 1, + .base = { 0x01000 }, + .caps = MDP_CAP_DSC | + MDP_CAP_CDM | + 0, + }, + .ctl = { + .count = 5, + .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .flush_hw_mask = 0xf4ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x25000, 0x27000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .lm = { + .count = 6, + .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 2, + .base = { 0x55000, 0x57000 }, + }, + .ad = { + .count = 3, + .base = { 0x79000, 0x79800, 0x7a000 }, + }, + .pp = { + .count = 4, + .base = { 0x71000, 
0x71800, 0x72000, 0x72800 }, + }, + .cdm = { + .count = 1, + .base = { 0x7a200 }, + }, + .dsc = { + .count = 2, + .base = { 0x81000, 0x81400 }, + }, + .intf = { + .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 412500000, +}; + static const struct mdp5_cfg_handler cfg_handlers[] = { - { .revision = 0, .config = { .hw = &msm8x74_config } }, - { .revision = 2, .config = { .hw = &msm8x74_config } }, + { .revision = 0, .config = { .hw = &msm8x74v1_config } }, + { .revision = 2, .config = { .hw = &msm8x74v2_config } }, { .revision = 3, .config = { .hw = &apq8084_config } }, { .revision = 6, .config = { .hw = &msm8x16_config } }, + { .revision = 9, .config = { .hw = &msm8x94_config } }, + { .revision = 7, .config = { .hw = &msm8x96_config } }, }; - static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index 69349abe5..050e1618c 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -42,6 +42,13 @@ struct mdp5_sub_block { struct mdp5_lm_block { MDP5_SUB_BLOCK_DEFINITION; uint32_t nb_stages; /* number of stages per blender */ + uint32_t max_width; /* Maximum output resolution */ + uint32_t max_height; +}; + +struct mdp5_pipe_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* pipe capabilities */ }; struct mdp5_ctl_block { @@ -54,7 +61,12 @@ struct mdp5_smp_block { int mmb_size; /* MMB: size in bytes */ uint32_t clients[MAX_CLIENTS]; /* SMP port allocation /pipe */ mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ - int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ + uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ +}; + +struct mdp5_mdp_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */ }; #define MDP5_INTF_NUM_MAX 5 @@ -67,16 +79,18 @@ struct mdp5_intf_block { struct mdp5_cfg_hw { char *name; - struct mdp5_sub_block mdp; + struct mdp5_mdp_block mdp; struct mdp5_smp_block smp; struct mdp5_ctl_block ctl; - struct mdp5_sub_block pipe_vig; - struct mdp5_sub_block pipe_rgb; - struct mdp5_sub_block pipe_dma; + struct mdp5_pipe_block pipe_vig; + struct mdp5_pipe_block pipe_rgb; + struct mdp5_pipe_block pipe_dma; struct mdp5_lm_block lm; struct mdp5_sub_block dspp; struct mdp5_sub_block ad; struct mdp5_sub_block pp; + struct mdp5_sub_block dsc; + struct mdp5_sub_block cdm; struct mdp5_intf_block intf; uint32_t max_clk; diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index e4e89567f..8e6c9b598 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -21,6 +21,8 @@ struct mdp5_cmd_encoder { struct mdp5_interface intf; bool enabled; uint32_t bsc; + + struct mdp5_ctl *ctl; }; #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) @@ -30,7 +32,7 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING #include <mach/board.h> #include <linux/msm-bus.h> #include <linux/msm-bus-board.h> @@ -210,22 +212,19 @@ static void 
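/*
 * Illustrative note (not part of the patch): a recurring theme in these
 * changes is that a CTL is no longer allocated per-CRTC on demand; it is
 * requested per interface, kept by the encoder, and handed to the CRTC via
 * mdp5_crtc_set_pipeline().  A hypothetical construction helper, using only
 * APIs introduced in this patch, might look like the sketch below.
 */
#if 0 /* example only */
static struct drm_encoder *example_construct_encoder(struct drm_device *dev,
		struct mdp5_kms *mdp5_kms, struct mdp5_interface *intf)
{
	/* The interface, not the CRTC, now owns a CTL for its lifetime. */
	struct mdp5_ctl *ctl = mdp5_ctlm_request(mdp5_kms->ctlm, intf->num);

	if (!ctl)
		return NULL;

	/* The encoder keeps the CTL and later hands it to its CRTC via
	 * mdp5_crtc_set_pipeline() from its ->mode_set() hook. */
	return mdp5_encoder_init(dev, intf, ctl);
}
#endif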
mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, mode->vsync_end, mode->vtotal, mode->type, mode->flags); pingpong_tearcheck_setup(encoder, mode); - mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf); + mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf, + mdp5_cmd_enc->ctl); } static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) { struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; - int lm = mdp5_crtc_get_lm(encoder->crtc); if (WARN_ON(!mdp5_cmd_enc->enabled)) return; - /* Wait for the last frame done */ - mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm)); pingpong_tearcheck_disable(encoder); mdp5_ctl_set_encoder_state(ctl, false); @@ -239,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; if (WARN_ON(mdp5_cmd_enc->enabled)) @@ -281,22 +280,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, * start signal for the slave encoder */ if (intf_num == 1) - data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; + data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; else if (intf_num == 2) - data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; + data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; else return -EINVAL; /* Smart Panel, Sync mode */ - data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; + data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL; /* Make sure clocks are on when connectors calling this function. */ mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, - MDP5_SPLIT_DPL_LOWER_SMART_PANEL); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), + MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); mdp5_disable(mdp5_kms); return 0; @@ -304,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, /* initialize command mode encoder */ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf) + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { struct drm_encoder *encoder = NULL; struct mdp5_cmd_encoder *mdp5_cmd_enc; @@ -324,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); encoder = &mdp5_cmd_enc->base; + mdp5_cmd_enc->ctl = ctl; drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, DRM_MODE_ENCODER_DSI); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index c15307721..7f9f4ac88 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -46,6 +46,11 @@ struct mdp5_crtc { /* if there is a pending flip, these will be non-null: */ struct drm_pending_vblank_event *event; + /* Bits have been flushed at the last commit, + * used to decide if a vsync has happened since last commit. + */ + u32 flushed_mask; + #define PENDING_CURSOR 0x1 #define PENDING_FLIP 0x2 atomic_t pending; @@ -55,6 +60,11 @@ struct mdp5_crtc { struct mdp_irq vblank; struct mdp_irq err; + struct mdp_irq pp_done; + + struct completion pp_completion; + + bool cmd_mode; struct { /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ @@ -82,12 +92,18 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending) mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); } -static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask) +static void request_pp_done_pending(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + reinit_completion(&mdp5_crtc->pp_completion); +} + +static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask); - mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); + return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); } /* @@ -95,7 +111,7 @@ static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask) * so that we can safely queue unref to current fb (ie. next * vblank we know hw is done w/ previous scanout_fb). */ -static void crtc_flush_all(struct drm_crtc *crtc) +static u32 crtc_flush_all(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct drm_plane *plane; @@ -103,7 +119,7 @@ static void crtc_flush_all(struct drm_crtc *crtc) /* this should not happen: */ if (WARN_ON(!mdp5_crtc->ctl)) - return; + return 0; drm_atomic_crtc_for_each_plane(plane, crtc) { flush_mask |= mdp5_plane_get_flush(plane); @@ -111,7 +127,7 @@ static void crtc_flush_all(struct drm_crtc *crtc) flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm); - crtc_flush(crtc, flush_mask); + return crtc_flush(crtc, flush_mask); } /* if file!=NULL, this is preclose potential cancel-flip path */ @@ -143,7 +159,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) } if (mdp5_crtc->ctl && !crtc->state->enable) { - mdp5_ctl_release(mdp5_crtc->ctl); + /* set STAGE_UNUSED for all layers */ + mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0); mdp5_crtc->ctl = NULL; } } @@ -178,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc, /* * blend_setup() - blend all the planes of a CRTC * - * When border is enabled, the border color will ALWAYS be the base layer. - * Therefore, the first plane (private RGB pipe) will start at STAGE0. - * If disabled, the first plane starts at STAGE_BASE. - * - * Note: - * Border is not enabled here because the private plane is exactly - * the CRTC resolution. + * If no base layer is available, border will be enabled as the base layer. + * Otherwise all layers will be blended based on their stage calculated + * in mdp5_crtc_atomic_check. 
*/ static void blend_setup(struct drm_crtc *crtc) { @@ -192,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; const struct mdp5_cfg_hw *hw_cfg; - uint32_t lm = mdp5_crtc->lm, blend_cfg = 0; + struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; + const struct mdp_format *format; + uint32_t lm = mdp5_crtc->lm; + uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; -#define blender(stage) ((stage) - STAGE_BASE) + uint8_t stage[STAGE_MAX + 1]; + int i, plane_cnt = 0; +#define blender(stage) ((stage) - STAGE0) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); @@ -204,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc) if (!mdp5_crtc->ctl) goto out; + /* Collect all plane information */ drm_atomic_crtc_for_each_plane(plane, crtc) { - enum mdp_mixer_stage_id stage = - to_mdp5_plane_state(plane->state)->stage; + pstate = to_mdp5_plane_state(plane->state); + pstates[pstate->stage] = pstate; + stage[pstate->stage] = mdp5_plane_pipe(plane); + plane_cnt++; + } - /* - * Note: This cannot happen with current implementation but - * we need to check this condition once z property is added - */ - BUG_ON(stage > hw_cfg->lm.nb_stages); + /* + * If there is no base layer, enable border color. + * Although it's not possbile in current blend logic, + * put it here as a reminder. + */ + if (!pstates[STAGE_BASE] && plane_cnt) { + ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; + DBG("Border Color is enabled"); + } + + /* The reset for blending */ + for (i = STAGE0; i <= STAGE_MAX; i++) { + if (!pstates[i]) + continue; + + format = to_mdp_format( + msm_framebuffer_format(pstates[i]->base.fb)); + plane = pstates[i]->base.plane; + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); + fg_alpha = pstates[i]->alpha; + bg_alpha = 0xFF - pstates[i]->alpha; + DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); + + if (format->alpha_enable && pstates[i]->premultiplied) { + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } else if (format->alpha_enable) { + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= + MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } - /* LM */ - mdp5_write(mdp5_kms, - REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)), - MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | - MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm, + blender(i)), blend_op); mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, - blender(stage)), 0xff); + blender(i)), fg_alpha); mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, - blender(stage)), 0x00); - /* CTL */ - blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage); - DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name, - pipe2name(mdp5_plane_pipe(plane)), stage); + blender(i)), bg_alpha); } - DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg); - mdp5_ctl_blend(mdp5_crtc->ctl, 
lm, blend_cfg); + mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); out: spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); @@ -274,8 +332,8 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc) if (WARN_ON(!mdp5_crtc->enabled)) return; - /* set STAGE_UNUSED for all layers */ - mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); + if (mdp5_crtc->cmd_mode) + mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); mdp5_disable(mdp5_kms); @@ -296,6 +354,9 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc) mdp5_enable(mdp5_kms); mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); + if (mdp5_crtc->cmd_mode) + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done); + mdp5_crtc->enabled = true; } @@ -318,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct drm_device *dev = crtc->dev; - struct plane_state pstates[STAGE3 + 1]; + struct plane_state pstates[STAGE_MAX + 1]; + const struct mdp5_cfg_hw *hw_cfg; int cnt = 0, i; DBG("%s: check", mdp5_crtc->name); - /* request a free CTL, if none is already allocated for this CRTC */ - if (state->enable && !mdp5_crtc->ctl) { - mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc); - if (WARN_ON(!mdp5_crtc->ctl)) - return -EINVAL; - } - /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: */ + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); drm_atomic_crtc_state_for_each_plane(plane, state) { struct drm_plane_state *pstate; - - if (cnt >= ARRAY_SIZE(pstates)) { + if (cnt >= (hw_cfg->lm.nb_stages)) { dev_err(dev->dev, "too many planes!\n"); return -EINVAL; } @@ -348,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, */ if (!pstate) pstate = plane->state; - pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); cnt++; } + /* assign a stage based on sorted zpos property */ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); for (i = 0; i < cnt; i++) { @@ -367,13 +422,15 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, return 0; } -static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc) +static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); DBG("%s: begin", mdp5_crtc->name); } -static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) +static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -396,7 +453,18 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) return; blend_setup(crtc); - crtc_flush_all(crtc); + + /* PP_DONE irq is only used by command mode for now. + * It is better to request pending before FLUSH and START trigger + * to make sure no pp_done irq missed. + * This is safe because no pp_done will happen before SW trigger + * in command mode. 
+ */ + if (mdp5_crtc->cmd_mode) + request_pp_done_pending(crtc); + + mdp5_crtc->flushed_mask = crtc_flush_all(crtc); + request_pending(crtc, PENDING_FLIP); } @@ -601,6 +669,52 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); } +static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, + pp_done); + + complete(&mdp5_crtc->pp_completion); +} + +static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + int ret; + + ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, + msecs_to_jiffies(50)); + if (ret == 0) + dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm); +} + +static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + int ret; + + /* Should not call this function if crtc is disabled. */ + if (!mdp5_crtc->ctl) + return; + + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) & + mdp5_crtc->flushed_mask) == 0), + msecs_to_jiffies(50)); + if (ret <= 0) + dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id); + + mdp5_crtc->flushed_mask = 0; + + drm_crtc_vblank_put(crtc); +} + uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); @@ -613,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) complete_flip(crtc, file); } -/* set interface for routing crtc->encoder: */ -void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf) +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); @@ -622,19 +736,23 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf) /* now that we know what irq's we want: */ mdp5_crtc->err.irqmask = intf2err(intf->num); + mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf); + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) { + mdp5_crtc->pp_done.irqmask = lm2ppdone(lm); + mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq; + mdp5_crtc->cmd_mode = true; + } else { + mdp5_crtc->pp_done.irqmask = 0; + mdp5_crtc->pp_done.irq = NULL; + mdp5_crtc->cmd_mode = false; + } - /* Register command mode Pingpong done as vblank for now, - * so that atomic commit should wait for it to finish. - * Ideally, in the future, we should take rd_ptr done as vblank, - * and let atomic commit wait for pingpong done for commond mode. - */ - if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_crtc->vblank.irqmask = lm2ppdone(lm); - else - mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf); mdp_irq_update(&mdp5_kms->base); - mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); + mdp5_crtc->ctl = ctl; + mdp5_ctl_set_pipeline(ctl, intf, lm); } int mdp5_crtc_get_lm(struct drm_crtc *crtc) @@ -643,10 +761,14 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc) return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm; } -struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) +void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - return WARN_ON(!crtc) ? 
NULL : mdp5_crtc->ctl; + + if (mdp5_crtc->cmd_mode) + mdp5_crtc_wait_for_pp_done(crtc); + else + mdp5_crtc_wait_for_flush_done(crtc); } /* initialize crtc */ @@ -667,6 +789,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, spin_lock_init(&mdp5_crtc->lm_lock); spin_lock_init(&mdp5_crtc->cursor.lock); + init_completion(&mdp5_crtc->pp_completion); mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; @@ -682,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); plane->crtc = crtc; - mdp5_plane_install_properties(plane, &crtc->base); - return crtc; } diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index 5488b687c..4e81ca4f9 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -17,7 +17,7 @@ /* * CTL - MDP Control Pool Manager * - * Controls are shared between all CRTCs. + * Controls are shared between all display interfaces. * * They are intended to be used for data path configuration. * The top level register programming describes the complete data path for @@ -27,12 +27,11 @@ * * In certain use cases (high-resolution dual pipe), one single CTL can be * shared across multiple CRTCs. - * - * Because the number of CTLs can be less than the number of CRTCs, - * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is - * requested by the client (in mdp5_crtc_mode_set()). */ +#define CTL_STAT_BUSY 0x1 +#define CTL_STAT_BOOKED 0x2 + struct op_mode { struct mdp5_interface intf; @@ -46,8 +45,8 @@ struct mdp5_ctl { u32 id; int lm; - /* whether this CTL has been allocated or not: */ - bool busy; + /* CTL status bitmask */ + u32 status; /* Operation Mode Configuration for the Pipeline */ struct op_mode pipeline; @@ -61,7 +60,10 @@ struct mdp5_ctl { bool cursor_on; - struct drm_crtc *crtc; + /* True if the current CTL has FLUSH bits pending for single FLUSH. 
*/ + bool flush_pending; + + struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ }; struct mdp5_ctl_manager { @@ -74,6 +76,10 @@ struct mdp5_ctl_manager { /* to filter out non-present bits in the current hardware config */ u32 flush_hw_mask; + /* status for single FLUSH */ + bool single_flush_supported; + u32 single_flush_pending_mask; + /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ spinlock_t pool_lock; struct mdp5_ctl ctls[MAX_CTL]; @@ -168,11 +174,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) spin_unlock_irqrestore(&ctl->hw_lock, flags); } -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf) +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, + struct mdp5_interface *intf, int lm) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) { + dev_err(mdp5_kms->dev->dev, + "CTL %d is allocated by INTF %d, but used by INTF %d\n", + ctl->id, ctl->pipeline.intf.num, intf->num); + return -EINVAL; + } + + ctl->lm = lm; + memcpy(&ctl->pipeline.intf, intf, sizeof(*intf)); ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) | @@ -287,29 +303,85 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable) blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + ctl->cursor_on = enable; spin_unlock_irqrestore(&ctl->hw_lock, flags); ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); - ctl->cursor_on = enable; return 0; } -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) +static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); + case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); + case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); + case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); + case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); + case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); + case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); + case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); + case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); + case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + default: return 0; + } +} + +static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + if (stage < STAGE6) + return 0; + + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; + case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; + case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; + case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; + case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; + case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; + case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; + case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; + case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; + case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; + default: return 0; + } +} + +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, + u32 ctl_blend_op_flags) { unsigned long flags; + u32 blend_cfg = 0, blend_ext_cfg = 0; + int i, start_stage; + + if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { + start_stage = STAGE0; + blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + } else { + start_stage = STAGE_BASE; + } + + for (i = start_stage; i < start_stage + 
stage_cnt; i++) { + blend_cfg |= mdp_ctl_blend_mask(stage[i], i); + blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); + } + spin_lock_irqsave(&ctl->hw_lock, flags); if (ctl->cursor_on) blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; - else - blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg); spin_unlock_irqrestore(&ctl->hw_lock, flags); - ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm); + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm); + + DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm, + blend_cfg, blend_ext_cfg); return 0; } @@ -379,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) return sw_mask; } +static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, + u32 *flush_id) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + + if (ctl->pair) { + DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); + ctl->flush_pending = true; + ctl_mgr->single_flush_pending_mask |= (*flush_mask); + *flush_mask = 0; + + if (ctl->pair->flush_pending) { + *flush_id = min_t(u32, ctl->id, ctl->pair->id); + *flush_mask = ctl_mgr->single_flush_pending_mask; + + ctl->flush_pending = false; + ctl->pair->flush_pending = false; + ctl_mgr->single_flush_pending_mask = 0; + + DBG("Single FLUSH mask %x,ID %d", *flush_mask, + *flush_id); + } + } +} + /** * mdp5_ctl_commit() - Register Flush * @@ -392,12 +489,16 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) * CTL registers need to be flushed in some circumstances; if that is the * case, some trigger bits will be present in both flush mask and * ctl->pending_ctl_trigger. + * + * Return H/W flushed bit mask. 
*/ -int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; struct op_mode *pipeline = &ctl->pipeline; unsigned long flags; + u32 flush_id = ctl->id; + u32 curr_ctl_flush_mask; pipeline->start_mask &= ~flush_mask; @@ -413,9 +514,13 @@ int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) flush_mask &= ctl_mgr->flush_hw_mask; + curr_ctl_flush_mask = flush_mask; + + fix_for_single_flush(ctl, &flush_mask, &flush_id); + if (flush_mask) { spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask); + ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); spin_unlock_irqrestore(&ctl->hw_lock, flags); } @@ -424,25 +529,12 @@ int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) refill_start_mask(ctl); } - return 0; + return curr_ctl_flush_mask; } -void mdp5_ctl_release(struct mdp5_ctl *ctl) +u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) { - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - unsigned long flags; - - if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) { - dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)", - ctl->id, ctl->busy); - return; - } - - spin_lock_irqsave(&ctl_mgr->pool_lock, flags); - ctl->busy = false; - spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); - - DBG("CTL %d released", ctl->id); + return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); } int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) @@ -451,35 +543,79 @@ int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) } /* - * mdp5_ctl_request() - CTL dynamic allocation + * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH + */ +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; + struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + + /* do nothing silently if hw doesn't support */ + if (!ctl_mgr->single_flush_supported) + return 0; + + if (!enable) { + ctlx->pair = NULL; + ctly->pair = NULL; + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0); + return 0; + } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { + dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); + return -EINVAL; + } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { + dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); + return -EINVAL; + } + + ctlx->pair = ctly; + ctly->pair = ctlx; + + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), + MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); + + return 0; +} + +/* + * mdp5_ctl_request() - CTL allocation * - * Note: Current implementation considers that we can only have one CRTC per CTL + * Try to return booked CTL for @intf_num is 1 or 2, unbooked for other INTFs. + * If no CTL is available in preferred category, allocate from the other one. * - * @return first free CTL + * @return fail if no CTL is available. */ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, - struct drm_crtc *crtc) + int intf_num) { struct mdp5_ctl *ctl = NULL; + const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED; + u32 match = ((intf_num == 1) || (intf_num == 2)) ? 
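/*
 * Illustrative note (not part of the patch): mdp5_ctl_commit() now returns
 * the mask actually written to the hardware FLUSH register, and
 * mdp5_ctl_get_commit_status() reads that register back.  A FLUSH bit that is
 * still set means the double-buffered registers for that block have not been
 * latched yet, which is the condition mdp5_crtc_wait_for_flush_done() polls.
 * A minimal sketch of that check (helper name is hypothetical):
 */
#if 0 /* example only */
static bool example_flush_pending(struct mdp5_ctl *ctl, u32 flushed_mask)
{
	return (mdp5_ctl_get_commit_status(ctl) & flushed_mask) != 0;
}
#endif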
CTL_STAT_BOOKED : 0; unsigned long flags; int c; spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + /* search the preferred */ for (c = 0; c < ctl_mgr->nctl; c++) - if (!ctl_mgr->ctls[c].busy) - break; + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; - if (unlikely(c >= ctl_mgr->nctl)) { - dev_err(ctl_mgr->dev->dev, "No more CTL available!"); - goto unlock; - } + dev_warn(ctl_mgr->dev->dev, + "fall back to the other CTL category for INTF %d!\n", intf_num); - ctl = &ctl_mgr->ctls[c]; + match ^= CTL_STAT_BOOKED; + for (c = 0; c < ctl_mgr->nctl; c++) + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; - ctl->lm = mdp5_crtc_get_lm(crtc); - ctl->crtc = crtc; - ctl->busy = true; + dev_err(ctl_mgr->dev->dev, "No more CTL available!"); + goto unlock; + +found: + ctl = &ctl_mgr->ctls[c]; + ctl->pipeline.intf.num = intf_num; + ctl->lm = -1; + ctl->status |= CTL_STAT_BUSY; ctl->pending_ctl_trigger = 0; DBG("CTL %d allocated", ctl->id); @@ -508,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) } struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, - void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg) + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) { struct mdp5_ctl_manager *ctl_mgr; + const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); + int rev = mdp5_cfg_get_hw_rev(cfg_hnd); const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; unsigned long flags; int c, ret; @@ -544,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, if (WARN_ON(!ctl_cfg->base[c])) { dev_err(dev->dev, "CTL_%d: base is null!\n", c); ret = -EINVAL; + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); goto fail; } ctl->ctlm = ctl_mgr; ctl->id = c; ctl->reg_offset = ctl_cfg->base[c]; - ctl->busy = false; + ctl->status = 0; spin_lock_init(&ctl->hw_lock); } + + /* + * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI + * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when + * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. + * Single FLUSH is supported from hw rev v3.0. + */ + if (rev >= 3) { + ctl_mgr->single_flush_supported = true; + /* Reserve CTL0/1 for INTF1/2 */ + ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; + ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; + } spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); DBG("Pool of %d CTLs created.", ctl_mgr->nctl); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 7a6200099..96148c6f8 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -23,7 +23,7 @@ */ struct mdp5_ctl_manager; struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, - void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg); + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); @@ -32,49 +32,32 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. 
*/ -struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num); + int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); struct mdp5_interface; -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf); +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf, + int lm); int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled); int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable); - -/* - * blend_cfg (LM blender config): - * - * The function below allows the caller of mdp5_ctl_blend() to specify how pipes - * are being blended according to their stage (z-order), through @blend_cfg arg. - */ -static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, - enum mdp_mixer_stage_id stage) -{ - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); - case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); - case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); - case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); - case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); - case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); - case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); - case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); - case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); - case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); - default: return 0; - } -} +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); /* * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) * - * @blend_cfg: see LM blender config definition below + * @stage: array to contain the pipe num for each stage + * @stage_cnt: valid stage number in stage array + * @ctl_blend_op_flags: blender operation mode flags * * Note: * CTL registers need to be flushed after calling this function * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg); +#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, + u32 ctl_blend_op_flags); /** * mdp_ctl_flush_mask...() - Register FLUSH masks @@ -88,9 +71,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id); u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); /* @flush_mask: see CTL flush masks definitions below */ -int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); - -void mdp5_ctl_release(struct mdp5_ctl *ctl); +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); +u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 1188f4bf1..c9e32b08a 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -27,6 +27,8 @@ struct mdp5_encoder { spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ bool enabled; uint32_t bsc; + + struct mdp5_ctl *ctl; }; #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) @@ -36,7 +38,7 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING #include <mach/board.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> @@ -144,10 +146,14 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, mode->type, mode->flags); ctrl_pol = 0; - if 
(mode->flags & DRM_MODE_FLAG_NHSYNC) - ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW; + + /* DSI controller cannot handle active-low sync signals. */ + if (mdp5_encoder->intf.type != INTF_DSI) { + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW; + } /* probably need to get DATA_EN polarity from panel.. */ dtv_hsync_skew = 0; /* get this from panel? */ @@ -218,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf); + mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf, + mdp5_encoder->ctl); } static void mdp5_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; int lm = mdp5_crtc_get_lm(encoder->crtc); struct mdp5_interface *intf = &mdp5_encoder->intf; int intfn = mdp5_encoder->intf.num; @@ -260,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; struct mdp5_interface *intf = &mdp5_encoder->intf; int intfn = mdp5_encoder->intf.num; unsigned long flags; @@ -290,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); struct mdp5_kms *mdp5_kms; int intf_num; u32 data = 0; @@ -304,20 +312,21 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, * to use the master's enable signal for the slave encoder. */ if (intf_num == 1) - data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; + data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC; else if (intf_num == 2) - data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; + data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC; else return -EINVAL; /* Make sure clocks are on when connectors calling this function. 
*/ mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), - MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); /* Dumb Panel, Sync mode */ - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); + + mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); + mdp5_disable(mdp5_kms); return 0; @@ -325,7 +334,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, /* initialize encoder */ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf) + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { struct drm_encoder *encoder = NULL; struct mdp5_encoder *mdp5_encoder; @@ -341,6 +350,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf)); encoder = &mdp5_encoder->base; + mdp5_encoder->ctl = ctl; spin_lock_init(&mdp5_encoder->intf_lock); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 33bd4c616..b0d4b53b9 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -21,8 +21,11 @@ #include "msm_drv.h" #include "mdp5_kms.h" -void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) { + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0), + irqmask ^ (irqmask & old_irqmask)); mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); } @@ -71,9 +74,10 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; unsigned int id; - uint32_t status; + uint32_t status, enable; - status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)); + enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0)); + status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable; mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); VERB("status=%08x", status); @@ -112,15 +116,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms) int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + + mdp5_enable(mdp5_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), true); + mdp5_disable(mdp5_kms); + return 0; } void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + + mdp5_enable(mdp5_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), false); + mdp5_disable(mdp5_kms); } /* @@ -165,7 +178,6 @@ static int mdp5_hw_irqdomain_map(struct irq_domain *d, irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); irq_set_chip_data(irq, mdp5_kms); - set_irq_flags(irq, IRQF_VALID); return 0; } diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index bbacf9d2b..b532faa80 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -76,10 +76,29 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { + 
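The new mdp5_set_irqmask() above writes INTR_CLEAR before INTR_EN with the expression irqmask ^ (irqmask & old_irqmask), which is simply the set of bits being newly enabled, so any status latched while those interrupts were off is discarded first. A standalone illustration of that arithmetic (mask values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_irqmask = 0x00000402;	/* previously enabled bits (example) */
	uint32_t irqmask     = 0x00000c02;	/* bits requested now (example) */

	/* bits in irqmask but not in old_irqmask, i.e. newly enabled;
	 * equivalent to irqmask & ~old_irqmask */
	uint32_t newly_enabled = irqmask ^ (irqmask & old_irqmask);

	printf("clear stale status for %#x before writing INTR_EN\n",
	       newly_enabled);	/* prints 0x800 */
	return 0;
}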
int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + int nplanes = mdp5_kms->dev->mode_config.num_total_plane; + + for (i = 0; i < nplanes; i++) { + struct drm_plane *plane = state->planes[i]; + struct drm_plane_state *plane_state = state->plane_states[i]; + + if (!plane) + continue; + + mdp5_plane_complete_commit(plane, plane_state); + } + mdp5_disable(mdp5_kms); } +static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, + struct drm_crtc *crtc) +{ + mdp5_crtc_wait_for_commit_done(crtc); +} + static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -141,6 +160,7 @@ static const struct mdp_kms_funcs kms_funcs = { .disable_vblank = mdp5_disable_vblank, .prepare_commit = mdp5_prepare_commit, .complete_commit = mdp5_complete_commit, + .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, @@ -157,7 +177,8 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); - clk_disable_unprepare(mdp5_kms->lut_clk); + if (mdp5_kms->lut_clk) + clk_disable_unprepare(mdp5_kms->lut_clk); return 0; } @@ -169,14 +190,15 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) clk_prepare_enable(mdp5_kms->ahb_clk); clk_prepare_enable(mdp5_kms->axi_clk); clk_prepare_enable(mdp5_kms->core_clk); - clk_prepare_enable(mdp5_kms->lut_clk); + if (mdp5_kms->lut_clk) + clk_prepare_enable(mdp5_kms->lut_clk); return 0; } static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, enum mdp5_intf_type intf_type, int intf_num, - enum mdp5_intf_mode intf_mode) + enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; @@ -189,9 +211,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, if ((intf_type == INTF_DSI) && (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) - encoder = mdp5_cmd_encoder_init(dev, &intf); + encoder = mdp5_cmd_encoder_init(dev, &intf, ctl); else - encoder = mdp5_encoder_init(dev, &intf); + encoder = mdp5_encoder_init(dev, &intf, ctl); if (IS_ERR(encoder)) { dev_err(dev->dev, "failed to construct encoder\n"); @@ -229,6 +251,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num]; + struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; + struct mdp5_ctl *ctl; struct drm_encoder *encoder; int ret = 0; @@ -239,8 +263,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->edp) break; + ctl = mdp5_ctlm_request(ctlm, intf_num); + if (!ctl) { + ret = -EINVAL; + break; + } + encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, - MDP5_INTF_MODE_NONE); + MDP5_INTF_MODE_NONE, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -252,8 +282,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->hdmi) break; + ctl = mdp5_ctlm_request(ctlm, intf_num); + if (!ctl) { + ret = -EINVAL; + break; + } + encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, - MDP5_INTF_MODE_NONE); + MDP5_INTF_MODE_NONE, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -278,14 +314,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->dsi[dsi_id]) break; + ctl = 
mdp5_ctlm_request(ctlm, intf_num); + if (!ctl) { + ret = -EINVAL; + break; + } + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { mode = (i == MSM_DSI_CMD_ENCODER_ID) ? MDP5_INTF_DSI_MODE_COMMAND : MDP5_INTF_DSI_MODE_VIDEO; dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, - intf_num, mode); - if (IS_ERR(dsi_encs)) { - ret = PTR_ERR(dsi_encs); + intf_num, mode, ctl); + if (IS_ERR(dsi_encs[i])) { + ret = PTR_ERR(dsi_encs[i]); break; } } @@ -307,9 +349,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) static const enum mdp5_pipe crtcs[] = { SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, }; - static const enum mdp5_pipe pub_planes[] = { + static const enum mdp5_pipe vig_planes[] = { SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, }; + static const enum mdp5_pipe dma_planes[] = { + SSPP_DMA0, SSPP_DMA1, + }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; @@ -330,7 +375,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) struct drm_crtc *crtc; plane = mdp5_plane_init(dev, crtcs[i], true, - hw_cfg->pipe_rgb.base[i]); + hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct plane for %s (%d)\n", @@ -348,16 +393,30 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) priv->crtcs[priv->num_crtcs++] = crtc; } - /* Construct public planes: */ + /* Construct video planes: */ for (i = 0; i < hw_cfg->pipe_vig.count; i++) { struct drm_plane *plane; - plane = mdp5_plane_init(dev, pub_planes[i], false, - hw_cfg->pipe_vig.base[i]); + plane = mdp5_plane_init(dev, vig_planes[i], false, + hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + dev_err(dev->dev, "failed to construct %s plane: %d\n", + pipe2name(vig_planes[i]), ret); + goto fail; + } + } + + /* DMA planes */ + for (i = 0; i < hw_cfg->pipe_dma.count; i++) { + struct drm_plane *plane; + + plane = mdp5_plane_init(dev, dma_planes[i], false, + hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct %s plane: %d\n", - pipe2name(pub_planes[i]), ret); + pipe2name(dma_planes[i]), ret); goto fail; } } @@ -393,15 +452,19 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms, } static int get_clk(struct platform_device *pdev, struct clk **clkp, - const char *name) + const char *name, bool mandatory) { struct device *dev = &pdev->dev; struct clk *clk = devm_clk_get(dev, name); - if (IS_ERR(clk)) { + if (IS_ERR(clk) && mandatory) { dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); return PTR_ERR(clk); } - *clkp = clk; + if (IS_ERR(clk)) + DBG("skipping %s", name); + else + *clkp = clk; + return 0; } @@ -455,25 +518,26 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } - ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk"); + /* mandatory clocks: */ + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk"); + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src"); + ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk"); + ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk"); - if (ret) - goto fail; - ret = get_clk(pdev, &mdp5_kms->vsync_clk, 
"vsync_clk"); + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); if (ret) goto fail; + /* optional clocks: */ + get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); + /* we need to set a default rate before enabling. Set a safe * rate first, then figure out hw revision, and then set a * more optimal rate: @@ -490,18 +554,26 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) } config = mdp5_cfg_get_config(mdp5_kms->cfg); + mdp5_kms->caps = config->hw->mdp.caps; /* TODO: compute core clock rate at runtime */ clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); - mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp); - if (IS_ERR(mdp5_kms->smp)) { - ret = PTR_ERR(mdp5_kms->smp); - mdp5_kms->smp = NULL; - goto fail; + /* + * Some chipsets have a Shared Memory Pool (SMP), while others + * have dedicated latency buffering per source pipe instead; + * this section initializes the SMP: + */ + if (mdp5_kms->caps & MDP_CAP_SMP) { + mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp); + if (IS_ERR(mdp5_kms->smp)) { + ret = PTR_ERR(mdp5_kms->smp); + mdp5_kms->smp = NULL; + goto fail; + } } - mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw); + mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); if (IS_ERR(mdp5_kms->ctlm)) { ret = PTR_ERR(mdp5_kms->ctlm); mdp5_kms->ctlm = NULL; @@ -527,6 +599,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); dev_err(dev->dev, "failed to init iommu: %d\n", ret); + iommu_domain_free(config->platform.iommu); goto fail; } @@ -557,6 +630,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = config->hw->lm.max_width; + dev->mode_config.max_height = config->hw->lm.max_height; + return kms; fail: diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 2c0de174c..84f65d415 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -32,6 +32,8 @@ struct mdp5_kms { struct drm_device *dev; struct mdp5_cfg_handler *cfg; + uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ + /* mapper-id used to request GEM buffer mapped for scanout: */ int id; @@ -70,18 +72,12 @@ struct mdp5_kms { struct mdp5_plane_state { struct drm_plane_state base; - /* "virtual" zpos.. we calculate actual mixer-stage at runtime - * by sorting the attached planes by zpos and then assigning - * mixer stage lowest to highest. Private planes get default - * zpos of zero, and public planes a unique value that is - * greater than zero. This way, things work out if a naive - * userspace assigns planes to a crtc without setting zpos. 
- */ - int zpos; + /* aligned with property */ + uint8_t premultiplied; + uint8_t zpos; + uint8_t alpha; - /* the actual mixer stage, calculated in crtc->atomic_check() - * NOTE: this should move to mdp5_crtc_state, when that exists - */ + /* assigned by crtc blender */ enum mdp_mixer_stage_id stage; /* some additional transactional status to help us know in the @@ -192,7 +188,8 @@ static inline uint32_t lm2ppdone(int lm) int mdp5_disable(struct mdp5_kms *mdp5_kms); int mdp5_enable(struct mdp5_kms *mdp5_kms); -void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); void mdp5_irq_preinstall(struct msm_kms *kms); int mdp5_irq_postinstall(struct msm_kms *kms); void mdp5_irq_uninstall(struct msm_kms *kms); @@ -202,57 +199,38 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); -static inline bool pipe_supports_yuv(enum mdp5_pipe pipe) -{ - switch (pipe) { - case SSPP_VIG0: - case SSPP_VIG1: - case SSPP_VIG2: - case SSPP_VIG3: - return true; - default: - return false; - } -} - -static inline -uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, - uint32_t max_formats) -{ - return mdp_get_formats(pixel_formats, max_formats, - !pipe_supports_yuv(pipe)); -} - -void mdp5_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); void mdp5_plane_complete_flip(struct drm_plane *plane); +void mdp5_plane_complete_commit(struct drm_plane *plane, + struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); + enum mdp5_pipe pipe, bool private_plane, + uint32_t reg_offset, uint32_t caps); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc); -struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); -void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf); +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, + struct mdp5_interface *intf, struct mdp5_ctl *ctl); +void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_plane *plane, int id); struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf); + struct mdp5_interface *intf, struct mdp5_ctl *ctl); int mdp5_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder); #ifdef CONFIG_DRM_MSM_DSI struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf); + struct mdp5_interface *intf, struct mdp5_ctl *ctl); int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder); #else -static inline struct drm_encoder *mdp5_cmd_encoder_init( - struct drm_device *dev, struct mdp5_interface *intf) +static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { return ERR_PTR(-EINVAL); } diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 57b8f56ae..81cd49045 100644 --- 
a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -26,13 +26,12 @@ struct mdp5_plane { spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ uint32_t reg_offset; + uint32_t caps; uint32_t flush_mask; /* used to commit pipe registers */ uint32_t nformats; uint32_t formats[32]; - - bool enabled; }; #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) @@ -42,6 +41,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); + static void set_scanout_locked(struct drm_plane *plane, struct drm_framebuffer *fb); @@ -56,44 +56,132 @@ static bool plane_enabled(struct drm_plane_state *state) return state->fb && state->crtc; } -static int mdp5_plane_disable(struct drm_plane *plane) +static void mdp5_plane_destroy(struct drm_plane *plane) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; - - DBG("%s: disable", mdp5_plane->name); - if (mdp5_kms) { - /* Release the memory we requested earlier from the SMP: */ - mdp5_smp_release(mdp5_kms->smp, pipe); - } + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); - return 0; + kfree(mdp5_plane); } -static void mdp5_plane_destroy(struct drm_plane *plane) +static void mdp5_plane_install_rotation_property(struct drm_device *dev, + struct drm_plane *plane) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - drm_plane_helper_disable(plane); - drm_plane_cleanup(plane); + if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) && + !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) + return; - kfree(mdp5_plane); + if (!dev->mode_config.rotation_property) + dev->mode_config.rotation_property = + drm_mode_create_rotation_property(dev, + BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y)); + + if (dev->mode_config.rotation_property) + drm_object_attach_property(&plane->base, + dev->mode_config.rotation_property, + 0); } /* helper to install properties which are common to planes and crtcs */ -void mdp5_plane_install_properties(struct drm_plane *plane, +static void mdp5_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) { - // XXX + struct drm_device *dev = plane->dev; + struct msm_drm_private *dev_priv = dev->dev_private; + struct drm_property *prop; + +#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) 
do { \ + prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \ + if (!prop) { \ + prop = drm_property_##fnc(dev, 0, #name, \ + ##__VA_ARGS__); \ + if (!prop) { \ + dev_warn(dev->dev, \ + "Create property %s failed\n", \ + #name); \ + return; \ + } \ + dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \ + } \ + drm_object_attach_property(&plane->base, prop, init_val); \ + } while (0) + +#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_range, min, max) + +#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_enum, name##_prop_enum_list, \ + ARRAY_SIZE(name##_prop_enum_list)) + + INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1); + + mdp5_plane_install_rotation_property(dev, plane); + +#undef INSTALL_RANGE_PROPERTY +#undef INSTALL_ENUM_PROPERTY +#undef INSTALL_PROPERTY } -int mdp5_plane_set_property(struct drm_plane *plane, - struct drm_property *property, uint64_t val) +static int mdp5_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val) { - // XXX - return -EINVAL; + struct drm_device *dev = plane->dev; + struct mdp5_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + pstate = to_mdp5_plane_state(state); + +#define SET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + pstate->name = (type)val; \ + DBG("Set property %s %d", #name, (type)val); \ + goto done; \ + } \ + } while (0) + + SET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + return ret; +#undef SET_PROPERTY +} + +static int mdp5_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = plane->dev; + struct mdp5_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + pstate = to_mdp5_plane_state(state); + +#define GET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + *val = pstate->name; \ + DBG("Get property %s %lld", #name, *val); \ + goto done; \ + } \ + } while (0) + + GET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + return ret; +#undef SET_PROPERTY } static void mdp5_plane_reset(struct drm_plane *plane) @@ -106,11 +194,15 @@ static void mdp5_plane_reset(struct drm_plane *plane) kfree(to_mdp5_plane_state(plane->state)); mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); - if (plane->type == DRM_PLANE_TYPE_PRIMARY) { - mdp5_state->zpos = 0; - } else { - mdp5_state->zpos = 1 + drm_plane_index(plane); - } + /* assign default blend parameters */ + mdp5_state->alpha = 255; + mdp5_state->premultiplied = 0; + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + mdp5_state->zpos = STAGE_BASE; + else + mdp5_state->zpos = STAGE0 + drm_plane_index(plane); + mdp5_state->base.plane = plane; plane->state = &mdp5_state->base; @@ -149,29 +241,37 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = mdp5_plane_destroy, - .set_property = mdp5_plane_set_property, + .set_property = drm_atomic_helper_plane_set_property, + .atomic_set_property = mdp5_plane_atomic_set_property, + .atomic_get_property = mdp5_plane_atomic_get_property, .reset = 
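The SET_PROPERTY()/GET_PROPERTY() helpers above dispatch on the cached property pointer and copy the value into the matching field of the plane state. A standalone model of that pattern, reduced to the zpos case (types and names here are illustrative, not the DRM ones):

#include <stdint.h>
#include <stdio.h>

enum { PLANE_PROP_ZPOS, PLANE_PROP_MAX };

struct property { const char *name; };
struct plane_state { uint8_t zpos; };

static struct property *plane_property[PLANE_PROP_MAX];

static int set_property(struct plane_state *pstate,
			struct property *property, uint64_t val)
{
#define SET_PROPERTY(name, NAME, type) do { \
	if (plane_property[PLANE_PROP_##NAME] == property) { \
		pstate->name = (type)val; \
		return 0; \
	} \
} while (0)

	SET_PROPERTY(zpos, ZPOS, uint8_t);
#undef SET_PROPERTY
	return -1;	/* unknown property */
}

int main(void)
{
	static struct property zpos_prop = { "zpos" };
	struct plane_state st = { 0 };
	int ret;

	plane_property[PLANE_PROP_ZPOS] = &zpos_prop;
	ret = set_property(&st, &zpos_prop, 3);
	printf("ret=%d zpos=%u\n", ret, st.zpos);	/* ret=0 zpos=3 */
	return 0;
}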
mdp5_plane_reset, .atomic_duplicate_state = mdp5_plane_duplicate_state, .atomic_destroy_state = mdp5_plane_destroy_state, }; static int mdp5_plane_prepare_fb(struct drm_plane *plane, - struct drm_framebuffer *fb, const struct drm_plane_state *new_state) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); + struct drm_framebuffer *fb = new_state->fb; + + if (!new_state->fb) + return 0; DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id); return msm_framebuffer_prepare(fb, mdp5_kms->id); } static void mdp5_plane_cleanup_fb(struct drm_plane *plane, - struct drm_framebuffer *fb, const struct drm_plane_state *old_state) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); + struct drm_framebuffer *fb = old_state->fb; + + if (!fb) + return; DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id); msm_framebuffer_cleanup(fb, mdp5_kms->id); @@ -182,10 +282,44 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *old_state = plane->state; + const struct mdp_format *format; + bool vflip, hflip; DBG("%s: check (%d -> %d)", mdp5_plane->name, plane_enabled(old_state), plane_enabled(state)); + if (plane_enabled(state)) { + format = to_mdp_format(msm_framebuffer_format(state->fb)); + if (MDP_FORMAT_IS_YUV(format) && + !pipe_supports_yuv(mdp5_plane->caps)) { + dev_err(plane->dev->dev, + "Pipe doesn't support YUV\n"); + + return -EINVAL; + } + + if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && + (((state->src_w >> 16) != state->crtc_w) || + ((state->src_h >> 16) != state->crtc_h))) { + dev_err(plane->dev->dev, + "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", + state->src_w >> 16, state->src_h >> 16, + state->crtc_w, state->crtc_h); + + return -EINVAL; + } + + hflip = !!(state->rotation & BIT(DRM_REFLECT_X)); + vflip = !!(state->rotation & BIT(DRM_REFLECT_Y)); + if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || + (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { + dev_err(plane->dev->dev, + "Pipe doesn't support flip\n"); + + return -EINVAL; + } + } + if (plane_enabled(state) && plane_enabled(old_state)) { /* we cannot change SMP block configuration during scanout: */ bool full_modeset = false; @@ -224,7 +358,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, if (!plane_enabled(state)) { to_mdp5_plane_state(state)->pending = true; - mdp5_plane_disable(plane); } else if (to_mdp5_plane_state(state)->mode_changed) { int ret; to_mdp5_plane_state(state)->pending = true; @@ -365,68 +498,170 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) return 0; } -static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, - uint32_t phasex_steps[2]) +static int calc_scalex_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasex_steps[COMP_MAX]) { + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; uint32_t phasex_step; unsigned int hsub; int ret; ret = calc_phase_step(src, dest, &phasex_step); - if (ret) + if (ret) { + dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret); return ret; + } hsub = drm_format_horz_chroma_subsampling(pixel_format); - phasex_steps[0] = phasex_step; - phasex_steps[1] = phasex_step / hsub; + phasex_steps[COMP_0] = phasex_step; + phasex_steps[COMP_3] = phasex_step; + phasex_steps[COMP_1_2] = phasex_step / hsub; return 0; } -static int 
calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, - uint32_t phasey_steps[2]) +static int calc_scaley_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasey_steps[COMP_MAX]) { + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; uint32_t phasey_step; unsigned int vsub; int ret; ret = calc_phase_step(src, dest, &phasey_step); - if (ret) + if (ret) { + dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret); return ret; + } vsub = drm_format_vert_chroma_subsampling(pixel_format); - phasey_steps[0] = phasey_step; - phasey_steps[1] = phasey_step / vsub; + phasey_steps[COMP_0] = phasey_step; + phasey_steps[COMP_3] = phasey_step; + phasey_steps[COMP_1_2] = phasey_step / vsub; return 0; } -static uint32_t get_scalex_config(uint32_t src, uint32_t dest) +static uint32_t get_scale_config(const struct mdp_format *format, + uint32_t src, uint32_t dst, bool horz) +{ + bool scaling = format->is_yuv ? true : (src != dst); + uint32_t sub, pix_fmt = format->base.pixel_format; + uint32_t ya_filter, uv_filter; + bool yuv = format->is_yuv; + + if (!scaling) + return 0; + + if (yuv) { + sub = horz ? drm_format_horz_chroma_subsampling(pix_fmt) : + drm_format_vert_chroma_subsampling(pix_fmt); + uv_filter = ((src / sub) <= dst) ? + SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + } + ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + if (horz) + return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) | + COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter)); + else + return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) | + COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter)); +} + +static void calc_pixel_ext(const struct mdp_format *format, + uint32_t src, uint32_t dst, uint32_t phase_step[2], + int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX], + bool horz) { - uint32_t filter; + bool scaling = format->is_yuv ? true : (src != dst); + int i; - filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + /* + * Note: + * We assume here that: + * 1. PCMN filter is used for downscale + * 2. bilinear filter is used for upscale + * 3. we are in a single pipe configuration + */ - return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | - MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter); + for (i = 0; i < COMP_MAX; i++) { + pix_ext_edge1[i] = 0; + pix_ext_edge2[i] = scaling ? 1 : 0; + } } -static uint32_t get_scaley_config(uint32_t src, uint32_t dest) +static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + const struct mdp_format *format, + uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX], + uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX]) { - uint32_t filter; + uint32_t pix_fmt = format->base.pixel_format; + uint32_t lr, tb, req; + int i; + + for (i = 0; i < COMP_MAX; i++) { + uint32_t roi_w = src_w; + uint32_t roi_h = src_h; - filter = (src <= dest) ? 
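get_scale_config() above picks the bilinear filter for upscaling and PCMN for downscaling, and makes the chroma decision on the sub-sampled width. A standalone worked example with made-up numbers (1920 to 960, NV12-style 2x horizontal chroma subsampling):

#include <stdio.h>

enum { SCALE_FILTER_BIL, SCALE_FILTER_PCMN };

static const char *fname(int f)
{
	return f == SCALE_FILTER_BIL ? "BIL" : "PCMN";
}

int main(void)
{
	unsigned int src = 1920, dst = 960;	/* 2:1 downscale (example) */
	unsigned int hsub = 2;			/* e.g. NV12 horizontal subsampling */

	int ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
	int uv_filter = ((src / hsub) <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;

	/* luma is downscaled, so PCMN; chroma goes 960 -> 960, so bilinear */
	printf("ya=%s uv=%s\n", fname(ya_filter), fname(uv_filter));
	return 0;
}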
SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + if (format->is_yuv && i == COMP_1_2) { + roi_w /= drm_format_horz_chroma_subsampling(pix_fmt); + roi_h /= drm_format_vert_chroma_subsampling(pix_fmt); + } - return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter); + lr = (pe_left[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) : + MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]); + + lr |= (pe_right[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) : + MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]); + + tb = (pe_top[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) : + MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]); + + tb |= (pe_bottom[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) : + MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]); + + req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w + + pe_left[i] + pe_right[i]); + + req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h + + pe_top[i] + pe_bottom[i]); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req); + + DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i, + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF), + FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT)); + + DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i, + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF), + FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM)); + } } + static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, @@ -435,15 +670,18 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t src_w, uint32_t src_h) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + struct drm_plane_state *pstate = plane->state; struct mdp5_kms *mdp5_kms = get_kms(plane); - struct device *dev = mdp5_kms->dev->dev; enum mdp5_pipe pipe = mdp5_plane->pipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - /* below array -> index 0: comp 0/3 ; index 1: comp 1/2 */ - uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,}; + uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,}; + bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT; + int pe_left[COMP_MAX], pe_right[COMP_MAX]; + int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; + bool vflip, hflip; unsigned long flags; int ret; @@ -467,10 +705,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); /* Request some memory from the SMP: */ - ret = mdp5_smp_request(mdp5_kms->smp, - mdp5_plane->pipe, fb->pixel_format, src_w); - if (ret) - return ret; + if (mdp5_kms->smp) { + ret = mdp5_smp_request(mdp5_kms->smp, + mdp5_plane->pipe, format, src_w, false); + if (ret) + return ret; + } /* * Currently we update the hw for allocations/requests immediately, @@ -478,32 +718,34 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, * would move into atomic->check_plane_state(), while updating the * hw would remain here: */ - 
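A quick numeric illustration of the REQ_PIXELS bookkeeping in mdp5_write_pixel_ext() above, using the simple repeat values calc_pixel_ext() produces (0 on the leading edge, 1 on the trailing edge when scaling); the format and width are made-up example inputs:

#include <stdio.h>

int main(void)
{
	unsigned int src_w = 1920, hsub = 2;	/* e.g. NV12: chroma is half width */
	int pe_left = 0, pe_right = 1;		/* calc_pixel_ext() with scaling */

	unsigned int luma_req   = src_w + pe_left + pe_right;
	unsigned int chroma_req = src_w / hsub + pe_left + pe_right;

	/* 1921 luma and 961 chroma pixels requested per line */
	printf("luma=%u chroma=%u\n", luma_req, chroma_req);
	return 0;
}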
mdp5_smp_configure(mdp5_kms->smp, pipe); + if (mdp5_kms->smp) + mdp5_smp_configure(mdp5_kms->smp, pipe); - /* SCALE is used to both scale and up-sample chroma components */ + ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); + if (ret) + return ret; - if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) { - /* TODO calc hdecm */ - ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step); - if (ret) { - dev_err(dev, "X scaling (%d -> %d) failed: %d\n", - src_w, crtc_w, ret); - return ret; - } - config |= get_scalex_config(src_w, crtc_w); - } + ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step); + if (ret) + return ret; - if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) { - /* TODO calc vdecm */ - ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step); - if (ret) { - dev_err(dev, "Y scaling (%d -> %d) failed: %d\n", - src_h, crtc_h, ret); - return ret; - } - config |= get_scaley_config(src_h, crtc_h); + if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) { + calc_pixel_ext(format, src_w, crtc_w, phasex_step, + pe_left, pe_right, true); + calc_pixel_ext(format, src_h, crtc_h, phasey_step, + pe_top, pe_bottom, false); } + /* TODO calc hdecm, vdecm */ + + /* SCALE is used to both scale and up-sample chroma components */ + config |= get_scale_config(format, src_w, crtc_w, true); + config |= get_scale_config(format, src_h, crtc_h, false); + DBG("scale config = %x", config); + + hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X)); + vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y)); + spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), @@ -535,7 +777,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | - MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), @@ -545,29 +787,41 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), + (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | + (vflip ? 
MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | + COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) | MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); /* not using secure mode: */ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), - phasex_step[0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), - phasey_step[0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), - phasex_step[1]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), - phasey_step[1]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), - MDP5_PIPE_DECIMATION_VERT(vdecm) | - MDP5_PIPE_DECIMATION_HORZ(hdecm)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); - - if (MDP_FORMAT_IS_YUV(format)) - csc_enable(mdp5_kms, pipe, - mdp_get_default_csc_cfg(CSC_YUV2RGB)); - else - csc_disable(mdp5_kms, pipe); + if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) + mdp5_write_pixel_ext(mdp5_kms, pipe, format, + src_w, pe_left, pe_right, + src_h, pe_top, pe_bottom); + + if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) { + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + phasex_step[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + phasey_step[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + phasex_step[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + phasey_step[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), + MDP5_PIPE_DECIMATION_VERT(vdecm) | + MDP5_PIPE_DECIMATION_HORZ(hdecm)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); + } + + if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) { + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); + } set_scanout_locked(plane, fb); @@ -584,7 +838,8 @@ void mdp5_plane_complete_flip(struct drm_plane *plane) DBG("%s: complete flip", mdp5_plane->name); - mdp5_smp_commit(mdp5_kms->smp, pipe); + if (mdp5_kms->smp) + mdp5_smp_commit(mdp5_kms->smp, pipe); to_mdp5_plane_state(plane->state)->pending = false; } @@ -602,9 +857,24 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return mdp5_plane->flush_mask; } +/* called after vsync in thread context */ +void mdp5_plane_complete_commit(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + enum mdp5_pipe pipe = mdp5_plane->pipe; + + if (!plane_enabled(plane->state) && mdp5_kms->smp) { + DBG("%s: free SMP", mdp5_plane->name); + mdp5_smp_release(mdp5_kms->smp, pipe); + } +} + /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) + enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset, + uint32_t caps) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; @@ -621,9 +891,11 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, mdp5_plane->pipe = pipe; mdp5_plane->name = pipe2name(pipe); + mdp5_plane->caps = caps; - mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, - ARRAY_SIZE(mdp5_plane->formats)); + mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, + ARRAY_SIZE(mdp5_plane->formats), + !pipe_supports_yuv(mdp5_plane->caps)); mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe); mdp5_plane->reg_offset = reg_offset; diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c 
b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 16702aecf..6f425c25d 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -34,22 +34,44 @@ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). * * For each block that can be dynamically allocated, it can be either - * free, or pending/in-use by a client. The updates happen in three steps: + * free: + * The block is free. + * + * pending: + * The block is allocated to some client and not free. + * + * configured: + * The block is allocated to some client, and assigned to that + * client in MDP5_MDP_SMP_ALLOC registers. + * + * inuse: + * The block is being actively used by a client. + * + * The updates happen in the following steps: * * 1) mdp5_smp_request(): * When plane scanout is setup, calculate required number of - * blocks needed per client, and request. Blocks not inuse or - * pending by any other client are added to client's pending - * set. + * blocks needed per client, and request. Blocks neither inuse nor + * configured nor pending by any other client are added to client's + * pending set. + * For shrinking, blocks in pending but not in configured can be freed + * directly, but those already in configured will be freed later by + * mdp5_smp_commit. * * 2) mdp5_smp_configure(): * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers * are configured for the union(pending, inuse) + * Current pending is copied to configured. + * It is assumed that mdp5_smp_request and mdp5_smp_configure do not + * run concurrently for the same pipe. * * 3) mdp5_smp_commit(): - * After next vblank, copy pending -> inuse. Optionally update + * After next vblank, copy configured -> inuse. Optionally update + * MDP5_SMP_ALLOC registers if there are newly unused blocks * + * 4) mdp5_smp_release(): + * Must be called after the pipe is disabled and no longer uses any SMB + * * On the next vblank after changes have been committed to hw, the * client's pending blocks become it's in-use blocks (and no-longer * in-use blocks become available to other clients). @@ -68,6 +90,8 @@ struct mdp5_smp { struct drm_device *dev; + uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */ + int blk_cnt; int blk_size; @@ -77,6 +101,9 @@ struct mdp5_smp { struct mdp5_client_smp_state client_state[MAX_CLIENTS]; }; +static void update_smp_state(struct mdp5_smp *smp, + u32 cid, mdp5_smp_state_t *assigned); + static inline struct mdp5_kms *get_kms(struct mdp5_smp *smp) { @@ -112,14 +139,12 @@ static int smp_request_block(struct mdp5_smp *smp, u32 cid, int nblks) { struct mdp5_kms *mdp5_kms = get_kms(smp); - const struct mdp5_cfg_hw *hw_cfg; struct mdp5_client_smp_state *ps = &smp->client_state[cid]; int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; - int reserved; + uint8_t reserved; unsigned long flags; - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - reserved = hw_cfg->smp.reserved[cid]; + reserved = smp->reserved[cid]; spin_lock_irqsave(&smp->state_lock, flags); @@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp, for (i = cur_nblks; i > nblks; i--) { int blk = find_first_bit(ps->pending, cnt); clear_bit(blk, ps->pending); - /* don't clear in global smp_state until _commit() */ + + /* clear in global smp_state now unless the block is still in + * configured; those blocks are cleared at _commit() + */ + if (!test_bit(blk, ps->configured)) + clear_bit(blk, smp->state); } } @@ -179,12 +209,14 @@ static void set_fifo_thresholds(struct mdp5_smp *smp, * decimated width. Ie. 
SMP buffering sits downstream of decimation (which * presumably happens during the dma from scanout buffer). */ -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width) +int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, + const struct mdp_format *format, u32 width, bool hdecim) { struct mdp5_kms *mdp5_kms = get_kms(smp); struct drm_device *dev = mdp5_kms->dev; int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines, nblks, ret; + u32 fmt = format->base.pixel_format; nplanes = drm_format_num_planes(fmt); hsub = drm_format_horz_chroma_subsampling(fmt); @@ -192,6 +224,21 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid /* different if BWC (compressed framebuffer?) enabled: */ nlines = 2; + /* Newer MDPs have split/packing logic, which fetches sub-sampled + * U and V components (splits them from Y if necessary) and packs + * them together, writes to SMP using a single client. + */ + if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { + fmt = DRM_FORMAT_NV24; + nplanes = 2; + + /* if decimation is enabled, HW decimates less on the + * sub sampled chroma components + */ + if (hdecim && (hsub > 1)) + hsub = 1; + } + for (i = 0, nblks = 0; i < nplanes; i++) { int n, fetch_stride, cpp; @@ -223,10 +270,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid /* Release SMP blocks for all clients of the pipe */ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) { - int i, nblks; + int i; + unsigned long flags; + int cnt = smp->blk_cnt; + + for (i = 0; i < pipe2nclients(pipe); i++) { + mdp5_smp_state_t assigned; + u32 cid = pipe2client(pipe, i); + struct mdp5_client_smp_state *ps = &smp->client_state[cid]; + + spin_lock_irqsave(&smp->state_lock, flags); + + /* clear hw assignment */ + bitmap_or(assigned, ps->inuse, ps->configured, cnt); + update_smp_state(smp, CID_UNUSED, &assigned); + + /* free to global pool */ + bitmap_andnot(smp->state, smp->state, ps->pending, cnt); + bitmap_andnot(smp->state, smp->state, assigned, cnt); + + /* clear client's infor */ + bitmap_zero(ps->pending, cnt); + bitmap_zero(ps->configured, cnt); + bitmap_zero(ps->inuse, cnt); + + spin_unlock_irqrestore(&smp->state_lock, flags); + } - for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++) - smp_request_block(smp, pipe2client(pipe, i), 0); set_fifo_thresholds(smp, pipe, 0); } @@ -274,12 +344,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) u32 cid = pipe2client(pipe, i); struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - bitmap_or(assigned, ps->inuse, ps->pending, cnt); + /* + * if vblank has not happened since last smp_configure + * skip the configure for now + */ + if (!bitmap_equal(ps->inuse, ps->configured, cnt)) + continue; + + bitmap_copy(ps->configured, ps->pending, cnt); + bitmap_or(assigned, ps->inuse, ps->configured, cnt); update_smp_state(smp, cid, &assigned); } } -/* step #3: after vblank, copy pending -> inuse: */ +/* step #3: after vblank, copy configured -> inuse: */ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) { int cnt = smp->blk_cnt; @@ -295,7 +373,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) * using, which can be released and made available to other * clients: */ - if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { + if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { unsigned long flags; spin_lock_irqsave(&smp->state_lock, flags); @@ -306,7 +384,7 @@ void 
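The SMP block lifecycle described in the comments above (pending, then configured, then inuse, then released) can be summarised with a standalone model that uses a plain 32-bit mask instead of the driver's mdp5_smp_state_t bitmaps; it only mirrors the per-client hand-off, not the global pool or the ALLOC register writes:

#include <stdint.h>
#include <stdio.h>

struct client_state {
	uint32_t pending;	/* requested, not yet written to hw */
	uint32_t configured;	/* written to MDP5_MDP_SMP_ALLOC, pre-vblank */
	uint32_t inuse;		/* owned by hw after the vblank */
};

int main(void)
{
	struct client_state ps = { 0 };

	ps.pending = 0x0000000f;		/* 1) mdp5_smp_request(): 4 blocks */

	if (ps.inuse == ps.configured)		/* 2) mdp5_smp_configure(): only   */
		ps.configured = ps.pending;	/*    once the previous config has */
						/*    been committed               */

	ps.inuse = ps.configured;		/* 3) mdp5_smp_commit(), post-vblank */

	ps.pending = ps.configured = ps.inuse = 0;	/* 4) mdp5_smp_release() */

	printf("released, inuse=%#x\n", ps.inuse);
	return 0;
}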
mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) update_smp_state(smp, CID_UNUSED, &released); } - bitmap_copy(ps->inuse, ps->pending, cnt); + bitmap_copy(ps->inuse, ps->configured, cnt); } } @@ -332,6 +410,7 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo /* statically tied MMBs cannot be re-allocated: */ bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt); + memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); spin_lock_init(&smp->state_lock); return smp; diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h index e47179f63..20b87e800 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h @@ -23,6 +23,7 @@ struct mdp5_client_smp_state { mdp5_smp_state_t inuse; + mdp5_smp_state_t configured; mdp5_smp_state_t pending; }; @@ -38,7 +39,8 @@ struct mdp5_smp; struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg); void mdp5_smp_destroy(struct mdp5_smp *smp); -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width); +int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, + const struct mdp_format *format, u32 width, bool hdecim); void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe); void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe); void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe); diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/kernel/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index a1d35f162..0aec1ac1f 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- 
/home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -46,13 +46,13 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. enum mdp_chroma_samp_type { - CHROMA_RGB = 0, + CHROMA_FULL = 0, CHROMA_H2V1 = 1, CHROMA_H1V2 = 2, CHROMA_420 = 3, }; -enum mdp_sspp_fetch_type { +enum mdp_fetch_type { MDP_PLANE_INTERLEAVED = 0, MDP_PLANE_PLANAR = 1, MDP_PLANE_PSEUDO_PLANAR = 2, @@ -65,6 +65,10 @@ enum mdp_mixer_stage_id { STAGE1 = 3, STAGE2 = 4, STAGE3 = 5, + STAGE4 = 6, + STAGE5 = 7, + STAGE6 = 8, + STAGE_MAX = 8, }; enum mdp_alpha_type { @@ -74,6 +78,13 @@ enum mdp_alpha_type { BG_PIXEL = 3, }; +enum mdp_component_type { + COMP_0 = 0, + COMP_1_2 = 1, + COMP_3 = 2, + COMP_MAX = 3, +}; + enum mdp_bpc { BPC1 = 0, BPC5 = 1, diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp_format.c b/kernel/drivers/gpu/drm/msm/mdp/mdp_format.c index f683433b6..1c2caffc9 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp_format.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp_format.c @@ -71,7 +71,7 @@ static struct csc_cfg csc_convert[CSC_MAX] = { }, }; -#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \ +#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \ .base = { .pixel_format = DRM_FORMAT_ ## name }, \ .bpc_a = BPC ## a ## A, \ .bpc_r = BPC ## r, \ @@ -83,7 +83,8 @@ static struct csc_cfg csc_convert[CSC_MAX] = { .cpp = c, \ .unpack_count = cnt, \ .fetch_type = fp, \ - .chroma_sample = cs \ + .chroma_sample = cs, \ + .is_yuv = yuv, \ } #define BPC0A 0 @@ -95,24 +96,49 @@ static struct csc_cfg csc_convert[CSC_MAX] = { static const struct mdp_format formats[] = { /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... 
*/ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), /* --- RGB formats above / YUV formats below this line --- */ + /* 2 plane YUV */ FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), + FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + /* 1 plane YUV */ + FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + /* 3 plane YUV */ + FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), + FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), }; /* diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.c b/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.c index 1988c243f..642873040 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.c @@ -39,7 +39,8 @@ static void update_irq(struct mdp_kms *mdp_kms) list_for_each_entry(irq, &mdp_kms->irq_list, node) irqmask |= irq->irqmask; - mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); + mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask); + mdp_kms->cur_irq_mask = irqmask; } /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder diff --git a/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.h b/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.h index 5ae4039d6..303130320 100644 --- a/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/kernel/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -30,7 +30,8 @@ struct mdp_kms; struct mdp_kms_funcs { struct msm_kms_funcs base; - void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask); + void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); }; struct mdp_kms { @@ -42,6 +43,7 @@ struct mdp_kms { bool in_irq; struct list_head irq_list; /* list of mdp4_irq */ uint32_t 
vblank_mask; /* irq bits set for userspace vblank */ + uint32_t cur_irq_mask; /* current irq mask */ }; #define to_mdp_kms(x) container_of(x, struct mdp_kms, base) @@ -88,15 +90,35 @@ struct mdp_format { uint8_t unpack[4]; bool alpha_enable, unpack_tight; uint8_t cpp, unpack_count; - enum mdp_sspp_fetch_type fetch_type; + enum mdp_fetch_type fetch_type; enum mdp_chroma_samp_type chroma_sample; + bool is_yuv; }; #define to_mdp_format(x) container_of(x, struct mdp_format, base) -#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB) +#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); +/* MDP capabilities */ +#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ +#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ +#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ + +/* MDP pipe capabilities */ +#define MDP_PIPE_CAP_HFLIP BIT(0) +#define MDP_PIPE_CAP_VFLIP BIT(1) +#define MDP_PIPE_CAP_SCALE BIT(2) +#define MDP_PIPE_CAP_CSC BIT(3) +#define MDP_PIPE_CAP_DECIMATION BIT(4) +#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) + +static inline bool pipe_supports_yuv(uint32_t pipe_caps) +{ + return (pipe_caps & MDP_PIPE_CAP_SCALE) && + (pipe_caps & MDP_PIPE_CAP_CSC); +} + enum csc_type { CSC_RGB2RGB = 0, CSC_YUV2RGB, diff --git a/kernel/drivers/gpu/drm/msm/msm_atomic.c b/kernel/drivers/gpu/drm/msm/msm_atomic.c index 5b192128c..7eb253bc2 100644 --- a/kernel/drivers/gpu/drm/msm/msm_atomic.c +++ b/kernel/drivers/gpu/drm/msm/msm_atomic.c @@ -84,6 +84,33 @@ static void commit_destroy(struct msm_commit *c) kfree(c); } +static void msm_atomic_wait_for_commit_done(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc; + struct msm_drm_private *priv = old_state->dev->dev_private; + struct msm_kms *kms = priv->kms; + int ncrtcs = old_state->dev->mode_config.num_crtc; + int i; + + for (i = 0; i < ncrtcs; i++) { + crtc = old_state->crtcs[i]; + + if (!crtc) + continue; + + if (!crtc->state->enable) + continue; + + /* Legacy cursor ioctls are completely unsynced, and userspace + * relies on that (by doing tons of cursor updates). */ + if (old_state->legacy_cursor_update) + continue; + + kms->funcs->wait_for_crtc_commit_done(kms, crtc); + } +} + /* The (potentially) asynchronous part of the commit. At this point * nothing can fail short of armageddon. 
*/ @@ -98,7 +125,7 @@ static void complete_commit(struct msm_commit *c) drm_atomic_helper_commit_modeset_disables(dev, state); - drm_atomic_helper_commit_planes(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); drm_atomic_helper_commit_modeset_enables(dev, state); @@ -115,7 +142,7 @@ static void complete_commit(struct msm_commit *c) * not be critical path) */ - drm_atomic_helper_wait_for_vblanks(dev, state); + msm_atomic_wait_for_commit_done(dev, state); drm_atomic_helper_cleanup_planes(dev, state); @@ -139,7 +166,6 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb) c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ)); } - int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -178,7 +204,7 @@ int msm_atomic_commit(struct drm_device *dev, { int nplanes = dev->mode_config.num_total_plane; int ncrtcs = dev->mode_config.num_crtc; - struct timespec timeout; + ktime_t timeout; struct msm_commit *c; int i, ret; @@ -187,8 +213,10 @@ int msm_atomic_commit(struct drm_device *dev, return ret; c = commit_init(state); - if (!c) - return -ENOMEM; + if (!c) { + ret = -ENOMEM; + goto error; + } /* * Figure out what crtcs we have: @@ -221,7 +249,7 @@ int msm_atomic_commit(struct drm_device *dev, ret = start_atomic(dev->dev_private, c->crtc_mask); if (ret) { kfree(c); - return ret; + goto error; } /* @@ -253,16 +281,16 @@ int msm_atomic_commit(struct drm_device *dev, return 0; } - jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout); + timeout = ktime_add_ms(ktime_get(), 1000); - ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); - if (ret) { - WARN_ON(ret); // TODO unswap state back? or?? - commit_destroy(c); - return ret; - } + /* uninterruptible wait */ + msm_wait_fence(dev, c->fence, &timeout, false); complete_commit(c); return 0; + +error: + drm_atomic_helper_cleanup_planes(dev, state); + return ret; } diff --git a/kernel/drivers/gpu/drm/msm/msm_drv.c b/kernel/drivers/gpu/drm/msm/msm_drv.c index c80a6bee2..b88ce514e 100644 --- a/kernel/drivers/gpu/drm/msm/msm_drv.c +++ b/kernel/drivers/gpu/drm/msm/msm_drv.c @@ -21,11 +21,9 @@ static void msm_fb_output_poll_changed(struct drm_device *dev) { -#ifdef CONFIG_DRM_MSM_FBDEV struct msm_drm_private *priv = dev->dev_private; if (priv->fbdev) drm_fb_helper_hotplug_event(priv->fbdev); -#endif } static const struct drm_mode_config_funcs mode_config_funcs = { @@ -56,7 +54,7 @@ module_param(reglog, bool, 0600); #define reglog 0 #endif -#ifdef CONFIG_DRM_MSM_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION static bool fbdev = true; MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer"); module_param(fbdev, bool, 0600); @@ -116,6 +114,65 @@ u32 msm_readl(const void __iomem *addr) return val; } +struct vblank_event { + struct list_head node; + int crtc_id; + bool enable; +}; + +static void vblank_ctrl_worker(struct work_struct *work) +{ + struct msm_vblank_ctrl *vbl_ctrl = container_of(work, + struct msm_vblank_ctrl, work); + struct msm_drm_private *priv = container_of(vbl_ctrl, + struct msm_drm_private, vblank_ctrl); + struct msm_kms *kms = priv->kms; + struct vblank_event *vbl_ev, *tmp; + unsigned long flags; + + spin_lock_irqsave(&vbl_ctrl->lock, flags); + list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { + list_del(&vbl_ev->node); + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + + if (vbl_ev->enable) + kms->funcs->enable_vblank(kms, + priv->crtcs[vbl_ev->crtc_id]); + else + kms->funcs->disable_vblank(kms, + priv->crtcs[vbl_ev->crtc_id]); + + 
kfree(vbl_ev); + + spin_lock_irqsave(&vbl_ctrl->lock, flags); + } + + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); +} + +static int vblank_ctrl_queue_work(struct msm_drm_private *priv, + int crtc_id, bool enable) +{ + struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; + struct vblank_event *vbl_ev; + unsigned long flags; + + vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC); + if (!vbl_ev) + return -ENOMEM; + + vbl_ev->crtc_id = crtc_id; + vbl_ev->enable = enable; + + spin_lock_irqsave(&vbl_ctrl->lock, flags); + list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + + queue_work(priv->wq, &vbl_ctrl->work); + + return 0; +} + /* * DRM operations: */ @@ -125,6 +182,18 @@ static int msm_unload(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; struct msm_gpu *gpu = priv->gpu; + struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; + struct vblank_event *vbl_ev, *tmp; + + /* We must cancel and cleanup any pending vblank enable/disable + * work before drm_irq_uninstall() to avoid work re-enabling an + * irq after uninstall has disabled it. + */ + cancel_work_sync(&vbl_ctrl->work); + list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { + list_del(&vbl_ev->node); + kfree(vbl_ev); + } drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); @@ -282,6 +351,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags) INIT_LIST_HEAD(&priv->inactive_list); INIT_LIST_HEAD(&priv->fence_cbs); + INIT_LIST_HEAD(&priv->vblank_ctrl.event_list); + INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker); + spin_lock_init(&priv->vblank_ctrl.lock); drm_mode_config_init(dev); @@ -331,10 +403,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags) } } - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; dev->mode_config.funcs = &mode_config_funcs; ret = drm_vblank_init(dev, priv->num_crtcs); @@ -353,7 +421,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) drm_mode_config_reset(dev); -#ifdef CONFIG_DRM_MSM_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION if (fbdev) priv->fbdev = msm_fbdev_init(dev); #endif @@ -421,11 +489,9 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file) static void msm_lastclose(struct drm_device *dev) { -#ifdef CONFIG_DRM_MSM_FBDEV struct msm_drm_private *priv = dev->dev_private; if (priv->fbdev) drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); -#endif } static irqreturn_t msm_irq(int irq, void *arg) @@ -461,24 +527,24 @@ static void msm_irq_uninstall(struct drm_device *dev) kms->funcs->irq_uninstall(kms); } -static int msm_enable_vblank(struct drm_device *dev, int crtc_id) +static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe) { struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; if (!kms) return -ENXIO; - DBG("dev=%p, crtc=%d", dev, crtc_id); - return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]); + DBG("dev=%p, crtc=%u", dev, pipe); + return vblank_ctrl_queue_work(priv, pipe, true); } -static void msm_disable_vblank(struct drm_device *dev, int crtc_id) +static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe) { struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; if (!kms) return; - DBG("dev=%p, crtc=%d", dev, crtc_id); - kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]); + DBG("dev=%p, crtc=%u", dev, pipe); 
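/*
 * Illustrative aside, separate from the patch above: a minimal userspace
 * sketch of the deferred vblank on/off scheme that vblank_ctrl_queue_work()
 * and vblank_ctrl_worker() implement -- enable/disable requests are queued
 * on a locked list from the (atomic) vblank hooks and acted on later from
 * process context.  A pthread mutex stands in for the spinlock and a plain
 * drain loop for the workqueue; the helper names here are invented for the
 * illustration and none of the kernel APIs are used.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vblank_event {
	struct vblank_event *next;
	int crtc_id;
	bool enable;
};

static struct vblank_event *event_list;	/* FIFO via tail append */
static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

/* producer side: cheap enough to be called from "atomic" context */
static int queue_vblank_event(int crtc_id, bool enable)
{
	struct vblank_event *ev = calloc(1, sizeof(*ev)), **tail;

	if (!ev)
		return -1;

	ev->crtc_id = crtc_id;
	ev->enable = enable;

	pthread_mutex_lock(&event_lock);
	for (tail = &event_list; *tail; tail = &(*tail)->next)
		;
	*tail = ev;
	pthread_mutex_unlock(&event_lock);
	return 0;
}

/*
 * consumer side: the lock is dropped around the (potentially sleeping)
 * enable/disable call and re-taken before looking at the list again --
 * the same shape as vblank_ctrl_worker() in the hunk above.
 */
static void drain_vblank_events(void)
{
	pthread_mutex_lock(&event_lock);
	while (event_list) {
		struct vblank_event *ev = event_list;

		event_list = ev->next;
		pthread_mutex_unlock(&event_lock);

		printf("crtc %d: %s vblank\n", ev->crtc_id,
		       ev->enable ? "enable" : "disable");
		free(ev);

		pthread_mutex_lock(&event_lock);
	}
	pthread_mutex_unlock(&event_lock);
}

int main(void)
{
	queue_vblank_event(0, true);
	queue_vblank_event(0, false);
	drain_vblank_events();	/* what the worker does when it runs */
	return 0;
}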
+ vblank_ctrl_queue_work(priv, pipe, false); } /* @@ -637,8 +703,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor) * Fences: */ -int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, - struct timespec *timeout) +int msm_wait_fence(struct drm_device *dev, uint32_t fence, + ktime_t *timeout , bool interruptible) { struct msm_drm_private *priv = dev->dev_private; int ret; @@ -656,16 +722,23 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, /* no-wait: */ ret = fence_completed(dev, fence) ? 0 : -EBUSY; } else { - unsigned long timeout_jiffies = timespec_to_jiffies(timeout); - unsigned long start_jiffies = jiffies; + ktime_t now = ktime_get(); unsigned long remaining_jiffies; - if (time_after(start_jiffies, timeout_jiffies)) + if (ktime_compare(*timeout, now) < 0) { remaining_jiffies = 0; - else - remaining_jiffies = timeout_jiffies - start_jiffies; + } else { + ktime_t rem = ktime_sub(*timeout, now); + struct timespec ts = ktime_to_timespec(rem); + remaining_jiffies = timespec_to_jiffies(&ts); + } - ret = wait_event_interruptible_timeout(priv->fence_event, + if (interruptible) + ret = wait_event_interruptible_timeout(priv->fence_event, + fence_completed(dev, fence), + remaining_jiffies); + else + ret = wait_event_timeout(priv->fence_event, fence_completed(dev, fence), remaining_jiffies); @@ -772,13 +845,17 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data, args->flags, &args->handle); } -#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec }) +static inline ktime_t to_ktime(struct drm_msm_timespec timeout) +{ + return ktime_set(timeout.tv_sec, timeout.tv_nsec); +} static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_gem_cpu_prep *args = data; struct drm_gem_object *obj; + ktime_t timeout = to_ktime(args->timeout); int ret; if (args->op & ~MSM_PREP_FLAGS) { @@ -790,7 +867,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, if (!obj) return -ENOENT; - ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout)); + ret = msm_gem_cpu_prep(obj, args->op, &timeout); drm_gem_object_unreference_unlocked(obj); @@ -840,24 +917,24 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_wait_fence *args = data; + ktime_t timeout = to_ktime(args->timeout); if (args->pad) { DRM_ERROR("invalid pad: %08x\n", args->pad); return -EINVAL; } - return msm_wait_fence_interruptable(dev, args->fence, - &TS(args->timeout)); + return msm_wait_fence(dev, args->fence, &timeout, true); } static const struct drm_ioctl_desc msm_ioctls[] = { - DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), + 
DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct vm_operations_struct vm_ops = { @@ -885,6 +962,7 @@ static struct drm_driver msm_driver = { DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | + DRIVER_ATOMIC | DRIVER_MODESET, .load = msm_load, .unload = msm_unload, @@ -896,7 +974,7 @@ static struct drm_driver msm_driver = { .irq_preinstall = msm_irq_preinstall, .irq_postinstall = msm_irq_postinstall, .irq_uninstall = msm_irq_uninstall, - .get_vblank_counter = drm_vblank_count, + .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = msm_enable_vblank, .disable_vblank = msm_disable_vblank, .gem_free_object = msm_gem_free_object, diff --git a/kernel/drivers/gpu/drm/msm/msm_drv.h b/kernel/drivers/gpu/drm/msm/msm_drv.h index 04db4bd1b..3be7a56b1 100644 --- a/kernel/drivers/gpu/drm/msm/msm_drv.h +++ b/kernel/drivers/gpu/drm/msm/msm_drv.h @@ -30,6 +30,7 @@ #include <linux/list.h> #include <linux/iommu.h> #include <linux/types.h> +#include <linux/of_graph.h> #include <asm/sizes.h> #ifndef CONFIG_OF @@ -64,6 +65,19 @@ struct msm_file_private { int dummy; }; +enum msm_mdp_plane_property { + PLANE_PROP_ZPOS, + PLANE_PROP_ALPHA, + PLANE_PROP_PREMULTIPLIED, + PLANE_PROP_MAX_NUM +}; + +struct msm_vblank_ctrl { + struct work_struct work; + struct list_head event_list; + spinlock_t lock; +}; + struct msm_drm_private { struct msm_kms *kms; @@ -128,6 +142,9 @@ struct msm_drm_private { unsigned int num_connectors; struct drm_connector *connectors[8]; + /* Properties */ + struct drm_property *plane_property[PLANE_PROP_MAX_NUM]; + /* VRAM carveout, used when no IOMMU: */ struct { unsigned long size; @@ -137,6 +154,8 @@ struct msm_drm_private { */ struct drm_mm mm; } vram; + + struct msm_vblank_ctrl vblank_ctrl; }; struct msm_format { @@ -164,8 +183,8 @@ int msm_atomic_commit(struct drm_device *dev, int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); -int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, - struct timespec *timeout); +int msm_wait_fence(struct drm_device *dev, uint32_t fence, + ktime_t *timeout, bool interruptible); int msm_queue_fence_cb(struct drm_device *dev, struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct drm_device *dev, uint32_t fence); @@ -205,7 +224,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj, struct msm_gpu *gpu, bool write, uint32_t fence); void msm_gem_move_to_inactive(struct drm_gem_object *obj); int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, - struct timespec *timeout); + ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); void msm_gem_free_object(struct drm_gem_object *obj); int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, diff --git a/kernel/drivers/gpu/drm/msm/msm_fbdev.c b/kernel/drivers/gpu/drm/msm/msm_fbdev.c index 95f6532df..3f6ec077b 100644 --- a/kernel/drivers/gpu/drm/msm/msm_fbdev.c +++ b/kernel/drivers/gpu/drm/msm/msm_fbdev.c @@ -43,11 +43,11 @@ static struct fb_ops msm_fb_ops = { /* Note: to properly handle manual update displays, we wrap the * basic fbdev ops which write to the framebuffer */ - .fb_read = fb_sys_read, - .fb_write = 
fb_sys_write, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_mmap = msm_fbdev_mmap, .fb_check_var = drm_fb_helper_check_var, @@ -68,12 +68,7 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) if (drm_device_is_unplugged(dev)) return -ENODEV; - mutex_lock(&dev->struct_mutex); - ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); - - mutex_unlock(&dev->struct_mutex); - if (ret) { pr_err("%s:drm_gem_mmap_obj fail\n", __func__); return ret; @@ -144,10 +139,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, goto fail_unlock; } - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { dev_err(dev->dev, "failed to allocate fb info\n"); - ret = -ENOMEM; + ret = PTR_ERR(fbi); goto fail_unlock; } @@ -155,7 +150,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fbdev->fb = fb; helper->fb = fb; - helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_DEFAULT; @@ -163,12 +157,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, strcpy(fbi->fix.id, "msm"); - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto fail_unlock; - } - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); @@ -191,7 +179,6 @@ fail_unlock: fail: if (ret) { - framebuffer_release(fbi); if (fb) { drm_framebuffer_unregister_private(fb); drm_framebuffer_remove(fb); @@ -266,17 +253,11 @@ void msm_fbdev_free(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct drm_fb_helper *helper = priv->fbdev; struct msm_fbdev *fbdev; - struct fb_info *fbi; DBG(); - fbi = helper->fbdev; - - /* only cleanup framebuffer if it is present */ - if (fbi) { - unregister_framebuffer(fbi); - framebuffer_release(fbi); - } + drm_fb_helper_unregister_fbi(helper); + drm_fb_helper_release_fbi(helper); drm_fb_helper_fini(helper); diff --git a/kernel/drivers/gpu/drm/msm/msm_gem.c b/kernel/drivers/gpu/drm/msm/msm_gem.c index 52839769e..c76cc853b 100644 --- a/kernel/drivers/gpu/drm/msm/msm_gem.c +++ b/kernel/drivers/gpu/drm/msm/msm_gem.c @@ -448,8 +448,7 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj) list_add_tail(&msm_obj->mm_list, &priv->inactive_list); } -int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, - struct timespec *timeout) +int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) { struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -461,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, if (op & MSM_PREP_NOSYNC) timeout = NULL; - ret = msm_wait_fence_interruptable(dev, fence, timeout); + ret = msm_wait_fence(dev, fence, timeout, true); } /* TODO cache maintenance */ @@ -540,6 +539,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) if (msm_obj->pages) drm_free_large(msm_obj->pages); + drm_prime_gem_destroy(obj, msm_obj->sgt); } else { vunmap(msm_obj->vaddr); put_pages(obj); diff --git a/kernel/drivers/gpu/drm/msm/msm_gem.h b/kernel/drivers/gpu/drm/msm/msm_gem.h index 85d481e29..6fc59bfee 100644 --- a/kernel/drivers/gpu/drm/msm/msm_gem.h +++ b/kernel/drivers/gpu/drm/msm/msm_gem.h @@ -96,6 +96,7 @@ static inline uint32_t 
msm_gem_fence(struct msm_gem_object *msm_obj, struct msm_gem_submit { struct drm_device *dev; struct msm_gpu *gpu; + struct list_head node; /* node in gpu submit_list */ struct list_head bo_list; struct ww_acquire_ctx ticket; uint32_t fence; diff --git a/kernel/drivers/gpu/drm/msm/msm_gem_prime.c b/kernel/drivers/gpu/drm/msm/msm_gem_prime.c index dd7a7ab60..121975b07 100644 --- a/kernel/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/kernel/drivers/gpu/drm/msm/msm_gem_prime.c @@ -23,8 +23,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - BUG_ON(!msm_obj->sgt); /* should have already pinned! */ - return msm_obj->sgt; + int npages = obj->size >> PAGE_SHIFT; + + if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */ + return NULL; + + return drm_prime_pages_to_sg(msm_obj->pages, npages); } void *msm_gem_prime_vmap(struct drm_gem_object *obj) @@ -41,9 +45,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { int ret; - mutex_lock(&obj->dev->struct_mutex); ret = drm_gem_mmap_obj(obj, obj->size, vma); - mutex_unlock(&obj->dev->struct_mutex); if (ret < 0) return ret; diff --git a/kernel/drivers/gpu/drm/msm/msm_gem_submit.c b/kernel/drivers/gpu/drm/msm/msm_gem_submit.c index cd0554f68..6d7cd3fe2 100644 --- a/kernel/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/kernel/drivers/gpu/drm/msm/msm_gem_submit.c @@ -314,7 +314,6 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail) } ww_acquire_fini(&submit->ticket); - kfree(submit); } int msm_ioctl_gem_submit(struct drm_device *dev, void *data, diff --git a/kernel/drivers/gpu/drm/msm/msm_gpu.c b/kernel/drivers/gpu/drm/msm/msm_gpu.c index 4a0dce587..6b02ada65 100644 --- a/kernel/drivers/gpu/drm/msm/msm_gpu.c +++ b/kernel/drivers/gpu/drm/msm/msm_gpu.c @@ -24,7 +24,7 @@ * Power Management: */ -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING #include <mach/board.h> static void bs_init(struct msm_gpu *gpu) { @@ -265,6 +265,8 @@ static void inactive_start(struct msm_gpu *gpu) * Hangcheck detection for locked gpu: */ +static void retire_submits(struct msm_gpu *gpu, uint32_t fence); + static void recover_worker(struct work_struct *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); @@ -274,8 +276,19 @@ static void recover_worker(struct work_struct *work) mutex_lock(&dev->struct_mutex); if (msm_gpu_active(gpu)) { + struct msm_gem_submit *submit; + uint32_t fence = gpu->funcs->last_fence(gpu); + + /* retire completed submits, plus the one that hung: */ + retire_submits(gpu, fence + 1); + inactive_cancel(gpu); gpu->funcs->recover(gpu); + + /* replay the remaining submits after the one that hung: */ + list_for_each_entry(submit, &gpu->submit_list, node) { + gpu->funcs->submit(gpu, submit, NULL); + } } mutex_unlock(&dev->struct_mutex); @@ -418,6 +431,27 @@ out: * Cmdstream submission/retirement: */ +static void retire_submits(struct msm_gpu *gpu, uint32_t fence) +{ + struct drm_device *dev = gpu->dev; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + while (!list_empty(&gpu->submit_list)) { + struct msm_gem_submit *submit; + + submit = list_first_entry(&gpu->submit_list, + struct msm_gem_submit, node); + + if (submit->fence <= fence) { + list_del(&submit->node); + kfree(submit); + } else { + break; + } + } +} + static void retire_worker(struct work_struct *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); @@ -428,6 +462,8 @@ static void 
retire_worker(struct work_struct *work) mutex_lock(&dev->struct_mutex); + retire_submits(gpu, fence); + while (!list_empty(&gpu->active_list)) { struct msm_gem_object *obj; @@ -467,21 +503,22 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_drm_private *priv = dev->dev_private; int i, ret; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + submit->fence = ++priv->next_fence; gpu->submitted_fence = submit->fence; inactive_cancel(gpu); + list_add_tail(&submit->node, &gpu->submit_list); + msm_rd_dump_submit(submit); gpu->submitted_fence = submit->fence; update_sw_cntrs(gpu); - ret = gpu->funcs->submit(gpu, submit, ctx); - priv->lastctx = ctx; - for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; @@ -505,6 +542,10 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); } + + ret = gpu->funcs->submit(gpu, submit, ctx); + priv->lastctx = ctx; + hangcheck_timer_reset(gpu); return ret; @@ -522,6 +563,7 @@ static irqreturn_t irq_handler(int irq, void *data) static const char *clk_names[] = { "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", + "alt_mem_iface_clk", }; int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, @@ -544,6 +586,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, INIT_WORK(&gpu->inactive_work, inactive_worker); INIT_WORK(&gpu->recover_work, recover_worker); + INIT_LIST_HEAD(&gpu->submit_list); + setup_timer(&gpu->inactive_timer, inactive_handler, (unsigned long)gpu); setup_timer(&gpu->hangcheck_timer, hangcheck_handler, @@ -607,6 +651,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (iommu) { dev_info(drm->dev, "%s: using IOMMU\n", name); gpu->mmu = msm_iommu_new(&pdev->dev, iommu); + if (IS_ERR(gpu->mmu)) { + ret = PTR_ERR(gpu->mmu); + dev_err(drm->dev, "failed to init iommu: %d\n", ret); + gpu->mmu = NULL; + iommu_domain_free(iommu); + goto fail; + } + } else { dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); } diff --git a/kernel/drivers/gpu/drm/msm/msm_gpu.h b/kernel/drivers/gpu/drm/msm/msm_gpu.h index fd1e4b4a6..2bbe85a3d 100644 --- a/kernel/drivers/gpu/drm/msm/msm_gpu.h +++ b/kernel/drivers/gpu/drm/msm/msm_gpu.h @@ -100,10 +100,10 @@ struct msm_gpu { /* Power Control: */ struct regulator *gpu_reg, *gpu_cx; - struct clk *ebi1_clk, *grp_clks[5]; + struct clk *ebi1_clk, *grp_clks[6]; uint32_t fast_rate, slow_rate, bus_freq; -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; uint32_t bsc; #endif @@ -119,6 +119,8 @@ struct msm_gpu { struct timer_list hangcheck_timer; uint32_t hangcheck_fence; struct work_struct recover_work; + + struct list_head submit_list; }; static inline bool msm_gpu_active(struct msm_gpu *gpu) diff --git a/kernel/drivers/gpu/drm/msm/msm_kms.h b/kernel/drivers/gpu/drm/msm/msm_kms.h index a9f17bdb4..9bcabaada 100644 --- a/kernel/drivers/gpu/drm/msm/msm_kms.h +++ b/kernel/drivers/gpu/drm/msm/msm_kms.h @@ -43,6 +43,9 @@ struct msm_kms_funcs { /* modeset, bracketing atomic_commit(): */ void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); + /* functions to wait for atomic commit completed on each CRTC */ + void (*wait_for_crtc_commit_done)(struct msm_kms *kms, + struct drm_crtc 
*crtc); /* misc: */ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, |