From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Tue, 4 Aug 2015 12:17:53 -0700 Subject: Add the rt linux 4.1.3-rt3 as base Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-4.1.y-rt and the base is: commit 0917f823c59692d751951bf5ea699a2d1e2f26a2 Author: Sebastian Andrzej Siewior Date: Sat Jul 25 12:13:34 2015 +0200 Prepare v4.1.3-rt3 Signed-off-by: Sebastian Andrzej Siewior We lose all the git history this way and it's not good. We should apply another opnfv project repo in future. Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423 Signed-off-by: Yunhong Jiang --- kernel/drivers/gpu/Makefile | 6 + kernel/drivers/gpu/drm/Kconfig | 219 + kernel/drivers/gpu/drm/Makefile | 72 + kernel/drivers/gpu/drm/amd/amdkfd/Kconfig | 9 + kernel/drivers/gpu/drm/amd/amdkfd/Makefile | 16 + kernel/drivers/gpu/drm/amd/amdkfd/cik_regs.h | 234 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 643 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 294 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_device.c | 522 + .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1217 ++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 180 + .../drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 135 + .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 64 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 249 + .../drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 355 + .../drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 340 + .../drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 101 + .../gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 44 + .../gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 56 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_module.c | 138 + .../drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 37 + .../drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 91 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 451 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 33 + 
.../gpu/drm/amd/amdkfd/kfd_packet_manager.c | 557 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 96 + .../drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h | 405 + .../drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h | 107 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 645 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_process.c | 433 + .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 359 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 85 + kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 1254 ++ kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 168 + kernel/drivers/gpu/drm/amd/include/cik_structs.h | 293 + .../gpu/drm/amd/include/kgd_kfd_interface.h | 203 + kernel/drivers/gpu/drm/armada/Kconfig | 25 + kernel/drivers/gpu/drm/armada/Makefile | 7 + kernel/drivers/gpu/drm/armada/armada_510.c | 84 + kernel/drivers/gpu/drm/armada/armada_crtc.c | 1234 ++ kernel/drivers/gpu/drm/armada/armada_crtc.h | 90 + kernel/drivers/gpu/drm/armada/armada_debugfs.c | 177 + kernel/drivers/gpu/drm/armada/armada_drm.h | 111 + kernel/drivers/gpu/drm/armada/armada_drv.c | 519 + kernel/drivers/gpu/drm/armada/armada_fb.c | 170 + kernel/drivers/gpu/drm/armada/armada_fb.h | 24 + kernel/drivers/gpu/drm/armada/armada_fbdev.c | 210 + kernel/drivers/gpu/drm/armada/armada_gem.c | 616 + kernel/drivers/gpu/drm/armada/armada_gem.h | 54 + kernel/drivers/gpu/drm/armada/armada_hw.h | 318 + kernel/drivers/gpu/drm/armada/armada_ioctlP.h | 18 + kernel/drivers/gpu/drm/armada/armada_output.c | 158 + kernel/drivers/gpu/drm/armada/armada_output.h | 39 + kernel/drivers/gpu/drm/armada/armada_overlay.c | 477 + kernel/drivers/gpu/drm/armada/armada_slave.c | 139 + kernel/drivers/gpu/drm/armada/armada_slave.h | 26 + kernel/drivers/gpu/drm/ast/Kconfig | 17 + kernel/drivers/gpu/drm/ast/Makefile | 9 + kernel/drivers/gpu/drm/ast/ast_dp501.c | 434 + kernel/drivers/gpu/drm/ast/ast_dram_tables.h | 144 + kernel/drivers/gpu/drm/ast/ast_drv.c | 241 + kernel/drivers/gpu/drm/ast/ast_drv.h | 403 + kernel/drivers/gpu/drm/ast/ast_fb.c 
| 381 + kernel/drivers/gpu/drm/ast/ast_main.c | 594 + kernel/drivers/gpu/drm/ast/ast_mode.c | 1253 ++ kernel/drivers/gpu/drm/ast/ast_post.c | 1657 +++ kernel/drivers/gpu/drm/ast/ast_tables.h | 305 + kernel/drivers/gpu/drm/ast/ast_ttm.c | 435 + kernel/drivers/gpu/drm/ati_pcigart.c | 204 + kernel/drivers/gpu/drm/atmel-hlcdc/Kconfig | 11 + kernel/drivers/gpu/drm/atmel-hlcdc/Makefile | 7 + .../drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 367 + .../drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 617 + .../drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 165 + .../gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c | 666 + .../gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h | 399 + .../gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 308 + .../gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 1048 ++ kernel/drivers/gpu/drm/bochs/Kconfig | 12 + kernel/drivers/gpu/drm/bochs/Makefile | 4 + kernel/drivers/gpu/drm/bochs/bochs.h | 165 + kernel/drivers/gpu/drm/bochs/bochs_drv.c | 225 + kernel/drivers/gpu/drm/bochs/bochs_fbdev.c | 235 + kernel/drivers/gpu/drm/bochs/bochs_hw.c | 195 + kernel/drivers/gpu/drm/bochs/bochs_kms.c | 303 + kernel/drivers/gpu/drm/bochs/bochs_mm.c | 550 + kernel/drivers/gpu/drm/bridge/Kconfig | 24 + kernel/drivers/gpu/drm/bridge/Makefile | 5 + kernel/drivers/gpu/drm/bridge/dw_hdmi.c | 1711 +++ kernel/drivers/gpu/drm/bridge/dw_hdmi.h | 1034 ++ kernel/drivers/gpu/drm/bridge/ps8622.c | 684 + kernel/drivers/gpu/drm/bridge/ptn3460.c | 417 + kernel/drivers/gpu/drm/cirrus/Kconfig | 13 + kernel/drivers/gpu/drm/cirrus/Makefile | 5 + kernel/drivers/gpu/drm/cirrus/cirrus_drv.c | 186 + kernel/drivers/gpu/drm/cirrus/cirrus_drv.h | 268 + kernel/drivers/gpu/drm/cirrus/cirrus_fbdev.c | 341 + kernel/drivers/gpu/drm/cirrus/cirrus_main.c | 335 + kernel/drivers/gpu/drm/cirrus/cirrus_mode.c | 619 + kernel/drivers/gpu/drm/cirrus/cirrus_ttm.c | 419 + kernel/drivers/gpu/drm/drm_agpsupport.c | 506 + kernel/drivers/gpu/drm/drm_atomic.c | 1375 ++ kernel/drivers/gpu/drm/drm_atomic_helper.c | 2233 +++ 
kernel/drivers/gpu/drm/drm_auth.c | 201 + kernel/drivers/gpu/drm/drm_bridge.c | 91 + kernel/drivers/gpu/drm/drm_bufs.c | 1473 ++ kernel/drivers/gpu/drm/drm_cache.c | 149 + kernel/drivers/gpu/drm/drm_context.c | 468 + kernel/drivers/gpu/drm/drm_crtc.c | 5643 +++++++ kernel/drivers/gpu/drm/drm_crtc_helper.c | 1026 ++ kernel/drivers/gpu/drm/drm_crtc_internal.h | 44 + kernel/drivers/gpu/drm/drm_debugfs.c | 422 + kernel/drivers/gpu/drm/drm_dma.c | 169 + kernel/drivers/gpu/drm/drm_dp_helper.c | 652 + kernel/drivers/gpu/drm/drm_dp_mst_topology.c | 2807 ++++ kernel/drivers/gpu/drm/drm_drv.c | 912 ++ kernel/drivers/gpu/drm/drm_edid.c | 4041 +++++ kernel/drivers/gpu/drm/drm_edid_load.c | 294 + kernel/drivers/gpu/drm/drm_encoder_slave.c | 184 + kernel/drivers/gpu/drm/drm_fb_cma_helper.c | 450 + kernel/drivers/gpu/drm/drm_fb_helper.c | 1838 +++ kernel/drivers/gpu/drm/drm_flip_work.c | 167 + kernel/drivers/gpu/drm/drm_fops.c | 549 + kernel/drivers/gpu/drm/drm_gem.c | 904 ++ kernel/drivers/gpu/drm/drm_gem_cma_helper.c | 543 + kernel/drivers/gpu/drm/drm_global.c | 110 + kernel/drivers/gpu/drm/drm_hashtab.c | 208 + kernel/drivers/gpu/drm/drm_info.c | 222 + kernel/drivers/gpu/drm/drm_internal.h | 131 + kernel/drivers/gpu/drm/drm_ioc32.c | 1085 ++ kernel/drivers/gpu/drm/drm_ioctl.c | 783 + kernel/drivers/gpu/drm/drm_irq.c | 1775 +++ kernel/drivers/gpu/drm/drm_legacy.h | 113 + kernel/drivers/gpu/drm/drm_lock.c | 375 + kernel/drivers/gpu/drm/drm_memory.c | 151 + kernel/drivers/gpu/drm/drm_mipi_dsi.c | 933 ++ kernel/drivers/gpu/drm/drm_mm.c | 861 ++ kernel/drivers/gpu/drm/drm_modes.c | 1407 ++ kernel/drivers/gpu/drm/drm_modeset_lock.c | 482 + kernel/drivers/gpu/drm/drm_of.c | 63 + kernel/drivers/gpu/drm/drm_panel.c | 100 + kernel/drivers/gpu/drm/drm_pci.c | 455 + kernel/drivers/gpu/drm/drm_plane_helper.c | 588 + kernel/drivers/gpu/drm/drm_platform.c | 106 + kernel/drivers/gpu/drm/drm_prime.c | 774 + kernel/drivers/gpu/drm/drm_probe_helper.c | 523 + kernel/drivers/gpu/drm/drm_rect.c | 
435 + kernel/drivers/gpu/drm/drm_scatter.c | 221 + kernel/drivers/gpu/drm/drm_sysfs.c | 617 + kernel/drivers/gpu/drm/drm_trace.h | 65 + kernel/drivers/gpu/drm/drm_trace_points.c | 4 + kernel/drivers/gpu/drm/drm_vm.c | 734 + kernel/drivers/gpu/drm/drm_vma_manager.c | 435 + kernel/drivers/gpu/drm/exynos/Kconfig | 99 + kernel/drivers/gpu/drm/exynos/Makefile | 25 + kernel/drivers/gpu/drm/exynos/exynos7_drm_decon.c | 940 ++ kernel/drivers/gpu/drm/exynos/exynos_dp_core.c | 1425 ++ kernel/drivers/gpu/drm/exynos/exynos_dp_core.h | 282 + kernel/drivers/gpu/drm/exynos/exynos_dp_reg.c | 1263 ++ kernel/drivers/gpu/drm/exynos/exynos_dp_reg.h | 366 + kernel/drivers/gpu/drm/exynos/exynos_drm_buf.c | 186 + kernel/drivers/gpu/drm/exynos/exynos_drm_buf.h | 33 + kernel/drivers/gpu/drm/exynos/exynos_drm_core.c | 161 + kernel/drivers/gpu/drm/exynos/exynos_drm_crtc.c | 376 + kernel/drivers/gpu/drm/exynos/exynos_drm_crtc.h | 42 + kernel/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 286 + kernel/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h | 20 + kernel/drivers/gpu/drm/exynos/exynos_drm_dpi.c | 353 + kernel/drivers/gpu/drm/exynos/exynos_drm_drv.c | 707 + kernel/drivers/gpu/drm/exynos/exynos_drm_drv.h | 349 + kernel/drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1795 +++ kernel/drivers/gpu/drm/exynos/exynos_drm_encoder.c | 197 + kernel/drivers/gpu/drm/exynos/exynos_drm_encoder.h | 23 + kernel/drivers/gpu/drm/exynos/exynos_drm_fb.c | 287 + kernel/drivers/gpu/drm/exynos/exynos_drm_fb.h | 35 + kernel/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 360 + kernel/drivers/gpu/drm/exynos/exynos_drm_fbdev.h | 23 + kernel/drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1852 +++ kernel/drivers/gpu/drm/exynos/exynos_drm_fimc.h | 23 + kernel/drivers/gpu/drm/exynos/exynos_drm_fimd.c | 1216 ++ kernel/drivers/gpu/drm/exynos/exynos_drm_g2d.c | 1587 ++ kernel/drivers/gpu/drm/exynos/exynos_drm_g2d.h | 36 + kernel/drivers/gpu/drm/exynos/exynos_drm_gem.c | 651 + kernel/drivers/gpu/drm/exynos/exynos_drm_gem.h | 182 + 
kernel/drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1795 +++ kernel/drivers/gpu/drm/exynos/exynos_drm_gsc.h | 24 + kernel/drivers/gpu/drm/exynos/exynos_drm_iommu.c | 143 + kernel/drivers/gpu/drm/exynos/exynos_drm_iommu.h | 69 + kernel/drivers/gpu/drm/exynos/exynos_drm_ipp.c | 1822 +++ kernel/drivers/gpu/drm/exynos/exynos_drm_ipp.h | 252 + kernel/drivers/gpu/drm/exynos/exynos_drm_plane.c | 232 + kernel/drivers/gpu/drm/exynos/exynos_drm_plane.h | 26 + kernel/drivers/gpu/drm/exynos/exynos_drm_rotator.c | 856 ++ kernel/drivers/gpu/drm/exynos/exynos_drm_rotator.h | 19 + kernel/drivers/gpu/drm/exynos/exynos_drm_vidi.c | 621 + kernel/drivers/gpu/drm/exynos/exynos_drm_vidi.h | 22 + kernel/drivers/gpu/drm/exynos/exynos_hdmi.c | 2528 ++++ kernel/drivers/gpu/drm/exynos/exynos_mixer.c | 1318 ++ kernel/drivers/gpu/drm/exynos/exynos_mixer.h | 20 + kernel/drivers/gpu/drm/exynos/regs-fimc.h | 668 + kernel/drivers/gpu/drm/exynos/regs-gsc.h | 284 + kernel/drivers/gpu/drm/exynos/regs-hdmi.h | 597 + kernel/drivers/gpu/drm/exynos/regs-mixer.h | 152 + kernel/drivers/gpu/drm/exynos/regs-rotator.h | 73 + kernel/drivers/gpu/drm/exynos/regs-vp.h | 91 + kernel/drivers/gpu/drm/gma500/Kconfig | 38 + kernel/drivers/gpu/drm/gma500/Makefile | 56 + kernel/drivers/gpu/drm/gma500/accel_2d.c | 364 + kernel/drivers/gpu/drm/gma500/backlight.c | 94 + kernel/drivers/gpu/drm/gma500/blitter.c | 51 + kernel/drivers/gpu/drm/gma500/blitter.h | 22 + kernel/drivers/gpu/drm/gma500/cdv_device.c | 626 + kernel/drivers/gpu/drm/gma500/cdv_device.h | 30 + kernel/drivers/gpu/drm/gma500/cdv_intel_crt.c | 317 + kernel/drivers/gpu/drm/gma500/cdv_intel_display.c | 999 ++ kernel/drivers/gpu/drm/gma500/cdv_intel_dp.c | 2147 +++ kernel/drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 369 + kernel/drivers/gpu/drm/gma500/cdv_intel_lvds.c | 798 + kernel/drivers/gpu/drm/gma500/framebuffer.c | 811 + kernel/drivers/gpu/drm/gma500/framebuffer.h | 47 + kernel/drivers/gpu/drm/gma500/gem.c | 229 + kernel/drivers/gpu/drm/gma500/gem.h | 21 + 
kernel/drivers/gpu/drm/gma500/gma_device.c | 56 + kernel/drivers/gpu/drm/gma500/gma_device.h | 21 + kernel/drivers/gpu/drm/gma500/gma_display.c | 791 + kernel/drivers/gpu/drm/gma500/gma_display.h | 106 + kernel/drivers/gpu/drm/gma500/gtt.c | 587 + kernel/drivers/gpu/drm/gma500/gtt.h | 66 + kernel/drivers/gpu/drm/gma500/intel_bios.c | 597 + kernel/drivers/gpu/drm/gma500/intel_bios.h | 621 + kernel/drivers/gpu/drm/gma500/intel_gmbus.c | 500 + kernel/drivers/gpu/drm/gma500/intel_i2c.c | 169 + kernel/drivers/gpu/drm/gma500/mdfld_device.c | 551 + kernel/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 1016 ++ kernel/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h | 79 + kernel/drivers/gpu/drm/gma500/mdfld_dsi_output.c | 614 + kernel/drivers/gpu/drm/gma500/mdfld_dsi_output.h | 377 + .../drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 675 + .../drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h | 80 + .../drivers/gpu/drm/gma500/mdfld_intel_display.c | 1035 ++ kernel/drivers/gpu/drm/gma500/mdfld_output.c | 74 + kernel/drivers/gpu/drm/gma500/mdfld_output.h | 77 + kernel/drivers/gpu/drm/gma500/mdfld_tmd_vid.c | 201 + kernel/drivers/gpu/drm/gma500/mdfld_tpo_vid.c | 124 + kernel/drivers/gpu/drm/gma500/mid_bios.c | 337 + kernel/drivers/gpu/drm/gma500/mid_bios.h | 21 + kernel/drivers/gpu/drm/gma500/mmu.c | 812 ++ kernel/drivers/gpu/drm/gma500/mmu.h | 93 + kernel/drivers/gpu/drm/gma500/oaktrail.h | 257 + kernel/drivers/gpu/drm/gma500/oaktrail_crtc.c | 672 + kernel/drivers/gpu/drm/gma500/oaktrail_device.c | 575 + kernel/drivers/gpu/drm/gma500/oaktrail_hdmi.c | 856 ++ kernel/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 327 + kernel/drivers/gpu/drm/gma500/oaktrail_lvds.c | 432 + kernel/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c | 170 + kernel/drivers/gpu/drm/gma500/opregion.c | 357 + kernel/drivers/gpu/drm/gma500/opregion.h | 54 + kernel/drivers/gpu/drm/gma500/power.c | 332 + kernel/drivers/gpu/drm/gma500/power.h | 70 + kernel/drivers/gpu/drm/gma500/psb_device.c | 360 + 
kernel/drivers/gpu/drm/gma500/psb_device.h | 24 + kernel/drivers/gpu/drm/gma500/psb_drv.c | 550 + kernel/drivers/gpu/drm/gma500/psb_drv.h | 925 ++ kernel/drivers/gpu/drm/gma500/psb_intel_display.c | 585 + kernel/drivers/gpu/drm/gma500/psb_intel_drv.h | 286 + kernel/drivers/gpu/drm/gma500/psb_intel_lvds.c | 850 ++ kernel/drivers/gpu/drm/gma500/psb_intel_modes.c | 75 + kernel/drivers/gpu/drm/gma500/psb_intel_reg.h | 1545 ++ kernel/drivers/gpu/drm/gma500/psb_intel_sdvo.c | 2594 ++++ .../drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h | 723 + kernel/drivers/gpu/drm/gma500/psb_irq.c | 684 + kernel/drivers/gpu/drm/gma500/psb_irq.h | 47 + kernel/drivers/gpu/drm/gma500/psb_lid.c | 94 + kernel/drivers/gpu/drm/gma500/psb_reg.h | 582 + kernel/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 829 ++ kernel/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h | 38 + kernel/drivers/gpu/drm/i2c/Kconfig | 34 + kernel/drivers/gpu/drm/i2c/Makefile | 12 + kernel/drivers/gpu/drm/i2c/adv7511.c | 1014 ++ kernel/drivers/gpu/drm/i2c/adv7511.h | 289 + kernel/drivers/gpu/drm/i2c/ch7006_drv.c | 552 + kernel/drivers/gpu/drm/i2c/ch7006_mode.c | 471 + kernel/drivers/gpu/drm/i2c/ch7006_priv.h | 345 + kernel/drivers/gpu/drm/i2c/sil164_drv.c | 464 + kernel/drivers/gpu/drm/i2c/tda998x_drv.c | 1639 +++ kernel/drivers/gpu/drm/i810/Makefile | 8 + kernel/drivers/gpu/drm/i810/i810_dma.c | 1269 ++ kernel/drivers/gpu/drm/i810/i810_drv.c | 104 + kernel/drivers/gpu/drm/i810/i810_drv.h | 245 + kernel/drivers/gpu/drm/i915/Kconfig | 73 + kernel/drivers/gpu/drm/i915/Makefile | 95 + kernel/drivers/gpu/drm/i915/dvo.h | 138 + kernel/drivers/gpu/drm/i915/dvo_ch7017.c | 414 + kernel/drivers/gpu/drm/i915/dvo_ch7xxx.c | 368 + kernel/drivers/gpu/drm/i915/dvo_ivch.c | 436 + kernel/drivers/gpu/drm/i915/dvo_ns2501.c | 631 + kernel/drivers/gpu/drm/i915/dvo_sil164.c | 279 + kernel/drivers/gpu/drm/i915/dvo_tfp410.c | 318 + kernel/drivers/gpu/drm/i915/i915_cmd_parser.c | 1157 ++ kernel/drivers/gpu/drm/i915/i915_debugfs.c | 4785 ++++++ 
kernel/drivers/gpu/drm/i915/i915_dma.c | 1223 ++ kernel/drivers/gpu/drm/i915/i915_drv.c | 1677 +++ kernel/drivers/gpu/drm/i915/i915_drv.h | 3280 +++++ kernel/drivers/gpu/drm/i915/i915_gem.c | 5204 +++++++ kernel/drivers/gpu/drm/i915/i915_gem_batch_pool.c | 137 + kernel/drivers/gpu/drm/i915/i915_gem_context.c | 964 ++ kernel/drivers/gpu/drm/i915/i915_gem_debug.c | 118 + kernel/drivers/gpu/drm/i915/i915_gem_dmabuf.c | 320 + kernel/drivers/gpu/drm/i915/i915_gem_evict.c | 284 + kernel/drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1783 +++ kernel/drivers/gpu/drm/i915/i915_gem_gtt.c | 2759 ++++ kernel/drivers/gpu/drm/i915/i915_gem_gtt.h | 438 + .../drivers/gpu/drm/i915/i915_gem_render_state.c | 181 + .../drivers/gpu/drm/i915/i915_gem_render_state.h | 47 + kernel/drivers/gpu/drm/i915/i915_gem_shrinker.c | 335 + kernel/drivers/gpu/drm/i915/i915_gem_stolen.c | 553 + kernel/drivers/gpu/drm/i915/i915_gem_tiling.c | 549 + kernel/drivers/gpu/drm/i915/i915_gem_userptr.c | 854 ++ kernel/drivers/gpu/drm/i915/i915_gpu_error.c | 1386 ++ kernel/drivers/gpu/drm/i915/i915_ioc32.c | 219 + kernel/drivers/gpu/drm/i915/i915_irq.c | 4433 ++++++ kernel/drivers/gpu/drm/i915/i915_params.c | 186 + kernel/drivers/gpu/drm/i915/i915_reg.h | 7327 ++++++++++ kernel/drivers/gpu/drm/i915/i915_suspend.c | 171 + kernel/drivers/gpu/drm/i915/i915_sysfs.c | 677 + kernel/drivers/gpu/drm/i915/i915_trace.h | 814 ++ kernel/drivers/gpu/drm/i915/i915_trace_points.c | 13 + kernel/drivers/gpu/drm/i915/i915_vgpu.c | 264 + kernel/drivers/gpu/drm/i915/i915_vgpu.h | 91 + kernel/drivers/gpu/drm/i915/intel_acpi.c | 165 + kernel/drivers/gpu/drm/i915/intel_atomic.c | 243 + kernel/drivers/gpu/drm/i915/intel_atomic_plane.c | 230 + kernel/drivers/gpu/drm/i915/intel_audio.c | 573 + kernel/drivers/gpu/drm/i915/intel_bios.c | 1321 ++ kernel/drivers/gpu/drm/i915/intel_bios.h | 960 ++ kernel/drivers/gpu/drm/i915/intel_crt.c | 932 ++ kernel/drivers/gpu/drm/i915/intel_ddi.c | 2298 +++ kernel/drivers/gpu/drm/i915/intel_display.c | 
14613 +++++++++++++++++++ kernel/drivers/gpu/drm/i915/intel_dp.c | 5693 ++++++++ kernel/drivers/gpu/drm/i915/intel_dp_mst.c | 563 + kernel/drivers/gpu/drm/i915/intel_drv.h | 1325 ++ kernel/drivers/gpu/drm/i915/intel_dsi.c | 1114 ++ kernel/drivers/gpu/drm/i915/intel_dsi.h | 130 + kernel/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 681 + kernel/drivers/gpu/drm/i915/intel_dsi_pll.c | 384 + kernel/drivers/gpu/drm/i915/intel_dvo.c | 577 + kernel/drivers/gpu/drm/i915/intel_fbc.c | 741 + kernel/drivers/gpu/drm/i915/intel_fbdev.c | 809 + kernel/drivers/gpu/drm/i915/intel_fifo_underrun.c | 377 + kernel/drivers/gpu/drm/i915/intel_frontbuffer.c | 270 + kernel/drivers/gpu/drm/i915/intel_hdmi.c | 1806 +++ kernel/drivers/gpu/drm/i915/intel_i2c.c | 671 + kernel/drivers/gpu/drm/i915/intel_lrc.c | 2068 +++ kernel/drivers/gpu/drm/i915/intel_lrc.h | 93 + kernel/drivers/gpu/drm/i915/intel_lvds.c | 1164 ++ kernel/drivers/gpu/drm/i915/intel_modes.c | 128 + kernel/drivers/gpu/drm/i915/intel_opregion.c | 915 ++ kernel/drivers/gpu/drm/i915/intel_overlay.c | 1562 ++ kernel/drivers/gpu/drm/i915/intel_panel.c | 1423 ++ kernel/drivers/gpu/drm/i915/intel_pm.c | 6773 +++++++++ kernel/drivers/gpu/drm/i915/intel_psr.c | 665 + kernel/drivers/gpu/drm/i915/intel_renderstate.h | 41 + .../drivers/gpu/drm/i915/intel_renderstate_gen6.c | 315 + .../drivers/gpu/drm/i915/intel_renderstate_gen7.c | 279 + .../drivers/gpu/drm/i915/intel_renderstate_gen8.c | 983 ++ .../drivers/gpu/drm/i915/intel_renderstate_gen9.c | 999 ++ kernel/drivers/gpu/drm/i915/intel_ringbuffer.c | 2902 ++++ kernel/drivers/gpu/drm/i915/intel_ringbuffer.h | 443 + kernel/drivers/gpu/drm/i915/intel_runtime_pm.c | 1594 ++ kernel/drivers/gpu/drm/i915/intel_sdvo.c | 3066 ++++ kernel/drivers/gpu/drm/i915/intel_sdvo_regs.h | 730 + kernel/drivers/gpu/drm/i915/intel_sideband.c | 282 + kernel/drivers/gpu/drm/i915/intel_sprite.c | 1304 ++ kernel/drivers/gpu/drm/i915/intel_tv.c | 1700 +++ kernel/drivers/gpu/drm/i915/intel_uncore.c | 1429 ++ 
kernel/drivers/gpu/drm/imx/Kconfig | 56 + kernel/drivers/gpu/drm/imx/Makefile | 12 + kernel/drivers/gpu/drm/imx/dw_hdmi-imx.c | 284 + kernel/drivers/gpu/drm/imx/imx-drm-core.c | 644 + kernel/drivers/gpu/drm/imx/imx-drm.h | 56 + kernel/drivers/gpu/drm/imx/imx-ldb.c | 690 + kernel/drivers/gpu/drm/imx/imx-tve.c | 739 + kernel/drivers/gpu/drm/imx/ipuv3-crtc.c | 515 + kernel/drivers/gpu/drm/imx/ipuv3-plane.c | 397 + kernel/drivers/gpu/drm/imx/ipuv3-plane.h | 57 + kernel/drivers/gpu/drm/imx/parallel-display.c | 303 + kernel/drivers/gpu/drm/mga/Makefile | 11 + kernel/drivers/gpu/drm/mga/mga_dma.c | 1150 ++ kernel/drivers/gpu/drm/mga/mga_drv.c | 143 + kernel/drivers/gpu/drm/mga/mga_drv.h | 668 + kernel/drivers/gpu/drm/mga/mga_ioc32.c | 226 + kernel/drivers/gpu/drm/mga/mga_irq.c | 173 + kernel/drivers/gpu/drm/mga/mga_state.c | 1102 ++ kernel/drivers/gpu/drm/mga/mga_warp.c | 169 + kernel/drivers/gpu/drm/mgag200/Kconfig | 16 + kernel/drivers/gpu/drm/mgag200/Makefile | 5 + kernel/drivers/gpu/drm/mgag200/mgag200_cursor.c | 277 + kernel/drivers/gpu/drm/mgag200/mgag200_drv.c | 138 + kernel/drivers/gpu/drm/mgag200/mgag200_drv.h | 310 + kernel/drivers/gpu/drm/mgag200/mgag200_fb.c | 330 + kernel/drivers/gpu/drm/mgag200/mgag200_i2c.c | 156 + kernel/drivers/gpu/drm/mgag200/mgag200_main.c | 367 + kernel/drivers/gpu/drm/mgag200/mgag200_mode.c | 1660 +++ kernel/drivers/gpu/drm/mgag200/mgag200_reg.h | 665 + kernel/drivers/gpu/drm/mgag200/mgag200_ttm.c | 436 + kernel/drivers/gpu/drm/msm/Kconfig | 48 + kernel/drivers/gpu/drm/msm/Makefile | 59 + kernel/drivers/gpu/drm/msm/NOTES | 87 + kernel/drivers/gpu/drm/msm/adreno/a2xx.xml.h | 1597 ++ kernel/drivers/gpu/drm/msm/adreno/a3xx.xml.h | 2684 ++++ kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 597 + kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 39 + kernel/drivers/gpu/drm/msm/adreno/a4xx.xml.h | 2300 +++ kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 604 + kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 34 + 
.../drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 446 + kernel/drivers/gpu/drm/msm/adreno/adreno_device.c | 298 + kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.c | 392 + kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.h | 301 + kernel/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 499 + kernel/drivers/gpu/drm/msm/dsi/dsi.c | 212 + kernel/drivers/gpu/drm/msm/dsi/dsi.h | 117 + kernel/drivers/gpu/drm/msm/dsi/dsi.xml.h | 839 ++ kernel/drivers/gpu/drm/msm/dsi/dsi_host.c | 1988 +++ kernel/drivers/gpu/drm/msm/dsi/dsi_manager.c | 709 + kernel/drivers/gpu/drm/msm/dsi/dsi_phy.c | 352 + kernel/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 123 + kernel/drivers/gpu/drm/msm/dsi/sfpb.xml.h | 51 + kernel/drivers/gpu/drm/msm/edp/edp.c | 208 + kernel/drivers/gpu/drm/msm/edp/edp.h | 85 + kernel/drivers/gpu/drm/msm/edp/edp.xml.h | 292 + kernel/drivers/gpu/drm/msm/edp/edp_aux.c | 268 + kernel/drivers/gpu/drm/msm/edp/edp_bridge.c | 120 + kernel/drivers/gpu/drm/msm/edp/edp_connector.c | 163 + kernel/drivers/gpu/drm/msm/edp/edp_ctrl.c | 1374 ++ kernel/drivers/gpu/drm/msm/edp/edp_phy.c | 106 + kernel/drivers/gpu/drm/msm/hdmi/hdmi.c | 489 + kernel/drivers/gpu/drm/msm/hdmi/hdmi.h | 166 + kernel/drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 754 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 273 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 233 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 449 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 281 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 561 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 214 + kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | 157 + kernel/drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 53 + kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 1173 ++ kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 647 + .../gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 298 + kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 98 + kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 545 + kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 250 + 
.../gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 504 + .../gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 154 + .../drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c | 172 + kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 382 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 1462 ++ kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 299 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 110 + .../gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 343 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 688 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 565 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 97 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 360 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 203 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 566 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 266 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 650 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 343 + kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 46 + kernel/drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 92 + kernel/drivers/gpu/drm/msm/mdp/mdp_format.c | 159 + kernel/drivers/gpu/drm/msm/mdp/mdp_kms.c | 148 + kernel/drivers/gpu/drm/msm/mdp/mdp_kms.h | 119 + kernel/drivers/gpu/drm/msm/msm_atomic.c | 268 + kernel/drivers/gpu/drm/msm/msm_drv.c | 1105 ++ kernel/drivers/gpu/drm/msm/msm_drv.h | 316 + kernel/drivers/gpu/drm/msm/msm_fb.c | 245 + kernel/drivers/gpu/drm/msm/msm_fbdev.c | 294 + kernel/drivers/gpu/drm/msm/msm_gem.c | 707 + kernel/drivers/gpu/drm/msm/msm_gem.h | 118 + kernel/drivers/gpu/drm/msm/msm_gem_prime.c | 70 + kernel/drivers/gpu/drm/msm/msm_gem_submit.c | 427 + kernel/drivers/gpu/drm/msm/msm_gpu.c | 651 + kernel/drivers/gpu/drm/msm/msm_gpu.h | 173 + kernel/drivers/gpu/drm/msm/msm_iommu.c | 140 + kernel/drivers/gpu/drm/msm/msm_kms.h | 77 + kernel/drivers/gpu/drm/msm/msm_mmu.h | 48 + kernel/drivers/gpu/drm/msm/msm_perf.c | 275 + kernel/drivers/gpu/drm/msm/msm_rd.c | 337 + kernel/drivers/gpu/drm/msm/msm_ringbuffer.c | 61 + 
kernel/drivers/gpu/drm/msm/msm_ringbuffer.h | 43 + kernel/drivers/gpu/drm/nouveau/Kbuild | 66 + kernel/drivers/gpu/drm/nouveau/Kconfig | 70 + kernel/drivers/gpu/drm/nouveau/dispnv04/Kbuild | 11 + kernel/drivers/gpu/drm/nouveau/dispnv04/arb.c | 265 + kernel/drivers/gpu/drm/nouveau/dispnv04/crtc.c | 1146 ++ kernel/drivers/gpu/drm/nouveau/dispnv04/cursor.c | 70 + kernel/drivers/gpu/drm/nouveau/dispnv04/dac.c | 556 + kernel/drivers/gpu/drm/nouveau/dispnv04/dfp.c | 722 + kernel/drivers/gpu/drm/nouveau/dispnv04/disp.c | 190 + kernel/drivers/gpu/drm/nouveau/dispnv04/disp.h | 186 + kernel/drivers/gpu/drm/nouveau/dispnv04/hw.c | 825 ++ kernel/drivers/gpu/drm/nouveau/dispnv04/hw.h | 409 + kernel/drivers/gpu/drm/nouveau/dispnv04/nvreg.h | 517 + kernel/drivers/gpu/drm/nouveau/dispnv04/overlay.c | 502 + .../drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c | 592 + kernel/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 249 + kernel/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 837 ++ kernel/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h | 163 + .../drivers/gpu/drm/nouveau/include/nvif/class.h | 583 + .../drivers/gpu/drm/nouveau/include/nvif/client.h | 39 + .../drivers/gpu/drm/nouveau/include/nvif/device.h | 61 + .../drivers/gpu/drm/nouveau/include/nvif/driver.h | 22 + .../drivers/gpu/drm/nouveau/include/nvif/event.h | 62 + .../drivers/gpu/drm/nouveau/include/nvif/ioctl.h | 128 + kernel/drivers/gpu/drm/nouveau/include/nvif/list.h | 353 + .../drivers/gpu/drm/nouveau/include/nvif/notify.h | 39 + .../drivers/gpu/drm/nouveau/include/nvif/object.h | 75 + kernel/drivers/gpu/drm/nouveau/include/nvif/os.h | 44 + .../drivers/gpu/drm/nouveau/include/nvif/unpack.h | 24 + .../gpu/drm/nouveau/include/nvkm/core/client.h | 55 + .../gpu/drm/nouveau/include/nvkm/core/debug.h | 18 + .../gpu/drm/nouveau/include/nvkm/core/device.h | 101 + .../gpu/drm/nouveau/include/nvkm/core/devidx.h | 62 + .../gpu/drm/nouveau/include/nvkm/core/engctx.h | 51 + .../gpu/drm/nouveau/include/nvkm/core/engine.h | 56 + 
.../gpu/drm/nouveau/include/nvkm/core/enum.h | 21 + .../gpu/drm/nouveau/include/nvkm/core/event.h | 34 + .../gpu/drm/nouveau/include/nvkm/core/gpuobj.h | 64 + .../gpu/drm/nouveau/include/nvkm/core/handle.h | 34 + .../gpu/drm/nouveau/include/nvkm/core/ioctl.h | 7 + .../drivers/gpu/drm/nouveau/include/nvkm/core/mm.h | 40 + .../gpu/drm/nouveau/include/nvkm/core/namedb.h | 53 + .../gpu/drm/nouveau/include/nvkm/core/notify.h | 38 + .../gpu/drm/nouveau/include/nvkm/core/object.h | 203 + .../gpu/drm/nouveau/include/nvkm/core/option.h | 17 + .../drivers/gpu/drm/nouveau/include/nvkm/core/os.h | 4 + .../gpu/drm/nouveau/include/nvkm/core/parent.h | 58 + .../gpu/drm/nouveau/include/nvkm/core/printk.h | 29 + .../gpu/drm/nouveau/include/nvkm/core/ramht.h | 20 + .../gpu/drm/nouveau/include/nvkm/core/subdev.h | 119 + .../gpu/drm/nouveau/include/nvkm/engine/bsp.h | 5 + .../gpu/drm/nouveau/include/nvkm/engine/ce.h | 16 + .../gpu/drm/nouveau/include/nvkm/engine/cipher.h | 5 + .../gpu/drm/nouveau/include/nvkm/engine/device.h | 30 + .../gpu/drm/nouveau/include/nvkm/engine/disp.h | 32 + .../gpu/drm/nouveau/include/nvkm/engine/dmaobj.h | 26 + .../gpu/drm/nouveau/include/nvkm/engine/falcon.h | 81 + .../gpu/drm/nouveau/include/nvkm/engine/fifo.h | 127 + .../gpu/drm/nouveau/include/nvkm/engine/gr.h | 88 + .../gpu/drm/nouveau/include/nvkm/engine/mpeg.h | 62 + .../gpu/drm/nouveau/include/nvkm/engine/mspdec.h | 7 + .../gpu/drm/nouveau/include/nvkm/engine/msppp.h | 6 + .../gpu/drm/nouveau/include/nvkm/engine/msvld.h | 7 + .../gpu/drm/nouveau/include/nvkm/engine/pm.h | 34 + .../gpu/drm/nouveau/include/nvkm/engine/sec.h | 5 + .../gpu/drm/nouveau/include/nvkm/engine/sw.h | 50 + .../gpu/drm/nouveau/include/nvkm/engine/vp.h | 5 + .../gpu/drm/nouveau/include/nvkm/engine/xtensa.h | 35 + .../gpu/drm/nouveau/include/nvkm/subdev/bar.h | 33 + .../gpu/drm/nouveau/include/nvkm/subdev/bios.h | 32 + .../drm/nouveau/include/nvkm/subdev/bios/M0203.h | 29 + .../drm/nouveau/include/nvkm/subdev/bios/M0205.h | 29 + 
.../drm/nouveau/include/nvkm/subdev/bios/M0209.h | 27 + .../drm/nouveau/include/nvkm/subdev/bios/P0260.h | 21 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h | 11 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h | 37 + .../drm/nouveau/include/nvkm/subdev/bios/boost.h | 27 + .../drm/nouveau/include/nvkm/subdev/bios/conn.h | 44 + .../drm/nouveau/include/nvkm/subdev/bios/cstep.h | 26 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h | 65 + .../drm/nouveau/include/nvkm/subdev/bios/disp.h | 39 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h | 31 + .../drm/nouveau/include/nvkm/subdev/bios/extdev.h | 25 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h | 6 + .../drm/nouveau/include/nvkm/subdev/bios/gpio.h | 46 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h | 25 + .../drm/nouveau/include/nvkm/subdev/bios/image.h | 11 + .../drm/nouveau/include/nvkm/subdev/bios/init.h | 20 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h | 6 + .../drm/nouveau/include/nvkm/subdev/bios/npde.h | 10 + .../drm/nouveau/include/nvkm/subdev/bios/pcir.h | 16 + .../drm/nouveau/include/nvkm/subdev/bios/perf.h | 41 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h | 75 + .../gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h | 35 + .../drm/nouveau/include/nvkm/subdev/bios/ramcfg.h | 141 + .../drm/nouveau/include/nvkm/subdev/bios/rammap.h | 21 + .../drm/nouveau/include/nvkm/subdev/bios/therm.h | 72 + .../drm/nouveau/include/nvkm/subdev/bios/timing.h | 11 + .../drm/nouveau/include/nvkm/subdev/bios/vmap.h | 21 + .../drm/nouveau/include/nvkm/subdev/bios/volt.h | 23 + .../drm/nouveau/include/nvkm/subdev/bios/xpio.h | 18 + .../gpu/drm/nouveau/include/nvkm/subdev/bus.h | 50 + .../gpu/drm/nouveau/include/nvkm/subdev/clk.h | 161 + .../gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 32 + .../gpu/drm/nouveau/include/nvkm/subdev/fb.h | 154 + .../gpu/drm/nouveau/include/nvkm/subdev/fuse.h | 28 + .../gpu/drm/nouveau/include/nvkm/subdev/gpio.h | 44 + 
.../gpu/drm/nouveau/include/nvkm/subdev/i2c.h | 135 + .../gpu/drm/nouveau/include/nvkm/subdev/ibus.h | 32 + .../gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 49 + .../gpu/drm/nouveau/include/nvkm/subdev/ltc.h | 31 + .../gpu/drm/nouveau/include/nvkm/subdev/mc.h | 28 + .../gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 104 + .../gpu/drm/nouveau/include/nvkm/subdev/mxm.h | 34 + .../gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 54 + .../gpu/drm/nouveau/include/nvkm/subdev/therm.h | 79 + .../gpu/drm/nouveau/include/nvkm/subdev/timer.h | 61 + .../gpu/drm/nouveau/include/nvkm/subdev/vga.h | 30 + .../gpu/drm/nouveau/include/nvkm/subdev/volt.h | 58 + kernel/drivers/gpu/drm/nouveau/nouveau_abi16.c | 537 + kernel/drivers/gpu/drm/nouveau/nouveau_abi16.h | 114 + kernel/drivers/gpu/drm/nouveau/nouveau_acpi.c | 427 + kernel/drivers/gpu/drm/nouveau/nouveau_acpi.h | 26 + kernel/drivers/gpu/drm/nouveau/nouveau_agp.c | 195 + kernel/drivers/gpu/drm/nouveau/nouveau_agp.h | 10 + kernel/drivers/gpu/drm/nouveau/nouveau_backlight.c | 254 + kernel/drivers/gpu/drm/nouveau/nouveau_bios.c | 2127 +++ kernel/drivers/gpu/drm/nouveau/nouveau_bios.h | 179 + kernel/drivers/gpu/drm/nouveau/nouveau_bo.c | 1661 +++ kernel/drivers/gpu/drm/nouveau/nouveau_bo.h | 108 + kernel/drivers/gpu/drm/nouveau/nouveau_chan.c | 425 + kernel/drivers/gpu/drm/nouveau/nouveau_chan.h | 52 + kernel/drivers/gpu/drm/nouveau/nouveau_connector.c | 1275 ++ kernel/drivers/gpu/drm/nouveau/nouveau_connector.h | 113 + kernel/drivers/gpu/drm/nouveau/nouveau_crtc.h | 90 + kernel/drivers/gpu/drm/nouveau/nouveau_debugfs.c | 64 + kernel/drivers/gpu/drm/nouveau/nouveau_debugfs.h | 22 + kernel/drivers/gpu/drm/nouveau/nouveau_display.c | 910 ++ kernel/drivers/gpu/drm/nouveau/nouveau_display.h | 104 + kernel/drivers/gpu/drm/nouveau/nouveau_dma.c | 261 + kernel/drivers/gpu/drm/nouveau/nouveau_dma.h | 195 + kernel/drivers/gpu/drm/nouveau/nouveau_dp.c | 89 + kernel/drivers/gpu/drm/nouveau/nouveau_drm.c | 1136 ++ 
kernel/drivers/gpu/drm/nouveau/nouveau_drm.h | 202 + kernel/drivers/gpu/drm/nouveau/nouveau_encoder.h | 93 + kernel/drivers/gpu/drm/nouveau/nouveau_fbcon.c | 572 + kernel/drivers/gpu/drm/nouveau/nouveau_fbcon.h | 79 + kernel/drivers/gpu/drm/nouveau/nouveau_fence.c | 590 + kernel/drivers/gpu/drm/nouveau/nouveau_fence.h | 107 + kernel/drivers/gpu/drm/nouveau/nouveau_gem.c | 925 ++ kernel/drivers/gpu/drm/nouveau/nouveau_gem.h | 46 + kernel/drivers/gpu/drm/nouveau/nouveau_hwmon.c | 648 + kernel/drivers/gpu/drm/nouveau/nouveau_hwmon.h | 43 + kernel/drivers/gpu/drm/nouveau/nouveau_ioc32.c | 69 + kernel/drivers/gpu/drm/nouveau/nouveau_ioctl.h | 7 + kernel/drivers/gpu/drm/nouveau/nouveau_nvif.c | 136 + kernel/drivers/gpu/drm/nouveau/nouveau_platform.c | 252 + kernel/drivers/gpu/drm/nouveau/nouveau_platform.h | 72 + kernel/drivers/gpu/drm/nouveau/nouveau_prime.c | 115 + kernel/drivers/gpu/drm/nouveau/nouveau_reg.h | 858 ++ kernel/drivers/gpu/drm/nouveau/nouveau_sgdma.c | 116 + kernel/drivers/gpu/drm/nouveau/nouveau_sysfs.c | 198 + kernel/drivers/gpu/drm/nouveau/nouveau_sysfs.h | 21 + kernel/drivers/gpu/drm/nouveau/nouveau_ttm.c | 438 + kernel/drivers/gpu/drm/nouveau/nouveau_ttm.h | 25 + kernel/drivers/gpu/drm/nouveau/nouveau_usif.c | 384 + kernel/drivers/gpu/drm/nouveau/nouveau_usif.h | 9 + kernel/drivers/gpu/drm/nouveau/nouveau_vga.c | 129 + kernel/drivers/gpu/drm/nouveau/nouveau_vga.h | 8 + kernel/drivers/gpu/drm/nouveau/nv04_fbcon.c | 280 + kernel/drivers/gpu/drm/nouveau/nv04_fence.c | 111 + kernel/drivers/gpu/drm/nouveau/nv10_fence.c | 113 + kernel/drivers/gpu/drm/nouveau/nv10_fence.h | 20 + kernel/drivers/gpu/drm/nouveau/nv17_fence.c | 151 + kernel/drivers/gpu/drm/nouveau/nv50_display.c | 2568 ++++ kernel/drivers/gpu/drm/nouveau/nv50_display.h | 45 + kernel/drivers/gpu/drm/nouveau/nv50_fbcon.c | 258 + kernel/drivers/gpu/drm/nouveau/nv50_fence.c | 122 + kernel/drivers/gpu/drm/nouveau/nv84_fence.c | 273 + kernel/drivers/gpu/drm/nouveau/nvc0_fbcon.c | 261 + 
kernel/drivers/gpu/drm/nouveau/nvc0_fence.c | 84 + kernel/drivers/gpu/drm/nouveau/nvif/Kbuild | 4 + kernel/drivers/gpu/drm/nouveau/nvif/client.c | 130 + kernel/drivers/gpu/drm/nouveau/nvif/device.c | 78 + kernel/drivers/gpu/drm/nouveau/nvif/notify.c | 248 + kernel/drivers/gpu/drm/nouveau/nvif/object.c | 304 + kernel/drivers/gpu/drm/nouveau/nvkm/Kbuild | 3 + kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild | 17 + kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c | 266 + kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c | 239 + kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c | 75 + kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c | 66 + kernel/drivers/gpu/drm/nouveau/nvkm/core/event.c | 99 + kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c | 316 + kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c | 221 + kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 526 + kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c | 304 + kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c | 199 + kernel/drivers/gpu/drm/nouveau/nvkm/core/notify.c | 163 + kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c | 330 + kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c | 121 + kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c | 159 + kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c | 103 + kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 106 + kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 120 + kernel/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild | 19 + .../drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild | 1 + .../drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | 93 + .../drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 4 + .../gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc | 864 ++ .../gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3 | 2 + .../drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h | 606 + .../gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3 | 2 + .../drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h | 620 + .../drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c | 166 + 
.../drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c | 173 + .../drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c | 173 + .../drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c | 152 + .../gpu/drm/nouveau/nvkm/engine/cipher/Kbuild | 1 + .../gpu/drm/nouveau/nvkm/engine/cipher/g84.c | 184 + .../gpu/drm/nouveau/nvkm/engine/device/Kbuild | 12 + .../gpu/drm/nouveau/nvkm/engine/device/acpi.c | 60 + .../gpu/drm/nouveau/nvkm/engine/device/acpi.h | 8 + .../gpu/drm/nouveau/nvkm/engine/device/base.c | 736 + .../gpu/drm/nouveau/nvkm/engine/device/ctrl.c | 199 + .../gpu/drm/nouveau/nvkm/engine/device/gf100.c | 358 + .../gpu/drm/nouveau/nvkm/engine/device/gk104.c | 326 + .../gpu/drm/nouveau/nvkm/engine/device/gm100.c | 190 + .../gpu/drm/nouveau/nvkm/engine/device/nv04.c | 89 + .../gpu/drm/nouveau/nvkm/engine/device/nv10.c | 204 + .../gpu/drm/nouveau/nvkm/engine/device/nv20.c | 131 + .../gpu/drm/nouveau/nvkm/engine/device/nv30.c | 153 + .../gpu/drm/nouveau/nvkm/engine/device/nv40.c | 427 + .../gpu/drm/nouveau/nvkm/engine/device/nv50.c | 478 + .../gpu/drm/nouveau/nvkm/engine/device/priv.h | 16 + .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 29 + .../gpu/drm/nouveau/nvkm/engine/disp/base.c | 240 + .../gpu/drm/nouveau/nvkm/engine/disp/conn.c | 174 + .../gpu/drm/nouveau/nvkm/engine/disp/conn.h | 58 + .../gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c | 99 + .../gpu/drm/nouveau/nvkm/engine/disp/dport.c | 398 + .../gpu/drm/nouveau/nvkm/engine/disp/dport.h | 75 + .../drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c | 272 + .../drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c | 139 + .../gpu/drm/nouveau/nvkm/engine/disp/gf110.c | 1310 ++ .../gpu/drm/nouveau/nvkm/engine/disp/gk104.c | 268 + .../gpu/drm/nouveau/nvkm/engine/disp/gk110.c | 103 + .../gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 103 + .../gpu/drm/nouveau/nvkm/engine/disp/gm204.c | 111 + .../gpu/drm/nouveau/nvkm/engine/disp/gt200.c | 148 + .../gpu/drm/nouveau/nvkm/engine/disp/gt215.c | 104 + .../gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c | 73 + 
.../gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 69 + .../gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c | 91 + .../gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c | 79 + .../gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c | 83 + .../gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c | 92 + .../gpu/drm/nouveau/nvkm/engine/disp/nv04.c | 205 + .../gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 2019 +++ .../gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 226 + .../gpu/drm/nouveau/nvkm/engine/disp/outp.c | 142 + .../gpu/drm/nouveau/nvkm/engine/disp/outp.h | 61 + .../gpu/drm/nouveau/nvkm/engine/disp/outpdp.c | 301 + .../gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | 61 + .../gpu/drm/nouveau/nvkm/engine/disp/piornv50.c | 170 + .../gpu/drm/nouveau/nvkm/engine/disp/priv.h | 42 + .../gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 145 + .../gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c | 124 + .../gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c | 139 + .../gpu/drm/nouveau/nvkm/engine/disp/sornv50.c | 56 + .../drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c | 219 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild | 5 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/base.c | 164 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c | 176 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c | 165 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c | 163 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c | 195 + .../gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h | 28 + .../drivers/gpu/drm/nouveau/nvkm/engine/falcon.c | 277 + .../gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 12 + .../gpu/drm/nouveau/nvkm/engine/fifo/base.c | 282 + .../drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c | 487 + .../gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | 967 ++ .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 1139 ++ .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 18 + .../gpu/drm/nouveau/nvkm/engine/fifo/gk208.c | 36 + .../gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c | 34 + .../gpu/drm/nouveau/nvkm/engine/fifo/gm204.c | 57 + .../gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 635 + 
.../gpu/drm/nouveau/nvkm/engine/fifo/nv04.h | 175 + .../gpu/drm/nouveau/nvkm/engine/fifo/nv10.c | 178 + .../gpu/drm/nouveau/nvkm/engine/fifo/nv17.c | 215 + .../gpu/drm/nouveau/nvkm/engine/fifo/nv40.c | 356 + .../gpu/drm/nouveau/nvkm/engine/fifo/nv50.c | 534 + .../gpu/drm/nouveau/nvkm/engine/fifo/nv50.h | 36 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 40 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 1390 ++ .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 228 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c | 108 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c | 806 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c | 359 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 284 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c | 529 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 1024 ++ .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c | 842 ++ .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 103 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c | 564 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c | 62 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 1028 ++ .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c | 1054 ++ .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c | 83 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c | 694 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h | 129 + .../gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c | 3345 +++++ .../gpu/drm/nouveau/nvkm/engine/gr/fuc/com.fuc | 335 + .../gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc | 490 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3 | 42 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h | 530 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3 | 42 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | 537 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3 | 42 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | 537 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3 | 42 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | 537 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5 | 42 + 
.../drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | 473 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5 | 42 + .../drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | 605 + .../gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc | 696 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3 | 40 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h | 1047 ++ .../drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3 | 40 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h | 1047 ++ .../drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3 | 40 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h | 1044 ++ .../drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3 | 40 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h | 1044 ++ .../drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5 | 40 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h | 916 ++ .../drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5 | 40 + .../drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h | 916 ++ .../gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc | 261 + .../gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h | 8 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 1692 +++ .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 266 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c | 127 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 134 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c | 116 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c | 136 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c | 190 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 359 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c | 211 + .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c | 116 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c | 190 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c | 49 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 470 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c | 386 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c | 40 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c | 1382 ++ 
.../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c | 1315 ++ .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c | 376 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h | 26 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c | 158 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c | 125 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c | 231 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c | 159 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c | 159 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c | 527 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h | 24 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c | 999 ++ .../drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h | 9 + .../drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h | 274 + .../gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild | 5 + .../drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c | 94 + .../gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 304 + .../gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h | 13 + .../gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c | 134 + .../gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 185 + .../gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c | 225 + .../gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild | 3 + .../gpu/drm/nouveau/nvkm/engine/mspdec/g98.c | 109 + .../gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c | 109 + .../gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c | 109 + .../gpu/drm/nouveau/nvkm/engine/msppp/Kbuild | 2 + .../gpu/drm/nouveau/nvkm/engine/msppp/g98.c | 109 + .../gpu/drm/nouveau/nvkm/engine/msppp/gf100.c | 109 + .../gpu/drm/nouveau/nvkm/engine/msvld/Kbuild | 3 + .../gpu/drm/nouveau/nvkm/engine/msvld/g98.c | 110 + .../gpu/drm/nouveau/nvkm/engine/msvld/gf100.c | 109 + .../gpu/drm/nouveau/nvkm/engine/msvld/gk104.c | 109 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild | 9 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 476 + .../gpu/drm/nouveau/nvkm/engine/pm/daemon.c | 108 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c | 65 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c | 159 + 
.../drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h | 15 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c | 148 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c | 57 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c | 83 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c | 130 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h | 24 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c | 57 + .../drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h | 90 + .../drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild | 1 + .../gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s | 698 + .../drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h | 584 + .../drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c | 149 + .../drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild | 4 + .../drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c | 141 + .../drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c | 139 + .../drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c | 122 + .../drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c | 234 + .../drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h | 45 + .../drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild | 1 + .../drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c | 93 + .../drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c | 172 + kernel/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild | 19 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild | 4 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c | 144 + .../gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | 219 + .../gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c | 50 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | 271 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h | 30 + .../gpu/drm/nouveau/nvkm/subdev/bios/Kbuild | 37 + .../gpu/drm/nouveau/nvkm/subdev/bios/M0203.c | 128 + .../gpu/drm/nouveau/nvkm/subdev/bios/M0205.c | 135 + .../gpu/drm/nouveau/nvkm/subdev/bios/M0209.c | 135 + .../gpu/drm/nouveau/nvkm/subdev/bios/P0260.c | 107 + .../gpu/drm/nouveau/nvkm/subdev/bios/base.c | 206 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c | 49 + 
.../gpu/drm/nouveau/nvkm/subdev/bios/boost.c | 126 + .../gpu/drm/nouveau/nvkm/subdev/bios/conn.c | 97 + .../gpu/drm/nouveau/nvkm/subdev/bios/cstep.c | 122 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c | 234 + .../gpu/drm/nouveau/nvkm/subdev/bios/disp.c | 172 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 215 + .../gpu/drm/nouveau/nvkm/subdev/bios/extdev.c | 97 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | 93 + .../gpu/drm/nouveau/nvkm/subdev/bios/gpio.c | 150 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | 163 + .../gpu/drm/nouveau/nvkm/subdev/bios/image.c | 77 + .../gpu/drm/nouveau/nvkm/subdev/bios/init.c | 2247 +++ .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c | 134 + .../gpu/drm/nouveau/nvkm/subdev/bios/npde.c | 58 + .../gpu/drm/nouveau/nvkm/subdev/bios/pcir.c | 68 + .../gpu/drm/nouveau/nvkm/subdev/bios/perf.c | 201 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c | 417 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c | 134 + .../gpu/drm/nouveau/nvkm/subdev/bios/priv.h | 23 + .../gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c | 78 + .../gpu/drm/nouveau/nvkm/subdev/bios/rammap.c | 211 + .../gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 272 + .../gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c | 112 + .../gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c | 72 + .../gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c | 109 + .../gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c | 115 + .../gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c | 70 + .../gpu/drm/nouveau/nvkm/subdev/bios/therm.c | 214 + .../gpu/drm/nouveau/nvkm/subdev/bios/timing.c | 166 + .../gpu/drm/nouveau/nvkm/subdev/bios/vmap.c | 111 + .../gpu/drm/nouveau/nvkm/subdev/bios/volt.c | 136 + .../gpu/drm/nouveau/nvkm/subdev/bios/xpio.c | 74 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild | 6 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c | 58 + .../gpu/drm/nouveau/nvkm/subdev/bus/gf100.c | 80 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c | 143 + 
.../drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h | 141 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c | 94 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h | 21 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c | 91 + .../drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c | 104 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild | 12 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 591 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c | 47 + .../gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | 462 + .../gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | 500 + .../gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c | 680 + .../gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 533 + .../gpu/drm/nouveau/nvkm/subdev/clk/gt215.h | 18 + .../gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c | 429 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c | 103 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c | 241 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c | 561 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h | 28 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h | 11 + .../gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c | 87 + .../gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c | 245 + .../drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h | 14 + .../gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild | 14 + .../gpu/drm/nouveau/nvkm/subdev/devinit/base.c | 96 + .../gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h | 84 + .../gpu/drm/nouveau/nvkm/subdev/devinit/g84.c | 66 + .../gpu/drm/nouveau/nvkm/subdev/devinit/g98.c | 65 + .../gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c | 127 + .../gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c | 59 + .../gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c | 172 + .../gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c | 150 + .../gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c | 66 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c | 470 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h | 22 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c | 140 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c | 111 + 
.../gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c | 40 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c | 77 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c | 174 + .../gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h | 24 + .../gpu/drm/nouveau/nvkm/subdev/devinit/priv.h | 34 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 44 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 165 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c | 38 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c | 115 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c | 120 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 122 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h | 28 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 37 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c | 68 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c | 37 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c | 38 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c | 38 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c | 38 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c | 87 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h | 53 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c | 70 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c | 43 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c | 94 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c | 60 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c | 140 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c | 61 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c | 61 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c | 75 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h | 14 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c | 68 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c | 77 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c | 57 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c | 44 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c | 44 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c | 43 + 
.../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c | 320 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h | 31 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 73 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h | 180 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 731 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | 1639 +++ .../gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c | 55 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c | 1012 ++ .../gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c | 101 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c | 79 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c | 59 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c | 72 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c | 62 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c | 212 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c | 66 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c | 64 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c | 66 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c | 54 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c | 465 + .../gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h | 15 + .../gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h | 22 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c | 93 + .../drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c | 119 + .../gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild | 4 + .../gpu/drm/nouveau/nvkm/subdev/fuse/base.c | 51 + .../gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c | 78 + .../gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c | 62 + .../gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c | 76 + .../gpu/drm/nouveau/nvkm/subdev/fuse/priv.h | 7 + .../gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild | 6 + .../gpu/drm/nouveau/nvkm/subdev/gpio/base.c | 251 + .../drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c | 73 + .../gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c | 84 + .../gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c | 73 + .../gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c | 115 + .../gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c | 128 + .../gpu/drm/nouveau/nvkm/subdev/gpio/priv.h | 64 + 
.../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | 16 + .../gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c | 292 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 113 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c | 622 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c | 233 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c | 279 + .../gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c | 106 + .../gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c | 38 + .../gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c | 71 + .../gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c | 219 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c | 128 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c | 120 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c | 133 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h | 32 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c | 83 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h | 56 + .../gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c | 85 + .../gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c | 85 + .../gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c | 34 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h | 13 + .../drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h | 87 + .../gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild | 3 + .../gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 122 + .../gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 139 + .../gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 102 + .../gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild | 5 + .../gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 146 + .../gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 440 + .../gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 185 + .../gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h | 36 + .../gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c | 136 + .../gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 169 + .../gpu/drm/nouveau/nvkm/subdev/instmem/priv.h | 54 + .../drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild | 4 + .../drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c | 124 + .../gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c | 244 + 
.../gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c | 59 + .../gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | 153 + .../drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h | 69 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 11 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | 169 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c | 37 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c | 58 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c | 76 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c | 38 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c | 37 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c | 78 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h | 20 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c | 44 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c | 53 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c | 36 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c | 72 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h | 36 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 6 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 480 + .../gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c | 237 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c | 151 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h | 19 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c | 157 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c | 247 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c | 241 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild | 3 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c | 271 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c | 191 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h | 22 + .../drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c | 231 + .../drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild | 9 + .../drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 268 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/arith.fuc | 94 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3 | 70 + .../drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h | 
1865 +++ .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 | 70 + .../drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h | 1795 +++ .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5 | 70 + .../drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h | 1731 +++ .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3 | 70 + .../drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h | 1868 +++ .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc | 151 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/i2c_.fuc | 393 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/idle.fuc | 84 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc | 556 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc | 272 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc | 447 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h | 52 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/perf.fuc | 57 + .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc | 64 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c | 40 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c | 40 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | 67 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c | 95 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c | 41 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | 230 + .../gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c | 49 + .../drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c | 200 + .../drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 45 + .../gpu/drm/nouveau/nvkm/subdev/therm/Kbuild | 13 + .../gpu/drm/nouveau/nvkm/subdev/therm/base.c | 367 + .../gpu/drm/nouveau/nvkm/subdev/therm/fan.c | 282 + .../gpu/drm/nouveau/nvkm/subdev/therm/fannil.c | 53 + .../gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c | 113 + .../gpu/drm/nouveau/nvkm/subdev/therm/fantog.c | 118 + .../gpu/drm/nouveau/nvkm/subdev/therm/g84.c | 266 + .../gpu/drm/nouveau/nvkm/subdev/therm/gf110.c | 174 + .../gpu/drm/nouveau/nvkm/subdev/therm/gm107.c | 93 + .../gpu/drm/nouveau/nvkm/subdev/therm/gt215.c | 100 + .../drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c | 119 + .../gpu/drm/nouveau/nvkm/subdev/therm/nv40.c | 225 + 
.../gpu/drm/nouveau/nvkm/subdev/therm/nv50.c | 198 + .../gpu/drm/nouveau/nvkm/subdev/therm/priv.h | 153 + .../gpu/drm/nouveau/nvkm/subdev/therm/temp.c | 259 + .../gpu/drm/nouveau/nvkm/subdev/timer/Kbuild | 3 + .../gpu/drm/nouveau/nvkm/subdev/timer/base.c | 93 + .../gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c | 56 + .../gpu/drm/nouveau/nvkm/subdev/timer/nv04.c | 262 + .../gpu/drm/nouveau/nvkm/subdev/timer/nv04.h | 25 + .../gpu/drm/nouveau/nvkm/subdev/timer/priv.h | 4 + .../gpu/drm/nouveau/nvkm/subdev/volt/Kbuild | 4 + .../gpu/drm/nouveau/nvkm/subdev/volt/base.c | 204 + .../gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c | 197 + .../gpu/drm/nouveau/nvkm/subdev/volt/gpio.c | 96 + .../gpu/drm/nouveau/nvkm/subdev/volt/nv40.c | 55 + kernel/drivers/gpu/drm/omapdrm/Kconfig | 26 + kernel/drivers/gpu/drm/omapdrm/Makefile | 21 + kernel/drivers/gpu/drm/omapdrm/TODO | 23 + kernel/drivers/gpu/drm/omapdrm/omap_connector.c | 319 + kernel/drivers/gpu/drm/omapdrm/omap_crtc.c | 758 + kernel/drivers/gpu/drm/omapdrm/omap_debugfs.c | 125 + kernel/drivers/gpu/drm/omapdrm/omap_dmm_priv.h | 194 + kernel/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1032 ++ kernel/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | 142 + kernel/drivers/gpu/drm/omapdrm/omap_drv.c | 779 + kernel/drivers/gpu/drm/omapdrm/omap_drv.h | 298 + kernel/drivers/gpu/drm/omapdrm/omap_encoder.c | 218 + kernel/drivers/gpu/drm/omapdrm/omap_fb.c | 465 + kernel/drivers/gpu/drm/omapdrm/omap_fbdev.c | 343 + kernel/drivers/gpu/drm/omapdrm/omap_gem.c | 1487 ++ kernel/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 208 + kernel/drivers/gpu/drm/omapdrm/omap_irq.c | 342 + kernel/drivers/gpu/drm/omapdrm/omap_plane.c | 445 + kernel/drivers/gpu/drm/omapdrm/tcm-sita.c | 703 + kernel/drivers/gpu/drm/omapdrm/tcm-sita.h | 95 + kernel/drivers/gpu/drm/omapdrm/tcm.h | 328 + kernel/drivers/gpu/drm/panel/Kconfig | 46 + kernel/drivers/gpu/drm/panel/Makefile | 4 + kernel/drivers/gpu/drm/panel/panel-ld9040.c | 389 + kernel/drivers/gpu/drm/panel/panel-s6e8aa0.c | 1067 ++ 
.../gpu/drm/panel/panel-sharp-lq101r1sx01.c | 475 + kernel/drivers/gpu/drm/panel/panel-simple.c | 1297 ++ kernel/drivers/gpu/drm/qxl/Kconfig | 15 + kernel/drivers/gpu/drm/qxl/Makefile | 9 + kernel/drivers/gpu/drm/qxl/qxl_cmd.c | 686 + kernel/drivers/gpu/drm/qxl/qxl_debugfs.c | 149 + kernel/drivers/gpu/drm/qxl/qxl_dev.h | 879 ++ kernel/drivers/gpu/drm/qxl/qxl_display.c | 1114 ++ kernel/drivers/gpu/drm/qxl/qxl_draw.c | 487 + kernel/drivers/gpu/drm/qxl/qxl_drv.c | 295 + kernel/drivers/gpu/drm/qxl/qxl_drv.h | 576 + kernel/drivers/gpu/drm/qxl/qxl_dumb.c | 86 + kernel/drivers/gpu/drm/qxl/qxl_fb.c | 729 + kernel/drivers/gpu/drm/qxl/qxl_gem.c | 117 + kernel/drivers/gpu/drm/qxl/qxl_image.c | 237 + kernel/drivers/gpu/drm/qxl/qxl_ioctl.c | 454 + kernel/drivers/gpu/drm/qxl/qxl_irq.c | 100 + kernel/drivers/gpu/drm/qxl/qxl_kms.c | 350 + kernel/drivers/gpu/drm/qxl/qxl_object.c | 325 + kernel/drivers/gpu/drm/qxl/qxl_object.h | 103 + kernel/drivers/gpu/drm/qxl/qxl_prime.c | 72 + kernel/drivers/gpu/drm/qxl/qxl_release.c | 476 + kernel/drivers/gpu/drm/qxl/qxl_ttm.c | 489 + kernel/drivers/gpu/drm/r128/Makefile | 10 + kernel/drivers/gpu/drm/r128/r128_cce.c | 937 ++ kernel/drivers/gpu/drm/r128/r128_drv.c | 112 + kernel/drivers/gpu/drm/r128/r128_drv.h | 533 + kernel/drivers/gpu/drm/r128/r128_ioc32.c | 215 + kernel/drivers/gpu/drm/r128/r128_irq.c | 115 + kernel/drivers/gpu/drm/r128/r128_state.c | 1644 +++ kernel/drivers/gpu/drm/radeon/.gitignore | 3 + kernel/drivers/gpu/drm/radeon/Kconfig | 16 + kernel/drivers/gpu/drm/radeon/Makefile | 118 + kernel/drivers/gpu/drm/radeon/ObjectID.h | 736 + kernel/drivers/gpu/drm/radeon/atom-bits.h | 48 + kernel/drivers/gpu/drm/radeon/atom-names.h | 100 + kernel/drivers/gpu/drm/radeon/atom-types.h | 42 + kernel/drivers/gpu/drm/radeon/atom.c | 1425 ++ kernel/drivers/gpu/drm/radeon/atom.h | 161 + kernel/drivers/gpu/drm/radeon/atombios.h | 7981 ++++++++++ kernel/drivers/gpu/drm/radeon/atombios_crtc.c | 2246 +++ kernel/drivers/gpu/drm/radeon/atombios_dp.c | 
886 ++ kernel/drivers/gpu/drm/radeon/atombios_encoders.c | 2821 ++++ kernel/drivers/gpu/drm/radeon/atombios_i2c.c | 159 + kernel/drivers/gpu/drm/radeon/avivod.h | 62 + kernel/drivers/gpu/drm/radeon/btc_dpm.c | 2825 ++++ kernel/drivers/gpu/drm/radeon/btc_dpm.h | 59 + kernel/drivers/gpu/drm/radeon/btcd.h | 185 + .../drivers/gpu/drm/radeon/cayman_blit_shaders.c | 320 + .../drivers/gpu/drm/radeon/cayman_blit_shaders.h | 35 + kernel/drivers/gpu/drm/radeon/ci_dpm.c | 5959 ++++++++ kernel/drivers/gpu/drm/radeon/ci_dpm.h | 341 + kernel/drivers/gpu/drm/radeon/ci_smc.c | 295 + kernel/drivers/gpu/drm/radeon/cik.c | 9986 +++++++++++++ kernel/drivers/gpu/drm/radeon/cik_blit_shaders.c | 246 + kernel/drivers/gpu/drm/radeon/cik_blit_shaders.h | 32 + kernel/drivers/gpu/drm/radeon/cik_reg.h | 187 + kernel/drivers/gpu/drm/radeon/cik_sdma.c | 985 ++ kernel/drivers/gpu/drm/radeon/cikd.h | 2168 +++ kernel/drivers/gpu/drm/radeon/clearstate_cayman.h | 1081 ++ kernel/drivers/gpu/drm/radeon/clearstate_ci.h | 944 ++ kernel/drivers/gpu/drm/radeon/clearstate_defs.h | 44 + .../drivers/gpu/drm/radeon/clearstate_evergreen.h | 1080 ++ kernel/drivers/gpu/drm/radeon/clearstate_si.h | 941 ++ kernel/drivers/gpu/drm/radeon/cypress_dpm.c | 2165 +++ kernel/drivers/gpu/drm/radeon/cypress_dpm.h | 160 + kernel/drivers/gpu/drm/radeon/dce3_1_afmt.c | 232 + kernel/drivers/gpu/drm/radeon/dce6_afmt.c | 297 + kernel/drivers/gpu/drm/radeon/drm_buffer.c | 177 + kernel/drivers/gpu/drm/radeon/drm_buffer.h | 148 + kernel/drivers/gpu/drm/radeon/evergreen.c | 5834 ++++++++ .../gpu/drm/radeon/evergreen_blit_shaders.c | 303 + .../gpu/drm/radeon/evergreen_blit_shaders.h | 35 + kernel/drivers/gpu/drm/radeon/evergreen_cs.c | 3593 +++++ kernel/drivers/gpu/drm/radeon/evergreen_dma.c | 182 + kernel/drivers/gpu/drm/radeon/evergreen_hdmi.c | 479 + kernel/drivers/gpu/drm/radeon/evergreen_reg.h | 269 + kernel/drivers/gpu/drm/radeon/evergreen_smc.h | 67 + kernel/drivers/gpu/drm/radeon/evergreend.h | 2578 ++++ 
kernel/drivers/gpu/drm/radeon/kv_dpm.c | 2898 ++++ kernel/drivers/gpu/drm/radeon/kv_dpm.h | 200 + kernel/drivers/gpu/drm/radeon/kv_smc.c | 215 + kernel/drivers/gpu/drm/radeon/mkregtable.c | 712 + kernel/drivers/gpu/drm/radeon/ni.c | 2556 ++++ kernel/drivers/gpu/drm/radeon/ni_dma.c | 472 + kernel/drivers/gpu/drm/radeon/ni_dpm.c | 4379 ++++++ kernel/drivers/gpu/drm/radeon/ni_dpm.h | 250 + kernel/drivers/gpu/drm/radeon/ni_reg.h | 130 + kernel/drivers/gpu/drm/radeon/nid.h | 1361 ++ kernel/drivers/gpu/drm/radeon/nislands_smc.h | 329 + kernel/drivers/gpu/drm/radeon/ppsmc.h | 191 + kernel/drivers/gpu/drm/radeon/pptable.h | 690 + kernel/drivers/gpu/drm/radeon/r100.c | 4111 ++++++ kernel/drivers/gpu/drm/radeon/r100_track.h | 97 + kernel/drivers/gpu/drm/radeon/r100d.h | 869 ++ kernel/drivers/gpu/drm/radeon/r200.c | 551 + kernel/drivers/gpu/drm/radeon/r300.c | 1571 ++ kernel/drivers/gpu/drm/radeon/r300_cmdbuf.c | 1186 ++ kernel/drivers/gpu/drm/radeon/r300_reg.h | 1789 +++ kernel/drivers/gpu/drm/radeon/r300d.h | 343 + kernel/drivers/gpu/drm/radeon/r420.c | 503 + kernel/drivers/gpu/drm/radeon/r420d.h | 249 + kernel/drivers/gpu/drm/radeon/r500_reg.h | 801 + kernel/drivers/gpu/drm/radeon/r520.c | 332 + kernel/drivers/gpu/drm/radeon/r520d.h | 187 + kernel/drivers/gpu/drm/radeon/r600.c | 4508 ++++++ kernel/drivers/gpu/drm/radeon/r600_blit.c | 874 ++ kernel/drivers/gpu/drm/radeon/r600_blit_shaders.c | 719 + kernel/drivers/gpu/drm/radeon/r600_blit_shaders.h | 38 + kernel/drivers/gpu/drm/radeon/r600_cp.c | 2660 ++++ kernel/drivers/gpu/drm/radeon/r600_cs.c | 2630 ++++ kernel/drivers/gpu/drm/radeon/r600_dma.c | 491 + kernel/drivers/gpu/drm/radeon/r600_dpm.c | 1367 ++ kernel/drivers/gpu/drm/radeon/r600_dpm.h | 236 + kernel/drivers/gpu/drm/radeon/r600_hdmi.c | 535 + kernel/drivers/gpu/drm/radeon/r600_reg.h | 180 + kernel/drivers/gpu/drm/radeon/r600d.h | 2369 +++ kernel/drivers/gpu/drm/radeon/radeon.h | 3127 ++++ kernel/drivers/gpu/drm/radeon/radeon_acpi.c | 790 + 
kernel/drivers/gpu/drm/radeon/radeon_acpi.h | 445 + kernel/drivers/gpu/drm/radeon/radeon_agp.c | 282 + kernel/drivers/gpu/drm/radeon/radeon_asic.c | 2674 ++++ kernel/drivers/gpu/drm/radeon/radeon_asic.h | 979 ++ kernel/drivers/gpu/drm/radeon/radeon_atombios.c | 4448 ++++++ .../drivers/gpu/drm/radeon/radeon_atpx_handler.c | 571 + kernel/drivers/gpu/drm/radeon/radeon_audio.c | 791 + kernel/drivers/gpu/drm/radeon/radeon_audio.h | 84 + kernel/drivers/gpu/drm/radeon/radeon_benchmark.c | 249 + kernel/drivers/gpu/drm/radeon/radeon_bios.c | 703 + kernel/drivers/gpu/drm/radeon/radeon_clocks.c | 912 ++ kernel/drivers/gpu/drm/radeon/radeon_combios.c | 3603 +++++ kernel/drivers/gpu/drm/radeon/radeon_connectors.c | 2464 ++++ kernel/drivers/gpu/drm/radeon/radeon_cp.c | 2243 +++ kernel/drivers/gpu/drm/radeon/radeon_cs.c | 864 ++ kernel/drivers/gpu/drm/radeon/radeon_cursor.c | 380 + kernel/drivers/gpu/drm/radeon/radeon_device.c | 1896 +++ kernel/drivers/gpu/drm/radeon/radeon_display.c | 1967 +++ kernel/drivers/gpu/drm/radeon/radeon_dp_auxch.c | 204 + kernel/drivers/gpu/drm/radeon/radeon_dp_mst.c | 790 + kernel/drivers/gpu/drm/radeon/radeon_drv.c | 687 + kernel/drivers/gpu/drm/radeon/radeon_drv.h | 2170 +++ kernel/drivers/gpu/drm/radeon/radeon_encoders.c | 451 + kernel/drivers/gpu/drm/radeon/radeon_family.h | 122 + kernel/drivers/gpu/drm/radeon/radeon_fb.c | 447 + kernel/drivers/gpu/drm/radeon/radeon_fence.c | 1096 ++ kernel/drivers/gpu/drm/radeon/radeon_gart.c | 380 + kernel/drivers/gpu/drm/radeon/radeon_gem.c | 807 + kernel/drivers/gpu/drm/radeon/radeon_i2c.c | 1161 ++ kernel/drivers/gpu/drm/radeon/radeon_ib.c | 319 + kernel/drivers/gpu/drm/radeon/radeon_ioc32.c | 424 + kernel/drivers/gpu/drm/radeon/radeon_irq.c | 402 + kernel/drivers/gpu/drm/radeon/radeon_irq_kms.c | 530 + kernel/drivers/gpu/drm/radeon/radeon_kfd.c | 696 + kernel/drivers/gpu/drm/radeon/radeon_kfd.h | 47 + kernel/drivers/gpu/drm/radeon/radeon_kms.c | 929 ++ kernel/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 
1127 ++ .../gpu/drm/radeon/radeon_legacy_encoders.c | 1810 +++ kernel/drivers/gpu/drm/radeon/radeon_legacy_tv.c | 923 ++ kernel/drivers/gpu/drm/radeon/radeon_mem.c | 302 + kernel/drivers/gpu/drm/radeon/radeon_mn.c | 322 + kernel/drivers/gpu/drm/radeon/radeon_mode.h | 1007 ++ kernel/drivers/gpu/drm/radeon/radeon_object.c | 848 ++ kernel/drivers/gpu/drm/radeon/radeon_object.h | 196 + kernel/drivers/gpu/drm/radeon/radeon_pm.c | 1876 +++ kernel/drivers/gpu/drm/radeon/radeon_prime.c | 127 + kernel/drivers/gpu/drm/radeon/radeon_reg.h | 3725 +++++ kernel/drivers/gpu/drm/radeon/radeon_ring.c | 557 + kernel/drivers/gpu/drm/radeon/radeon_sa.c | 419 + kernel/drivers/gpu/drm/radeon/radeon_semaphore.c | 106 + kernel/drivers/gpu/drm/radeon/radeon_state.c | 3261 +++++ kernel/drivers/gpu/drm/radeon/radeon_sync.c | 220 + kernel/drivers/gpu/drm/radeon/radeon_test.c | 574 + kernel/drivers/gpu/drm/radeon/radeon_trace.h | 208 + .../drivers/gpu/drm/radeon/radeon_trace_points.c | 9 + kernel/drivers/gpu/drm/radeon/radeon_ttm.c | 1194 ++ kernel/drivers/gpu/drm/radeon/radeon_ucode.c | 167 + kernel/drivers/gpu/drm/radeon/radeon_ucode.h | 227 + kernel/drivers/gpu/drm/radeon/radeon_uvd.c | 1003 ++ kernel/drivers/gpu/drm/radeon/radeon_vce.c | 804 + kernel/drivers/gpu/drm/radeon/radeon_vm.c | 1262 ++ kernel/drivers/gpu/drm/radeon/reg_srcs/cayman | 642 + kernel/drivers/gpu/drm/radeon/reg_srcs/evergreen | 644 + kernel/drivers/gpu/drm/radeon/reg_srcs/r100 | 105 + kernel/drivers/gpu/drm/radeon/reg_srcs/r200 | 186 + kernel/drivers/gpu/drm/radeon/reg_srcs/r300 | 714 + kernel/drivers/gpu/drm/radeon/reg_srcs/r420 | 780 + kernel/drivers/gpu/drm/radeon/reg_srcs/r600 | 756 + kernel/drivers/gpu/drm/radeon/reg_srcs/rn50 | 30 + kernel/drivers/gpu/drm/radeon/reg_srcs/rs600 | 780 + kernel/drivers/gpu/drm/radeon/reg_srcs/rv515 | 496 + kernel/drivers/gpu/drm/radeon/rs100d.h | 40 + kernel/drivers/gpu/drm/radeon/rs400.c | 586 + kernel/drivers/gpu/drm/radeon/rs400d.h | 160 + kernel/drivers/gpu/drm/radeon/rs600.c | 
1160 ++ kernel/drivers/gpu/drm/radeon/rs600d.h | 685 + kernel/drivers/gpu/drm/radeon/rs690.c | 870 ++ kernel/drivers/gpu/drm/radeon/rs690d.h | 313 + kernel/drivers/gpu/drm/radeon/rs780_dpm.c | 1076 ++ kernel/drivers/gpu/drm/radeon/rs780_dpm.h | 109 + kernel/drivers/gpu/drm/radeon/rs780d.h | 171 + kernel/drivers/gpu/drm/radeon/rv200d.h | 36 + kernel/drivers/gpu/drm/radeon/rv250d.h | 123 + kernel/drivers/gpu/drm/radeon/rv350d.h | 52 + kernel/drivers/gpu/drm/radeon/rv515.c | 1306 ++ kernel/drivers/gpu/drm/radeon/rv515d.h | 638 + kernel/drivers/gpu/drm/radeon/rv6xx_dpm.c | 2159 +++ kernel/drivers/gpu/drm/radeon/rv6xx_dpm.h | 95 + kernel/drivers/gpu/drm/radeon/rv6xxd.h | 246 + kernel/drivers/gpu/drm/radeon/rv730_dpm.c | 508 + kernel/drivers/gpu/drm/radeon/rv730d.h | 165 + kernel/drivers/gpu/drm/radeon/rv740_dpm.c | 416 + kernel/drivers/gpu/drm/radeon/rv740d.h | 117 + kernel/drivers/gpu/drm/radeon/rv770.c | 2052 +++ kernel/drivers/gpu/drm/radeon/rv770_dma.c | 94 + kernel/drivers/gpu/drm/radeon/rv770_dpm.c | 2586 ++++ kernel/drivers/gpu/drm/radeon/rv770_dpm.h | 284 + kernel/drivers/gpu/drm/radeon/rv770_smc.c | 631 + kernel/drivers/gpu/drm/radeon/rv770_smc.h | 207 + kernel/drivers/gpu/drm/radeon/rv770d.h | 1014 ++ kernel/drivers/gpu/drm/radeon/si.c | 7677 ++++++++++ kernel/drivers/gpu/drm/radeon/si_blit_shaders.c | 253 + kernel/drivers/gpu/drm/radeon/si_blit_shaders.h | 32 + kernel/drivers/gpu/drm/radeon/si_dma.c | 284 + kernel/drivers/gpu/drm/radeon/si_dpm.c | 7032 +++++++++ kernel/drivers/gpu/drm/radeon/si_dpm.h | 238 + kernel/drivers/gpu/drm/radeon/si_reg.h | 105 + kernel/drivers/gpu/drm/radeon/si_smc.c | 310 + kernel/drivers/gpu/drm/radeon/sid.h | 1920 +++ kernel/drivers/gpu/drm/radeon/sislands_smc.h | 423 + kernel/drivers/gpu/drm/radeon/smu7.h | 170 + kernel/drivers/gpu/drm/radeon/smu7_discrete.h | 514 + kernel/drivers/gpu/drm/radeon/smu7_fusion.h | 300 + kernel/drivers/gpu/drm/radeon/sumo_dpm.c | 1945 +++ kernel/drivers/gpu/drm/radeon/sumo_dpm.h | 220 + 
kernel/drivers/gpu/drm/radeon/sumo_smc.c | 221 + kernel/drivers/gpu/drm/radeon/sumod.h | 372 + kernel/drivers/gpu/drm/radeon/trinity_dpm.c | 2021 +++ kernel/drivers/gpu/drm/radeon/trinity_dpm.h | 134 + kernel/drivers/gpu/drm/radeon/trinity_smc.c | 127 + kernel/drivers/gpu/drm/radeon/trinityd.h | 228 + kernel/drivers/gpu/drm/radeon/uvd_v1_0.c | 535 + kernel/drivers/gpu/drm/radeon/uvd_v2_2.c | 198 + kernel/drivers/gpu/drm/radeon/uvd_v3_1.c | 57 + kernel/drivers/gpu/drm/radeon/uvd_v4_2.c | 68 + kernel/drivers/gpu/drm/radeon/vce_v1_0.c | 187 + kernel/drivers/gpu/drm/radeon/vce_v2_0.c | 184 + kernel/drivers/gpu/drm/rcar-du/Kconfig | 27 + kernel/drivers/gpu/drm/rcar-du/Makefile | 14 + kernel/drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 638 + kernel/drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 64 + kernel/drivers/gpu/drm/rcar-du/rcar_du_drv.c | 350 + kernel/drivers/gpu/drm/rcar-du/rcar_du_drv.h | 117 + kernel/drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 209 + kernel/drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 61 + kernel/drivers/gpu/drm/rcar-du/rcar_du_group.c | 200 + kernel/drivers/gpu/drm/rcar-du/rcar_du_group.h | 55 + kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 126 + kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h | 31 + kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 168 + kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h | 35 + kernel/drivers/gpu/drm/rcar-du/rcar_du_kms.c | 747 + kernel/drivers/gpu/drm/rcar-du/rcar_du_kms.h | 39 + kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 135 + kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h | 24 + kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 192 + kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 45 + kernel/drivers/gpu/drm/rcar-du/rcar_du_plane.c | 460 + kernel/drivers/gpu/drm/rcar-du/rcar_du_plane.h | 70 + kernel/drivers/gpu/drm/rcar-du/rcar_du_regs.h | 514 + kernel/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 94 + kernel/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h | 23 + 
kernel/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h | 69 + kernel/drivers/gpu/drm/rockchip/Kconfig | 27 + kernel/drivers/gpu/drm/rockchip/Makefile | 10 + kernel/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 341 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 568 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 68 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 201 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 28 + .../drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 213 + .../drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | 21 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 298 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 55 + kernel/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 1522 ++ kernel/drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 201 + kernel/drivers/gpu/drm/savage/Makefile | 9 + kernel/drivers/gpu/drm/savage/savage_bci.c | 1083 ++ kernel/drivers/gpu/drm/savage/savage_drv.c | 93 + kernel/drivers/gpu/drm/savage/savage_drv.h | 574 + kernel/drivers/gpu/drm/savage/savage_state.c | 1163 ++ kernel/drivers/gpu/drm/shmobile/Kconfig | 15 + kernel/drivers/gpu/drm/shmobile/Makefile | 7 + .../drivers/gpu/drm/shmobile/shmob_drm_backlight.c | 90 + .../drivers/gpu/drm/shmobile/shmob_drm_backlight.h | 23 + kernel/drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 757 + kernel/drivers/gpu/drm/shmobile/shmob_drm_crtc.h | 60 + kernel/drivers/gpu/drm/shmobile/shmob_drm_drv.c | 360 + kernel/drivers/gpu/drm/shmobile/shmob_drm_drv.h | 47 + kernel/drivers/gpu/drm/shmobile/shmob_drm_kms.c | 160 + kernel/drivers/gpu/drm/shmobile/shmob_drm_kms.h | 34 + kernel/drivers/gpu/drm/shmobile/shmob_drm_plane.c | 263 + kernel/drivers/gpu/drm/shmobile/shmob_drm_plane.h | 22 + kernel/drivers/gpu/drm/shmobile/shmob_drm_regs.h | 311 + kernel/drivers/gpu/drm/sis/Makefile | 10 + kernel/drivers/gpu/drm/sis/sis_drv.c | 145 + kernel/drivers/gpu/drm/sis/sis_drv.h | 78 + kernel/drivers/gpu/drm/sis/sis_mm.c | 362 + kernel/drivers/gpu/drm/sti/Kconfig | 17 + 
kernel/drivers/gpu/drm/sti/Makefile | 27 + kernel/drivers/gpu/drm/sti/NOTES | 58 + kernel/drivers/gpu/drm/sti/sti_awg_utils.c | 182 + kernel/drivers/gpu/drm/sti/sti_awg_utils.h | 34 + kernel/drivers/gpu/drm/sti/sti_compositor.c | 284 + kernel/drivers/gpu/drm/sti/sti_compositor.h | 88 + kernel/drivers/gpu/drm/sti/sti_cursor.c | 242 + kernel/drivers/gpu/drm/sti/sti_cursor.h | 12 + kernel/drivers/gpu/drm/sti/sti_drm_crtc.c | 322 + kernel/drivers/gpu/drm/sti/sti_drm_crtc.h | 22 + kernel/drivers/gpu/drm/sti/sti_drm_drv.c | 327 + kernel/drivers/gpu/drm/sti/sti_drm_drv.h | 35 + kernel/drivers/gpu/drm/sti/sti_drm_plane.c | 251 + kernel/drivers/gpu/drm/sti/sti_drm_plane.h | 18 + kernel/drivers/gpu/drm/sti/sti_dvo.c | 564 + kernel/drivers/gpu/drm/sti/sti_gdp.c | 584 + kernel/drivers/gpu/drm/sti/sti_gdp.h | 16 + kernel/drivers/gpu/drm/sti/sti_hda.c | 791 + kernel/drivers/gpu/drm/sti/sti_hdmi.c | 909 ++ kernel/drivers/gpu/drm/sti/sti_hdmi.h | 90 + kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c | 336 + kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h | 14 + kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c | 211 + kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h | 14 + kernel/drivers/gpu/drm/sti/sti_hqvdp.c | 1073 ++ kernel/drivers/gpu/drm/sti/sti_hqvdp.h | 12 + kernel/drivers/gpu/drm/sti/sti_hqvdp_lut.h | 373 + kernel/drivers/gpu/drm/sti/sti_layer.c | 213 + kernel/drivers/gpu/drm/sti/sti_layer.h | 131 + kernel/drivers/gpu/drm/sti/sti_mixer.c | 266 + kernel/drivers/gpu/drm/sti/sti_mixer.h | 57 + kernel/drivers/gpu/drm/sti/sti_tvout.c | 780 + kernel/drivers/gpu/drm/sti/sti_vid.c | 138 + kernel/drivers/gpu/drm/sti/sti_vid.h | 12 + kernel/drivers/gpu/drm/sti/sti_vtac.c | 223 + kernel/drivers/gpu/drm/sti/sti_vtg.c | 391 + kernel/drivers/gpu/drm/sti/sti_vtg.h | 28 + kernel/drivers/gpu/drm/tdfx/Makefile | 8 + kernel/drivers/gpu/drm/tdfx/tdfx_drv.c | 89 + kernel/drivers/gpu/drm/tdfx/tdfx_drv.h | 47 + kernel/drivers/gpu/drm/tegra/Kconfig | 44 + 
kernel/drivers/gpu/drm/tegra/Makefile | 18 + kernel/drivers/gpu/drm/tegra/dc.c | 2024 +++ kernel/drivers/gpu/drm/tegra/dc.h | 444 + kernel/drivers/gpu/drm/tegra/dpaux.c | 574 + kernel/drivers/gpu/drm/tegra/dpaux.h | 74 + kernel/drivers/gpu/drm/tegra/drm.c | 1135 ++ kernel/drivers/gpu/drm/tegra/drm.h | 279 + kernel/drivers/gpu/drm/tegra/dsi.c | 1636 +++ kernel/drivers/gpu/drm/tegra/dsi.h | 142 + kernel/drivers/gpu/drm/tegra/fb.c | 433 + kernel/drivers/gpu/drm/tegra/gem.c | 659 + kernel/drivers/gpu/drm/tegra/gem.h | 78 + kernel/drivers/gpu/drm/tegra/gr2d.c | 220 + kernel/drivers/gpu/drm/tegra/gr2d.h | 28 + kernel/drivers/gpu/drm/tegra/gr3d.c | 358 + kernel/drivers/gpu/drm/tegra/gr3d.h | 27 + kernel/drivers/gpu/drm/tegra/hdmi.c | 1579 ++ kernel/drivers/gpu/drm/tegra/hdmi.h | 541 + kernel/drivers/gpu/drm/tegra/mipi-phy.c | 137 + kernel/drivers/gpu/drm/tegra/mipi-phy.h | 51 + kernel/drivers/gpu/drm/tegra/output.c | 218 + kernel/drivers/gpu/drm/tegra/rgb.c | 343 + kernel/drivers/gpu/drm/tegra/sor.c | 1679 +++ kernel/drivers/gpu/drm/tegra/sor.h | 282 + kernel/drivers/gpu/drm/tilcdc/Kconfig | 14 + kernel/drivers/gpu/drm/tilcdc/Makefile | 13 + kernel/drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 682 + kernel/drivers/gpu/drm/tilcdc/tilcdc_drv.c | 676 + kernel/drivers/gpu/drm/tilcdc/tilcdc_drv.h | 172 + kernel/drivers/gpu/drm/tilcdc/tilcdc_panel.c | 476 + kernel/drivers/gpu/drm/tilcdc/tilcdc_panel.h | 26 + kernel/drivers/gpu/drm/tilcdc/tilcdc_regs.h | 155 + kernel/drivers/gpu/drm/tilcdc/tilcdc_slave.c | 411 + kernel/drivers/gpu/drm/tilcdc/tilcdc_slave.h | 26 + kernel/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 420 + kernel/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h | 26 + kernel/drivers/gpu/drm/ttm/Makefile | 10 + kernel/drivers/gpu/drm/ttm/ttm_agp_backend.c | 152 + kernel/drivers/gpu/drm/ttm/ttm_bo.c | 1730 +++ kernel/drivers/gpu/drm/ttm/ttm_bo_manager.c | 157 + kernel/drivers/gpu/drm/ttm/ttm_bo_util.c | 695 + kernel/drivers/gpu/drm/ttm/ttm_bo_vm.c | 364 + 
kernel/drivers/gpu/drm/ttm/ttm_execbuf_util.c | 214 + kernel/drivers/gpu/drm/ttm/ttm_lock.c | 302 + kernel/drivers/gpu/drm/ttm/ttm_memory.c | 602 + kernel/drivers/gpu/drm/ttm/ttm_module.c | 102 + kernel/drivers/gpu/drm/ttm/ttm_object.c | 765 + kernel/drivers/gpu/drm/ttm/ttm_page_alloc.c | 940 ++ kernel/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1158 ++ kernel/drivers/gpu/drm/ttm/ttm_tt.c | 402 + kernel/drivers/gpu/drm/udl/Kconfig | 15 + kernel/drivers/gpu/drm/udl/Makefile | 6 + kernel/drivers/gpu/drm/udl/udl_connector.c | 157 + kernel/drivers/gpu/drm/udl/udl_dmabuf.c | 283 + kernel/drivers/gpu/drm/udl/udl_drv.c | 141 + kernel/drivers/gpu/drm/udl/udl_drv.h | 154 + kernel/drivers/gpu/drm/udl/udl_encoder.c | 80 + kernel/drivers/gpu/drm/udl/udl_fb.c | 673 + kernel/drivers/gpu/drm/udl/udl_gem.c | 241 + kernel/drivers/gpu/drm/udl/udl_main.c | 352 + kernel/drivers/gpu/drm/udl/udl_modeset.c | 467 + kernel/drivers/gpu/drm/udl/udl_transfer.c | 263 + kernel/drivers/gpu/drm/vgem/Makefile | 4 + kernel/drivers/gpu/drm/vgem/vgem_drv.c | 355 + kernel/drivers/gpu/drm/vgem/vgem_drv.h | 46 + kernel/drivers/gpu/drm/via/Makefile | 8 + kernel/drivers/gpu/drm/via/via_3d_reg.h | 1650 +++ kernel/drivers/gpu/drm/via/via_dma.c | 740 + kernel/drivers/gpu/drm/via/via_dmablit.c | 808 + kernel/drivers/gpu/drm/via/via_dmablit.h | 140 + kernel/drivers/gpu/drm/via/via_drv.c | 125 + kernel/drivers/gpu/drm/via/via_drv.h | 162 + kernel/drivers/gpu/drm/via/via_irq.c | 391 + kernel/drivers/gpu/drm/via/via_map.c | 128 + kernel/drivers/gpu/drm/via/via_mm.c | 237 + kernel/drivers/gpu/drm/via/via_verifier.c | 1111 ++ kernel/drivers/gpu/drm/via/via_verifier.h | 62 + kernel/drivers/gpu/drm/via/via_video.c | 93 + kernel/drivers/gpu/drm/vmwgfx/Kconfig | 25 + kernel/drivers/gpu/drm/vmwgfx/Makefile | 12 + kernel/drivers/gpu/drm/vmwgfx/svga3d_reg.h | 2627 ++++ kernel/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 912 ++ kernel/drivers/gpu/drm/vmwgfx/svga_escape.h | 89 + kernel/drivers/gpu/drm/vmwgfx/svga_overlay.h 
| 201 + kernel/drivers/gpu/drm/vmwgfx/svga_reg.h | 1564 ++ kernel/drivers/gpu/drm/vmwgfx/svga_types.h | 45 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 846 ++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 342 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 921 ++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 323 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1470 ++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1119 ++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2792 ++++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 662 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 1174 ++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 127 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 649 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 151 + .../drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 172 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 422 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 317 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2072 +++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 167 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 449 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c | 155 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 656 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 619 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 137 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 57 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1575 ++ .../drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 84 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 577 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 663 + kernel/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 1431 ++ kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 98 + kernel/drivers/gpu/host1x/Kconfig | 22 + kernel/drivers/gpu/host1x/Makefile | 15 + kernel/drivers/gpu/host1x/bus.c | 602 + kernel/drivers/gpu/host1x/bus.h | 29 + kernel/drivers/gpu/host1x/cdma.c | 491 + kernel/drivers/gpu/host1x/cdma.h | 100 + kernel/drivers/gpu/host1x/channel.c | 131 + kernel/drivers/gpu/host1x/channel.h | 
46 + kernel/drivers/gpu/host1x/debug.c | 213 + kernel/drivers/gpu/host1x/debug.h | 51 + kernel/drivers/gpu/host1x/dev.c | 252 + kernel/drivers/gpu/host1x/dev.h | 311 + kernel/drivers/gpu/host1x/hw/cdma_hw.c | 326 + kernel/drivers/gpu/host1x/hw/channel_hw.c | 192 + kernel/drivers/gpu/host1x/hw/debug_hw.c | 314 + kernel/drivers/gpu/host1x/hw/host1x01.c | 42 + kernel/drivers/gpu/host1x/hw/host1x01.h | 25 + kernel/drivers/gpu/host1x/hw/host1x01_hardware.h | 143 + kernel/drivers/gpu/host1x/hw/host1x02.c | 42 + kernel/drivers/gpu/host1x/hw/host1x02.h | 26 + kernel/drivers/gpu/host1x/hw/host1x02_hardware.h | 142 + kernel/drivers/gpu/host1x/hw/host1x04.c | 42 + kernel/drivers/gpu/host1x/hw/host1x04.h | 26 + kernel/drivers/gpu/host1x/hw/host1x04_hardware.h | 142 + kernel/drivers/gpu/host1x/hw/hw_host1x01_channel.h | 120 + kernel/drivers/gpu/host1x/hw/hw_host1x01_sync.h | 243 + kernel/drivers/gpu/host1x/hw/hw_host1x01_uclass.h | 180 + kernel/drivers/gpu/host1x/hw/hw_host1x02_channel.h | 121 + kernel/drivers/gpu/host1x/hw/hw_host1x02_sync.h | 243 + kernel/drivers/gpu/host1x/hw/hw_host1x02_uclass.h | 181 + kernel/drivers/gpu/host1x/hw/hw_host1x04_channel.h | 121 + kernel/drivers/gpu/host1x/hw/hw_host1x04_sync.h | 243 + kernel/drivers/gpu/host1x/hw/hw_host1x04_uclass.h | 181 + kernel/drivers/gpu/host1x/hw/intr_hw.c | 142 + kernel/drivers/gpu/host1x/hw/syncpt_hw.c | 112 + kernel/drivers/gpu/host1x/intr.c | 354 + kernel/drivers/gpu/host1x/intr.h | 102 + kernel/drivers/gpu/host1x/job.c | 598 + kernel/drivers/gpu/host1x/job.h | 54 + kernel/drivers/gpu/host1x/mipi.c | 341 + kernel/drivers/gpu/host1x/syncpt.c | 467 + kernel/drivers/gpu/host1x/syncpt.h | 130 + kernel/drivers/gpu/ipu-v3/Kconfig | 8 + kernel/drivers/gpu/ipu-v3/Makefile | 4 + kernel/drivers/gpu/ipu-v3/ipu-common.c | 1350 ++ kernel/drivers/gpu/ipu-v3/ipu-cpmem.c | 764 + kernel/drivers/gpu/ipu-v3/ipu-csi.c | 741 + kernel/drivers/gpu/ipu-v3/ipu-dc.c | 482 + kernel/drivers/gpu/ipu-v3/ipu-di.c | 756 + 
kernel/drivers/gpu/ipu-v3/ipu-dmfc.c | 436 + kernel/drivers/gpu/ipu-v3/ipu-dp.c | 363 + kernel/drivers/gpu/ipu-v3/ipu-ic.c | 778 + kernel/drivers/gpu/ipu-v3/ipu-prv.h | 223 + kernel/drivers/gpu/ipu-v3/ipu-smfc.c | 208 + kernel/drivers/gpu/vga/Kconfig | 30 + kernel/drivers/gpu/vga/Makefile | 2 + kernel/drivers/gpu/vga/vga_switcheroo.c | 711 + kernel/drivers/gpu/vga/vgaarb.c | 1357 ++ 1735 files changed, 780511 insertions(+) create mode 100644 kernel/drivers/gpu/Makefile create mode 100644 kernel/drivers/gpu/drm/Kconfig create mode 100644 kernel/drivers/gpu/drm/Makefile create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/Kconfig create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/Makefile create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/cik_regs.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_crat.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_device.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_module.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h create mode 100644 
kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_priv.h create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_process.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_queue.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.c create mode 100644 kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.h create mode 100644 kernel/drivers/gpu/drm/amd/include/cik_structs.h create mode 100644 kernel/drivers/gpu/drm/amd/include/kgd_kfd_interface.h create mode 100644 kernel/drivers/gpu/drm/armada/Kconfig create mode 100644 kernel/drivers/gpu/drm/armada/Makefile create mode 100644 kernel/drivers/gpu/drm/armada/armada_510.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_crtc.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_crtc.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_debugfs.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_drm.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_drv.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_fb.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_fb.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_fbdev.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_gem.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_gem.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_hw.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_ioctlP.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_output.c create mode 100644 
kernel/drivers/gpu/drm/armada/armada_output.h create mode 100644 kernel/drivers/gpu/drm/armada/armada_overlay.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_slave.c create mode 100644 kernel/drivers/gpu/drm/armada/armada_slave.h create mode 100644 kernel/drivers/gpu/drm/ast/Kconfig create mode 100644 kernel/drivers/gpu/drm/ast/Makefile create mode 100644 kernel/drivers/gpu/drm/ast/ast_dp501.c create mode 100644 kernel/drivers/gpu/drm/ast/ast_dram_tables.h create mode 100644 kernel/drivers/gpu/drm/ast/ast_drv.c create mode 100644 kernel/drivers/gpu/drm/ast/ast_drv.h create mode 100644 kernel/drivers/gpu/drm/ast/ast_fb.c create mode 100644 kernel/drivers/gpu/drm/ast/ast_main.c create mode 100644 kernel/drivers/gpu/drm/ast/ast_mode.c create mode 100644 kernel/drivers/gpu/drm/ast/ast_post.c create mode 100644 kernel/drivers/gpu/drm/ast/ast_tables.h create mode 100644 kernel/drivers/gpu/drm/ast/ast_ttm.c create mode 100644 kernel/drivers/gpu/drm/ati_pcigart.c create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/Kconfig create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/Makefile create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c create mode 100644 kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c create mode 100644 kernel/drivers/gpu/drm/bochs/Kconfig create mode 100644 kernel/drivers/gpu/drm/bochs/Makefile create mode 100644 kernel/drivers/gpu/drm/bochs/bochs.h create mode 100644 kernel/drivers/gpu/drm/bochs/bochs_drv.c create mode 100644 kernel/drivers/gpu/drm/bochs/bochs_fbdev.c create mode 100644 kernel/drivers/gpu/drm/bochs/bochs_hw.c create mode 100644 
kernel/drivers/gpu/drm/bochs/bochs_kms.c create mode 100644 kernel/drivers/gpu/drm/bochs/bochs_mm.c create mode 100644 kernel/drivers/gpu/drm/bridge/Kconfig create mode 100644 kernel/drivers/gpu/drm/bridge/Makefile create mode 100644 kernel/drivers/gpu/drm/bridge/dw_hdmi.c create mode 100644 kernel/drivers/gpu/drm/bridge/dw_hdmi.h create mode 100644 kernel/drivers/gpu/drm/bridge/ps8622.c create mode 100644 kernel/drivers/gpu/drm/bridge/ptn3460.c create mode 100644 kernel/drivers/gpu/drm/cirrus/Kconfig create mode 100644 kernel/drivers/gpu/drm/cirrus/Makefile create mode 100644 kernel/drivers/gpu/drm/cirrus/cirrus_drv.c create mode 100644 kernel/drivers/gpu/drm/cirrus/cirrus_drv.h create mode 100644 kernel/drivers/gpu/drm/cirrus/cirrus_fbdev.c create mode 100644 kernel/drivers/gpu/drm/cirrus/cirrus_main.c create mode 100644 kernel/drivers/gpu/drm/cirrus/cirrus_mode.c create mode 100644 kernel/drivers/gpu/drm/cirrus/cirrus_ttm.c create mode 100644 kernel/drivers/gpu/drm/drm_agpsupport.c create mode 100644 kernel/drivers/gpu/drm/drm_atomic.c create mode 100644 kernel/drivers/gpu/drm/drm_atomic_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_auth.c create mode 100644 kernel/drivers/gpu/drm/drm_bridge.c create mode 100644 kernel/drivers/gpu/drm/drm_bufs.c create mode 100644 kernel/drivers/gpu/drm/drm_cache.c create mode 100644 kernel/drivers/gpu/drm/drm_context.c create mode 100644 kernel/drivers/gpu/drm/drm_crtc.c create mode 100644 kernel/drivers/gpu/drm/drm_crtc_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_crtc_internal.h create mode 100644 kernel/drivers/gpu/drm/drm_debugfs.c create mode 100644 kernel/drivers/gpu/drm/drm_dma.c create mode 100644 kernel/drivers/gpu/drm/drm_dp_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_dp_mst_topology.c create mode 100644 kernel/drivers/gpu/drm/drm_drv.c create mode 100644 kernel/drivers/gpu/drm/drm_edid.c create mode 100644 kernel/drivers/gpu/drm/drm_edid_load.c create mode 100644 
kernel/drivers/gpu/drm/drm_encoder_slave.c create mode 100644 kernel/drivers/gpu/drm/drm_fb_cma_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_fb_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_flip_work.c create mode 100644 kernel/drivers/gpu/drm/drm_fops.c create mode 100644 kernel/drivers/gpu/drm/drm_gem.c create mode 100644 kernel/drivers/gpu/drm/drm_gem_cma_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_global.c create mode 100644 kernel/drivers/gpu/drm/drm_hashtab.c create mode 100644 kernel/drivers/gpu/drm/drm_info.c create mode 100644 kernel/drivers/gpu/drm/drm_internal.h create mode 100644 kernel/drivers/gpu/drm/drm_ioc32.c create mode 100644 kernel/drivers/gpu/drm/drm_ioctl.c create mode 100644 kernel/drivers/gpu/drm/drm_irq.c create mode 100644 kernel/drivers/gpu/drm/drm_legacy.h create mode 100644 kernel/drivers/gpu/drm/drm_lock.c create mode 100644 kernel/drivers/gpu/drm/drm_memory.c create mode 100644 kernel/drivers/gpu/drm/drm_mipi_dsi.c create mode 100644 kernel/drivers/gpu/drm/drm_mm.c create mode 100644 kernel/drivers/gpu/drm/drm_modes.c create mode 100644 kernel/drivers/gpu/drm/drm_modeset_lock.c create mode 100644 kernel/drivers/gpu/drm/drm_of.c create mode 100644 kernel/drivers/gpu/drm/drm_panel.c create mode 100644 kernel/drivers/gpu/drm/drm_pci.c create mode 100644 kernel/drivers/gpu/drm/drm_plane_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_platform.c create mode 100644 kernel/drivers/gpu/drm/drm_prime.c create mode 100644 kernel/drivers/gpu/drm/drm_probe_helper.c create mode 100644 kernel/drivers/gpu/drm/drm_rect.c create mode 100644 kernel/drivers/gpu/drm/drm_scatter.c create mode 100644 kernel/drivers/gpu/drm/drm_sysfs.c create mode 100644 kernel/drivers/gpu/drm/drm_trace.h create mode 100644 kernel/drivers/gpu/drm/drm_trace_points.c create mode 100644 kernel/drivers/gpu/drm/drm_vm.c create mode 100644 kernel/drivers/gpu/drm/drm_vma_manager.c create mode 100644 kernel/drivers/gpu/drm/exynos/Kconfig create 
mode 100644 kernel/drivers/gpu/drm/exynos/Makefile create mode 100644 kernel/drivers/gpu/drm/exynos/exynos7_drm_decon.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_dp_core.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_dp_core.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_dp_reg.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_dp_reg.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_buf.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_buf.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_core.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_crtc.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_crtc.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_dpi.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_drv.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_drv.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_dsi.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_encoder.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_encoder.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fb.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fb.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fbdev.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fbdev.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fimc.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fimc.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_fimd.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_g2d.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_g2d.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_gem.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_gem.h create mode 
100644 kernel/drivers/gpu/drm/exynos/exynos_drm_gsc.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_gsc.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_iommu.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_iommu.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_ipp.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_ipp.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_plane.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_plane.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_rotator.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_rotator.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_vidi.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_drm_vidi.h create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_hdmi.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_mixer.c create mode 100644 kernel/drivers/gpu/drm/exynos/exynos_mixer.h create mode 100644 kernel/drivers/gpu/drm/exynos/regs-fimc.h create mode 100644 kernel/drivers/gpu/drm/exynos/regs-gsc.h create mode 100644 kernel/drivers/gpu/drm/exynos/regs-hdmi.h create mode 100644 kernel/drivers/gpu/drm/exynos/regs-mixer.h create mode 100644 kernel/drivers/gpu/drm/exynos/regs-rotator.h create mode 100644 kernel/drivers/gpu/drm/exynos/regs-vp.h create mode 100644 kernel/drivers/gpu/drm/gma500/Kconfig create mode 100644 kernel/drivers/gpu/drm/gma500/Makefile create mode 100644 kernel/drivers/gpu/drm/gma500/accel_2d.c create mode 100644 kernel/drivers/gpu/drm/gma500/backlight.c create mode 100644 kernel/drivers/gpu/drm/gma500/blitter.c create mode 100644 kernel/drivers/gpu/drm/gma500/blitter.h create mode 100644 kernel/drivers/gpu/drm/gma500/cdv_device.c create mode 100644 kernel/drivers/gpu/drm/gma500/cdv_device.h create mode 100644 kernel/drivers/gpu/drm/gma500/cdv_intel_crt.c create mode 100644 kernel/drivers/gpu/drm/gma500/cdv_intel_display.c create mode 100644 
kernel/drivers/gpu/drm/gma500/cdv_intel_dp.c create mode 100644 kernel/drivers/gpu/drm/gma500/cdv_intel_hdmi.c create mode 100644 kernel/drivers/gpu/drm/gma500/cdv_intel_lvds.c create mode 100644 kernel/drivers/gpu/drm/gma500/framebuffer.c create mode 100644 kernel/drivers/gpu/drm/gma500/framebuffer.h create mode 100644 kernel/drivers/gpu/drm/gma500/gem.c create mode 100644 kernel/drivers/gpu/drm/gma500/gem.h create mode 100644 kernel/drivers/gpu/drm/gma500/gma_device.c create mode 100644 kernel/drivers/gpu/drm/gma500/gma_device.h create mode 100644 kernel/drivers/gpu/drm/gma500/gma_display.c create mode 100644 kernel/drivers/gpu/drm/gma500/gma_display.h create mode 100644 kernel/drivers/gpu/drm/gma500/gtt.c create mode 100644 kernel/drivers/gpu/drm/gma500/gtt.h create mode 100644 kernel/drivers/gpu/drm/gma500/intel_bios.c create mode 100644 kernel/drivers/gpu/drm/gma500/intel_bios.h create mode 100644 kernel/drivers/gpu/drm/gma500/intel_gmbus.c create mode 100644 kernel/drivers/gpu/drm/gma500/intel_i2c.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_device.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_dsi_output.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_dsi_output.h create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_intel_display.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_output.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_output.h create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_tmd_vid.c create mode 100644 kernel/drivers/gpu/drm/gma500/mdfld_tpo_vid.c create mode 100644 kernel/drivers/gpu/drm/gma500/mid_bios.c create mode 100644 kernel/drivers/gpu/drm/gma500/mid_bios.h create mode 100644 kernel/drivers/gpu/drm/gma500/mmu.c 
create mode 100644 kernel/drivers/gpu/drm/gma500/mmu.h create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail.h create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail_crtc.c create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail_device.c create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail_hdmi.c create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail_lvds.c create mode 100644 kernel/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c create mode 100644 kernel/drivers/gpu/drm/gma500/opregion.c create mode 100644 kernel/drivers/gpu/drm/gma500/opregion.h create mode 100644 kernel/drivers/gpu/drm/gma500/power.c create mode 100644 kernel/drivers/gpu/drm/gma500/power.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_device.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_device.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_drv.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_drv.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_display.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_drv.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_lvds.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_modes.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_reg.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_sdvo.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_irq.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_irq.h create mode 100644 kernel/drivers/gpu/drm/gma500/psb_lid.c create mode 100644 kernel/drivers/gpu/drm/gma500/psb_reg.h create mode 100644 kernel/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c create mode 100644 kernel/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h create mode 100644 kernel/drivers/gpu/drm/i2c/Kconfig create mode 100644 kernel/drivers/gpu/drm/i2c/Makefile create mode 100644 kernel/drivers/gpu/drm/i2c/adv7511.c 
create mode 100644 kernel/drivers/gpu/drm/i2c/adv7511.h create mode 100644 kernel/drivers/gpu/drm/i2c/ch7006_drv.c create mode 100644 kernel/drivers/gpu/drm/i2c/ch7006_mode.c create mode 100644 kernel/drivers/gpu/drm/i2c/ch7006_priv.h create mode 100644 kernel/drivers/gpu/drm/i2c/sil164_drv.c create mode 100644 kernel/drivers/gpu/drm/i2c/tda998x_drv.c create mode 100644 kernel/drivers/gpu/drm/i810/Makefile create mode 100644 kernel/drivers/gpu/drm/i810/i810_dma.c create mode 100644 kernel/drivers/gpu/drm/i810/i810_drv.c create mode 100644 kernel/drivers/gpu/drm/i810/i810_drv.h create mode 100644 kernel/drivers/gpu/drm/i915/Kconfig create mode 100644 kernel/drivers/gpu/drm/i915/Makefile create mode 100644 kernel/drivers/gpu/drm/i915/dvo.h create mode 100644 kernel/drivers/gpu/drm/i915/dvo_ch7017.c create mode 100644 kernel/drivers/gpu/drm/i915/dvo_ch7xxx.c create mode 100644 kernel/drivers/gpu/drm/i915/dvo_ivch.c create mode 100644 kernel/drivers/gpu/drm/i915/dvo_ns2501.c create mode 100644 kernel/drivers/gpu/drm/i915/dvo_sil164.c create mode 100644 kernel/drivers/gpu/drm/i915/dvo_tfp410.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_cmd_parser.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_debugfs.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_dma.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_drv.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_drv.h create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_batch_pool.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_context.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_debug.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_dmabuf.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_evict.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_execbuffer.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_gtt.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_gtt.h create mode 
100644 kernel/drivers/gpu/drm/i915/i915_gem_render_state.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_render_state.h create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_shrinker.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_stolen.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_tiling.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gem_userptr.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_gpu_error.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_ioc32.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_irq.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_params.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_reg.h create mode 100644 kernel/drivers/gpu/drm/i915/i915_suspend.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_sysfs.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_trace.h create mode 100644 kernel/drivers/gpu/drm/i915/i915_trace_points.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_vgpu.c create mode 100644 kernel/drivers/gpu/drm/i915/i915_vgpu.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_acpi.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_atomic.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_atomic_plane.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_audio.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_bios.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_bios.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_crt.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_ddi.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_display.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_dp.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_dp_mst.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_drv.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_dsi.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_dsi.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c create mode 100644 
kernel/drivers/gpu/drm/i915/intel_dsi_pll.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_dvo.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_fbc.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_fbdev.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_fifo_underrun.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_frontbuffer.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_hdmi.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_i2c.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_lrc.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_lrc.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_lvds.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_modes.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_opregion.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_overlay.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_panel.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_pm.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_psr.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_renderstate.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_renderstate_gen6.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_renderstate_gen7.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_renderstate_gen8.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_renderstate_gen9.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_ringbuffer.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_ringbuffer.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_runtime_pm.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_sdvo.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_sdvo_regs.h create mode 100644 kernel/drivers/gpu/drm/i915/intel_sideband.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_sprite.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_tv.c create mode 100644 kernel/drivers/gpu/drm/i915/intel_uncore.c create mode 100644 kernel/drivers/gpu/drm/imx/Kconfig 
create mode 100644 kernel/drivers/gpu/drm/imx/Makefile create mode 100644 kernel/drivers/gpu/drm/imx/dw_hdmi-imx.c create mode 100644 kernel/drivers/gpu/drm/imx/imx-drm-core.c create mode 100644 kernel/drivers/gpu/drm/imx/imx-drm.h create mode 100644 kernel/drivers/gpu/drm/imx/imx-ldb.c create mode 100644 kernel/drivers/gpu/drm/imx/imx-tve.c create mode 100644 kernel/drivers/gpu/drm/imx/ipuv3-crtc.c create mode 100644 kernel/drivers/gpu/drm/imx/ipuv3-plane.c create mode 100644 kernel/drivers/gpu/drm/imx/ipuv3-plane.h create mode 100644 kernel/drivers/gpu/drm/imx/parallel-display.c create mode 100644 kernel/drivers/gpu/drm/mga/Makefile create mode 100644 kernel/drivers/gpu/drm/mga/mga_dma.c create mode 100644 kernel/drivers/gpu/drm/mga/mga_drv.c create mode 100644 kernel/drivers/gpu/drm/mga/mga_drv.h create mode 100644 kernel/drivers/gpu/drm/mga/mga_ioc32.c create mode 100644 kernel/drivers/gpu/drm/mga/mga_irq.c create mode 100644 kernel/drivers/gpu/drm/mga/mga_state.c create mode 100644 kernel/drivers/gpu/drm/mga/mga_warp.c create mode 100644 kernel/drivers/gpu/drm/mgag200/Kconfig create mode 100644 kernel/drivers/gpu/drm/mgag200/Makefile create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_cursor.c create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_drv.c create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_drv.h create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_fb.c create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_i2c.c create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_main.c create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_mode.c create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_reg.h create mode 100644 kernel/drivers/gpu/drm/mgag200/mgag200_ttm.c create mode 100644 kernel/drivers/gpu/drm/msm/Kconfig create mode 100644 kernel/drivers/gpu/drm/msm/Makefile create mode 100644 kernel/drivers/gpu/drm/msm/NOTES create mode 100644 kernel/drivers/gpu/drm/msm/adreno/a2xx.xml.h create mode 100644 
kernel/drivers/gpu/drm/msm/adreno/a3xx.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.c create mode 100644 kernel/drivers/gpu/drm/msm/adreno/a3xx_gpu.h create mode 100644 kernel/drivers/gpu/drm/msm/adreno/a4xx.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.c create mode 100644 kernel/drivers/gpu/drm/msm/adreno/a4xx_gpu.h create mode 100644 kernel/drivers/gpu/drm/msm/adreno/adreno_common.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/adreno/adreno_device.c create mode 100644 kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.c create mode 100644 kernel/drivers/gpu/drm/msm/adreno/adreno_gpu.h create mode 100644 kernel/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/dsi/dsi.c create mode 100644 kernel/drivers/gpu/drm/msm/dsi/dsi.h create mode 100644 kernel/drivers/gpu/drm/msm/dsi/dsi.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/dsi/dsi_host.c create mode 100644 kernel/drivers/gpu/drm/msm/dsi/dsi_manager.c create mode 100644 kernel/drivers/gpu/drm/msm/dsi/dsi_phy.c create mode 100644 kernel/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/dsi/sfpb.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp.c create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp.h create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp_aux.c create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp_bridge.c create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp_connector.c create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp_ctrl.c create mode 100644 kernel/drivers/gpu/drm/msm/edp/edp_phy.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi.h create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi_audio.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c create mode 100644 
kernel/drivers/gpu/drm/msm/hdmi/hdmi_connector.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c create mode 100644 kernel/drivers/gpu/drm/msm/hdmi/qfprom.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c create mode 100644 
kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp_common.xml.h create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp_format.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp_kms.c create mode 100644 kernel/drivers/gpu/drm/msm/mdp/mdp_kms.h create mode 100644 kernel/drivers/gpu/drm/msm/msm_atomic.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_drv.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_drv.h create mode 100644 kernel/drivers/gpu/drm/msm/msm_fb.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_fbdev.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_gem.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_gem.h create mode 100644 kernel/drivers/gpu/drm/msm/msm_gem_prime.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_gem_submit.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_gpu.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_gpu.h create mode 100644 kernel/drivers/gpu/drm/msm/msm_iommu.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_kms.h create mode 100644 kernel/drivers/gpu/drm/msm/msm_mmu.h create mode 100644 kernel/drivers/gpu/drm/msm/msm_perf.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_rd.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_ringbuffer.c create mode 100644 kernel/drivers/gpu/drm/msm/msm_ringbuffer.h create mode 100644 kernel/drivers/gpu/drm/nouveau/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/Kconfig create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/arb.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/crtc.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/cursor.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/dac.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/dfp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/disp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/disp.h create mode 100644 
kernel/drivers/gpu/drm/nouveau/dispnv04/hw.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/hw.h create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/nvreg.h create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/overlay.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c create mode 100644 kernel/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/class.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/client.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/device.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/driver.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/event.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/ioctl.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/list.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/notify.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/object.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/os.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvif/unpack.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/client.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/device.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/event.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h 
create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/object.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/option.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/os.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h 
create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h create mode 100644 
kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h create mode 100644 
kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h create mode 100644 kernel/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_abi16.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_abi16.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_acpi.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_acpi.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_agp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_agp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_backlight.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_bios.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_bios.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_bo.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_bo.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_chan.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_chan.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_connector.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_connector.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_crtc.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_debugfs.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_debugfs.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_display.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_display.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_dma.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_dma.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_dp.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nouveau_drm.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_drm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_encoder.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_fbcon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_fbcon.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_fence.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_fence.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_gem.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_gem.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_hwmon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_hwmon.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_ioc32.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_ioctl.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_nvif.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_platform.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_platform.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_prime.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_reg.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_sgdma.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_sysfs.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_sysfs.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_ttm.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_ttm.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_usif.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_usif.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_vga.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nouveau_vga.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nv04_fbcon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nv04_fence.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nv10_fence.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nv10_fence.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nv17_fence.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nv50_display.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nv50_display.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nv50_fbcon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nv50_fence.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nv84_fence.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvc0_fbcon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvc0_fence.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvif/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvif/client.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvif/device.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvif/notify.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvif/object.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/event.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/notify.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/com.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h create 
mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c 
create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c create 
mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c 
create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/arith.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3 create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/i2c_.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/idle.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/perf.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c create mode 100644 
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/Kconfig create mode 100644 kernel/drivers/gpu/drm/omapdrm/Makefile create mode 100644 kernel/drivers/gpu/drm/omapdrm/TODO create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_connector.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_crtc.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_debugfs.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_dmm_priv.h create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h create mode 100644 
kernel/drivers/gpu/drm/omapdrm/omap_drv.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_drv.h create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_encoder.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_fb.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_fbdev.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_gem.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_irq.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/omap_plane.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/tcm-sita.c create mode 100644 kernel/drivers/gpu/drm/omapdrm/tcm-sita.h create mode 100644 kernel/drivers/gpu/drm/omapdrm/tcm.h create mode 100644 kernel/drivers/gpu/drm/panel/Kconfig create mode 100644 kernel/drivers/gpu/drm/panel/Makefile create mode 100644 kernel/drivers/gpu/drm/panel/panel-ld9040.c create mode 100644 kernel/drivers/gpu/drm/panel/panel-s6e8aa0.c create mode 100644 kernel/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c create mode 100644 kernel/drivers/gpu/drm/panel/panel-simple.c create mode 100644 kernel/drivers/gpu/drm/qxl/Kconfig create mode 100644 kernel/drivers/gpu/drm/qxl/Makefile create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_cmd.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_debugfs.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_dev.h create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_display.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_draw.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_drv.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_drv.h create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_dumb.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_fb.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_gem.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_image.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_ioctl.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_irq.c create mode 100644 
kernel/drivers/gpu/drm/qxl/qxl_kms.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_object.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_object.h create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_prime.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_release.c create mode 100644 kernel/drivers/gpu/drm/qxl/qxl_ttm.c create mode 100644 kernel/drivers/gpu/drm/r128/Makefile create mode 100644 kernel/drivers/gpu/drm/r128/r128_cce.c create mode 100644 kernel/drivers/gpu/drm/r128/r128_drv.c create mode 100644 kernel/drivers/gpu/drm/r128/r128_drv.h create mode 100644 kernel/drivers/gpu/drm/r128/r128_ioc32.c create mode 100644 kernel/drivers/gpu/drm/r128/r128_irq.c create mode 100644 kernel/drivers/gpu/drm/r128/r128_state.c create mode 100644 kernel/drivers/gpu/drm/radeon/.gitignore create mode 100644 kernel/drivers/gpu/drm/radeon/Kconfig create mode 100644 kernel/drivers/gpu/drm/radeon/Makefile create mode 100644 kernel/drivers/gpu/drm/radeon/ObjectID.h create mode 100644 kernel/drivers/gpu/drm/radeon/atom-bits.h create mode 100644 kernel/drivers/gpu/drm/radeon/atom-names.h create mode 100644 kernel/drivers/gpu/drm/radeon/atom-types.h create mode 100644 kernel/drivers/gpu/drm/radeon/atom.c create mode 100644 kernel/drivers/gpu/drm/radeon/atom.h create mode 100644 kernel/drivers/gpu/drm/radeon/atombios.h create mode 100644 kernel/drivers/gpu/drm/radeon/atombios_crtc.c create mode 100644 kernel/drivers/gpu/drm/radeon/atombios_dp.c create mode 100644 kernel/drivers/gpu/drm/radeon/atombios_encoders.c create mode 100644 kernel/drivers/gpu/drm/radeon/atombios_i2c.c create mode 100644 kernel/drivers/gpu/drm/radeon/avivod.h create mode 100644 kernel/drivers/gpu/drm/radeon/btc_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/btc_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/btcd.h create mode 100644 kernel/drivers/gpu/drm/radeon/cayman_blit_shaders.c create mode 100644 kernel/drivers/gpu/drm/radeon/cayman_blit_shaders.h create mode 100644 
kernel/drivers/gpu/drm/radeon/ci_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/ci_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/ci_smc.c create mode 100644 kernel/drivers/gpu/drm/radeon/cik.c create mode 100644 kernel/drivers/gpu/drm/radeon/cik_blit_shaders.c create mode 100644 kernel/drivers/gpu/drm/radeon/cik_blit_shaders.h create mode 100644 kernel/drivers/gpu/drm/radeon/cik_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/cik_sdma.c create mode 100644 kernel/drivers/gpu/drm/radeon/cikd.h create mode 100644 kernel/drivers/gpu/drm/radeon/clearstate_cayman.h create mode 100644 kernel/drivers/gpu/drm/radeon/clearstate_ci.h create mode 100644 kernel/drivers/gpu/drm/radeon/clearstate_defs.h create mode 100644 kernel/drivers/gpu/drm/radeon/clearstate_evergreen.h create mode 100644 kernel/drivers/gpu/drm/radeon/clearstate_si.h create mode 100644 kernel/drivers/gpu/drm/radeon/cypress_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/cypress_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/dce3_1_afmt.c create mode 100644 kernel/drivers/gpu/drm/radeon/dce6_afmt.c create mode 100644 kernel/drivers/gpu/drm/radeon/drm_buffer.c create mode 100644 kernel/drivers/gpu/drm/radeon/drm_buffer.h create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen.c create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_blit_shaders.c create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_blit_shaders.h create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_cs.c create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_dma.c create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_hdmi.c create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/evergreen_smc.h create mode 100644 kernel/drivers/gpu/drm/radeon/evergreend.h create mode 100644 kernel/drivers/gpu/drm/radeon/kv_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/kv_dpm.h create mode 100644 
kernel/drivers/gpu/drm/radeon/kv_smc.c create mode 100644 kernel/drivers/gpu/drm/radeon/mkregtable.c create mode 100644 kernel/drivers/gpu/drm/radeon/ni.c create mode 100644 kernel/drivers/gpu/drm/radeon/ni_dma.c create mode 100644 kernel/drivers/gpu/drm/radeon/ni_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/ni_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/ni_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/nid.h create mode 100644 kernel/drivers/gpu/drm/radeon/nislands_smc.h create mode 100644 kernel/drivers/gpu/drm/radeon/ppsmc.h create mode 100644 kernel/drivers/gpu/drm/radeon/pptable.h create mode 100644 kernel/drivers/gpu/drm/radeon/r100.c create mode 100644 kernel/drivers/gpu/drm/radeon/r100_track.h create mode 100644 kernel/drivers/gpu/drm/radeon/r100d.h create mode 100644 kernel/drivers/gpu/drm/radeon/r200.c create mode 100644 kernel/drivers/gpu/drm/radeon/r300.c create mode 100644 kernel/drivers/gpu/drm/radeon/r300_cmdbuf.c create mode 100644 kernel/drivers/gpu/drm/radeon/r300_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/r300d.h create mode 100644 kernel/drivers/gpu/drm/radeon/r420.c create mode 100644 kernel/drivers/gpu/drm/radeon/r420d.h create mode 100644 kernel/drivers/gpu/drm/radeon/r500_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/r520.c create mode 100644 kernel/drivers/gpu/drm/radeon/r520d.h create mode 100644 kernel/drivers/gpu/drm/radeon/r600.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_blit.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_blit_shaders.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_blit_shaders.h create mode 100644 kernel/drivers/gpu/drm/radeon/r600_cp.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_cs.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_dma.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/r600_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/r600_hdmi.c create 
mode 100644 kernel/drivers/gpu/drm/radeon/r600_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/r600d.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_acpi.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_acpi.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_agp.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_asic.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_asic.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_atombios.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_atpx_handler.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_audio.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_audio.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_benchmark.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_bios.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_clocks.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_combios.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_connectors.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_cp.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_cs.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_cursor.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_device.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_display.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_dp_auxch.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_dp_mst.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_drv.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_drv.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_encoders.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_family.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_fb.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_fence.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_gart.c create mode 100644 
kernel/drivers/gpu/drm/radeon/radeon_gem.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_i2c.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_ib.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_ioc32.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_irq.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_irq_kms.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_kfd.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_kfd.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_kms.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_legacy_crtc.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_legacy_encoders.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_legacy_tv.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_mem.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_mn.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_mode.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_object.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_object.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_pm.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_prime.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_ring.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_sa.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_semaphore.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_state.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_sync.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_test.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_trace.h create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_trace_points.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_ttm.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_ucode.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_ucode.h create mode 100644 
kernel/drivers/gpu/drm/radeon/radeon_uvd.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_vce.c create mode 100644 kernel/drivers/gpu/drm/radeon/radeon_vm.c create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/cayman create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/evergreen create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/r100 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/r200 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/r300 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/r420 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/r600 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/rn50 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/rs600 create mode 100644 kernel/drivers/gpu/drm/radeon/reg_srcs/rv515 create mode 100644 kernel/drivers/gpu/drm/radeon/rs100d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rs400.c create mode 100644 kernel/drivers/gpu/drm/radeon/rs400d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rs600.c create mode 100644 kernel/drivers/gpu/drm/radeon/rs600d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rs690.c create mode 100644 kernel/drivers/gpu/drm/radeon/rs690d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rs780_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/rs780_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/rs780d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv200d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv250d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv350d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv515.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv515d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv6xx_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv6xx_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv6xxd.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv730_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv730d.h create mode 100644 
kernel/drivers/gpu/drm/radeon/rv740_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv740d.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv770.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv770_dma.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv770_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv770_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv770_smc.c create mode 100644 kernel/drivers/gpu/drm/radeon/rv770_smc.h create mode 100644 kernel/drivers/gpu/drm/radeon/rv770d.h create mode 100644 kernel/drivers/gpu/drm/radeon/si.c create mode 100644 kernel/drivers/gpu/drm/radeon/si_blit_shaders.c create mode 100644 kernel/drivers/gpu/drm/radeon/si_blit_shaders.h create mode 100644 kernel/drivers/gpu/drm/radeon/si_dma.c create mode 100644 kernel/drivers/gpu/drm/radeon/si_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/si_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/si_reg.h create mode 100644 kernel/drivers/gpu/drm/radeon/si_smc.c create mode 100644 kernel/drivers/gpu/drm/radeon/sid.h create mode 100644 kernel/drivers/gpu/drm/radeon/sislands_smc.h create mode 100644 kernel/drivers/gpu/drm/radeon/smu7.h create mode 100644 kernel/drivers/gpu/drm/radeon/smu7_discrete.h create mode 100644 kernel/drivers/gpu/drm/radeon/smu7_fusion.h create mode 100644 kernel/drivers/gpu/drm/radeon/sumo_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/sumo_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/sumo_smc.c create mode 100644 kernel/drivers/gpu/drm/radeon/sumod.h create mode 100644 kernel/drivers/gpu/drm/radeon/trinity_dpm.c create mode 100644 kernel/drivers/gpu/drm/radeon/trinity_dpm.h create mode 100644 kernel/drivers/gpu/drm/radeon/trinity_smc.c create mode 100644 kernel/drivers/gpu/drm/radeon/trinityd.h create mode 100644 kernel/drivers/gpu/drm/radeon/uvd_v1_0.c create mode 100644 kernel/drivers/gpu/drm/radeon/uvd_v2_2.c create mode 100644 kernel/drivers/gpu/drm/radeon/uvd_v3_1.c create mode 100644 
kernel/drivers/gpu/drm/radeon/uvd_v4_2.c create mode 100644 kernel/drivers/gpu/drm/radeon/vce_v1_0.c create mode 100644 kernel/drivers/gpu/drm/radeon/vce_v2_0.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/Kconfig create mode 100644 kernel/drivers/gpu/drm/rcar-du/Makefile create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_crtc.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_crtc.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_drv.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_drv.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_encoder.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_encoder.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_group.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_group.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_kms.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_kms.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_plane.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_plane.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_regs.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h create mode 100644 kernel/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h create mode 100644 kernel/drivers/gpu/drm/rockchip/Kconfig create mode 100644 kernel/drivers/gpu/drm/rockchip/Makefile create 
mode 100644 kernel/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_drv.c create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_drv.h create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_fb.c create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_fb.h create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.h create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_vop.c create mode 100644 kernel/drivers/gpu/drm/rockchip/rockchip_drm_vop.h create mode 100644 kernel/drivers/gpu/drm/savage/Makefile create mode 100644 kernel/drivers/gpu/drm/savage/savage_bci.c create mode 100644 kernel/drivers/gpu/drm/savage/savage_drv.c create mode 100644 kernel/drivers/gpu/drm/savage/savage_drv.h create mode 100644 kernel/drivers/gpu/drm/savage/savage_state.c create mode 100644 kernel/drivers/gpu/drm/shmobile/Kconfig create mode 100644 kernel/drivers/gpu/drm/shmobile/Makefile create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_backlight.c create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_backlight.h create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_crtc.c create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_crtc.h create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_drv.c create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_drv.h create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_kms.c create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_kms.h create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_plane.c create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_plane.h create mode 100644 kernel/drivers/gpu/drm/shmobile/shmob_drm_regs.h create mode 100644 
kernel/drivers/gpu/drm/sis/Makefile create mode 100644 kernel/drivers/gpu/drm/sis/sis_drv.c create mode 100644 kernel/drivers/gpu/drm/sis/sis_drv.h create mode 100644 kernel/drivers/gpu/drm/sis/sis_mm.c create mode 100644 kernel/drivers/gpu/drm/sti/Kconfig create mode 100644 kernel/drivers/gpu/drm/sti/Makefile create mode 100644 kernel/drivers/gpu/drm/sti/NOTES create mode 100644 kernel/drivers/gpu/drm/sti/sti_awg_utils.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_awg_utils.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_compositor.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_compositor.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_cursor.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_cursor.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_drm_crtc.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_drm_crtc.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_drm_drv.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_drm_drv.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_drm_plane.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_drm_plane.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_dvo.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_gdp.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_gdp.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_hda.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_hdmi.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_hdmi.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_hqvdp.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_hqvdp.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_hqvdp_lut.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_layer.c create mode 100644 
kernel/drivers/gpu/drm/sti/sti_layer.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_mixer.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_mixer.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_tvout.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_vid.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_vid.h create mode 100644 kernel/drivers/gpu/drm/sti/sti_vtac.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_vtg.c create mode 100644 kernel/drivers/gpu/drm/sti/sti_vtg.h create mode 100644 kernel/drivers/gpu/drm/tdfx/Makefile create mode 100644 kernel/drivers/gpu/drm/tdfx/tdfx_drv.c create mode 100644 kernel/drivers/gpu/drm/tdfx/tdfx_drv.h create mode 100644 kernel/drivers/gpu/drm/tegra/Kconfig create mode 100644 kernel/drivers/gpu/drm/tegra/Makefile create mode 100644 kernel/drivers/gpu/drm/tegra/dc.c create mode 100644 kernel/drivers/gpu/drm/tegra/dc.h create mode 100644 kernel/drivers/gpu/drm/tegra/dpaux.c create mode 100644 kernel/drivers/gpu/drm/tegra/dpaux.h create mode 100644 kernel/drivers/gpu/drm/tegra/drm.c create mode 100644 kernel/drivers/gpu/drm/tegra/drm.h create mode 100644 kernel/drivers/gpu/drm/tegra/dsi.c create mode 100644 kernel/drivers/gpu/drm/tegra/dsi.h create mode 100644 kernel/drivers/gpu/drm/tegra/fb.c create mode 100644 kernel/drivers/gpu/drm/tegra/gem.c create mode 100644 kernel/drivers/gpu/drm/tegra/gem.h create mode 100644 kernel/drivers/gpu/drm/tegra/gr2d.c create mode 100644 kernel/drivers/gpu/drm/tegra/gr2d.h create mode 100644 kernel/drivers/gpu/drm/tegra/gr3d.c create mode 100644 kernel/drivers/gpu/drm/tegra/gr3d.h create mode 100644 kernel/drivers/gpu/drm/tegra/hdmi.c create mode 100644 kernel/drivers/gpu/drm/tegra/hdmi.h create mode 100644 kernel/drivers/gpu/drm/tegra/mipi-phy.c create mode 100644 kernel/drivers/gpu/drm/tegra/mipi-phy.h create mode 100644 kernel/drivers/gpu/drm/tegra/output.c create mode 100644 kernel/drivers/gpu/drm/tegra/rgb.c create mode 100644 kernel/drivers/gpu/drm/tegra/sor.c create mode 
100644 kernel/drivers/gpu/drm/tegra/sor.h create mode 100644 kernel/drivers/gpu/drm/tilcdc/Kconfig create mode 100644 kernel/drivers/gpu/drm/tilcdc/Makefile create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_crtc.c create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_drv.c create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_drv.h create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_panel.c create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_panel.h create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_regs.h create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_slave.c create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_slave.h create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c create mode 100644 kernel/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h create mode 100644 kernel/drivers/gpu/drm/ttm/Makefile create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_agp_backend.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_bo.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_bo_manager.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_bo_util.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_bo_vm.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_execbuf_util.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_lock.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_memory.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_module.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_object.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_page_alloc.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c create mode 100644 kernel/drivers/gpu/drm/ttm/ttm_tt.c create mode 100644 kernel/drivers/gpu/drm/udl/Kconfig create mode 100644 kernel/drivers/gpu/drm/udl/Makefile create mode 100644 kernel/drivers/gpu/drm/udl/udl_connector.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_dmabuf.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_drv.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_drv.h create mode 100644 
kernel/drivers/gpu/drm/udl/udl_encoder.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_fb.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_gem.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_main.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_modeset.c create mode 100644 kernel/drivers/gpu/drm/udl/udl_transfer.c create mode 100644 kernel/drivers/gpu/drm/vgem/Makefile create mode 100644 kernel/drivers/gpu/drm/vgem/vgem_drv.c create mode 100644 kernel/drivers/gpu/drm/vgem/vgem_drv.h create mode 100644 kernel/drivers/gpu/drm/via/Makefile create mode 100644 kernel/drivers/gpu/drm/via/via_3d_reg.h create mode 100644 kernel/drivers/gpu/drm/via/via_dma.c create mode 100644 kernel/drivers/gpu/drm/via/via_dmablit.c create mode 100644 kernel/drivers/gpu/drm/via/via_dmablit.h create mode 100644 kernel/drivers/gpu/drm/via/via_drv.c create mode 100644 kernel/drivers/gpu/drm/via/via_drv.h create mode 100644 kernel/drivers/gpu/drm/via/via_irq.c create mode 100644 kernel/drivers/gpu/drm/via/via_map.c create mode 100644 kernel/drivers/gpu/drm/via/via_mm.c create mode 100644 kernel/drivers/gpu/drm/via/via_verifier.c create mode 100644 kernel/drivers/gpu/drm/via/via_verifier.h create mode 100644 kernel/drivers/gpu/drm/via/via_video.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/Kconfig create mode 100644 kernel/drivers/gpu/drm/vmwgfx/Makefile create mode 100644 kernel/drivers/gpu/drm/vmwgfx/svga3d_reg.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/svga_escape.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/svga_overlay.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/svga_reg.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/svga_types.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_context.c create mode 100644 
kernel/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c create mode 100644 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c create mode 100644 kernel/drivers/gpu/host1x/Kconfig create mode 100644 kernel/drivers/gpu/host1x/Makefile create mode 100644 kernel/drivers/gpu/host1x/bus.c create mode 100644 kernel/drivers/gpu/host1x/bus.h create mode 100644 kernel/drivers/gpu/host1x/cdma.c create mode 100644 kernel/drivers/gpu/host1x/cdma.h create mode 100644 
kernel/drivers/gpu/host1x/channel.c create mode 100644 kernel/drivers/gpu/host1x/channel.h create mode 100644 kernel/drivers/gpu/host1x/debug.c create mode 100644 kernel/drivers/gpu/host1x/debug.h create mode 100644 kernel/drivers/gpu/host1x/dev.c create mode 100644 kernel/drivers/gpu/host1x/dev.h create mode 100644 kernel/drivers/gpu/host1x/hw/cdma_hw.c create mode 100644 kernel/drivers/gpu/host1x/hw/channel_hw.c create mode 100644 kernel/drivers/gpu/host1x/hw/debug_hw.c create mode 100644 kernel/drivers/gpu/host1x/hw/host1x01.c create mode 100644 kernel/drivers/gpu/host1x/hw/host1x01.h create mode 100644 kernel/drivers/gpu/host1x/hw/host1x01_hardware.h create mode 100644 kernel/drivers/gpu/host1x/hw/host1x02.c create mode 100644 kernel/drivers/gpu/host1x/hw/host1x02.h create mode 100644 kernel/drivers/gpu/host1x/hw/host1x02_hardware.h create mode 100644 kernel/drivers/gpu/host1x/hw/host1x04.c create mode 100644 kernel/drivers/gpu/host1x/hw/host1x04.h create mode 100644 kernel/drivers/gpu/host1x/hw/host1x04_hardware.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x01_channel.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x01_sync.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x01_uclass.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x02_channel.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x02_sync.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x02_uclass.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x04_channel.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x04_sync.h create mode 100644 kernel/drivers/gpu/host1x/hw/hw_host1x04_uclass.h create mode 100644 kernel/drivers/gpu/host1x/hw/intr_hw.c create mode 100644 kernel/drivers/gpu/host1x/hw/syncpt_hw.c create mode 100644 kernel/drivers/gpu/host1x/intr.c create mode 100644 kernel/drivers/gpu/host1x/intr.h create mode 100644 kernel/drivers/gpu/host1x/job.c create mode 100644 kernel/drivers/gpu/host1x/job.h create mode 100644 
kernel/drivers/gpu/host1x/mipi.c create mode 100644 kernel/drivers/gpu/host1x/syncpt.c create mode 100644 kernel/drivers/gpu/host1x/syncpt.h create mode 100644 kernel/drivers/gpu/ipu-v3/Kconfig create mode 100644 kernel/drivers/gpu/ipu-v3/Makefile create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-common.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-cpmem.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-csi.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-dc.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-di.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-dmfc.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-dp.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-ic.c create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-prv.h create mode 100644 kernel/drivers/gpu/ipu-v3/ipu-smfc.c create mode 100644 kernel/drivers/gpu/vga/Kconfig create mode 100644 kernel/drivers/gpu/vga/Makefile create mode 100644 kernel/drivers/gpu/vga/vga_switcheroo.c create mode 100644 kernel/drivers/gpu/vga/vgaarb.c (limited to 'kernel/drivers/gpu') diff --git a/kernel/drivers/gpu/Makefile b/kernel/drivers/gpu/Makefile new file mode 100644 index 000000000..e9ed439a5 --- /dev/null +++ b/kernel/drivers/gpu/Makefile @@ -0,0 +1,6 @@ +# drm/tegra depends on host1x, so if both drivers are built-in care must be +# taken to initialize them in the correct order. Link order is the only way +# to ensure this currently. +obj-$(CONFIG_TEGRA_HOST1X) += host1x/ +obj-y += drm/ vga/ +obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ diff --git a/kernel/drivers/gpu/drm/Kconfig b/kernel/drivers/gpu/drm/Kconfig new file mode 100644 index 000000000..47f2ce81b --- /dev/null +++ b/kernel/drivers/gpu/drm/Kconfig @@ -0,0 +1,219 @@ +# +# Drm device configuration +# +# This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
+# +menuconfig DRM + tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" + depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA + select HDMI + select FB_CMDLINE + select I2C + select I2C_ALGOBIT + select DMA_SHARED_BUFFER + help + Kernel-level support for the Direct Rendering Infrastructure (DRI) + introduced in XFree86 4.0. If you say Y here, you need to select + the module that's right for your graphics card from the list below. + These modules provide support for synchronization, security, and + DMA transfers. Please see for more + details. You should also select and configure AGP + (/dev/agpgart) support if it is available for your platform. + +config DRM_MIPI_DSI + bool + depends on DRM + +config DRM_KMS_HELPER + tristate + depends on DRM + help + CRTC helpers for KMS drivers. + +config DRM_KMS_FB_HELPER + bool + depends on DRM_KMS_HELPER + select FB + select FRAMEBUFFER_CONSOLE if !EXPERT + select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE + help + FBDEV helpers for KMS drivers. + +config DRM_LOAD_EDID_FIRMWARE + bool "Allow to specify an EDID data set instead of probing for it" + depends on DRM_KMS_HELPER + help + Say Y here, if you want to use EDID data to be loaded from the + /lib/firmware directory or one of the provided built-in + data sets. This may be necessary, if the graphics adapter or + monitor are unable to provide appropriate EDID data. Since this + feature is provided as a workaround for broken hardware, the + default case is N. Details and instructions how to build your own + EDID data are given in Documentation/EDID/HOWTO.txt. + +config DRM_TTM + tristate + depends on DRM + help + GPU memory management subsystem for devices with multiple + GPU memory types. Will be enabled automatically if a device driver + uses it. 
+ +config DRM_GEM_CMA_HELPER + bool + depends on DRM && HAVE_DMA_ATTRS + help + Choose this if you need the GEM CMA helper functions + +config DRM_KMS_CMA_HELPER + bool + depends on DRM && HAVE_DMA_ATTRS + select DRM_GEM_CMA_HELPER + select DRM_KMS_FB_HELPER + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + help + Choose this if you need the KMS CMA helper functions + +source "drivers/gpu/drm/i2c/Kconfig" + +source "drivers/gpu/drm/bridge/Kconfig" + +config DRM_TDFX + tristate "3dfx Banshee/Voodoo3+" + depends on DRM && PCI + help + Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), + graphics card. If M is selected, the module will be called tdfx. + +config DRM_R128 + tristate "ATI Rage 128" + depends on DRM && PCI + select FW_LOADER + help + Choose this option if you have an ATI Rage 128 graphics card. If M + is selected, the module will be called r128. AGP support for + this card is strongly suggested (unless you have a PCI version). + +config DRM_RADEON + tristate "ATI Radeon" + depends on DRM && PCI + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select FW_LOADER + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + select DRM_TTM + select POWER_SUPPLY + select HWMON + select BACKLIGHT_CLASS_DEVICE + select INTERVAL_TREE + help + Choose this option if you have an ATI Radeon graphics card. There + are both PCI and AGP versions. You don't need to choose this to + run the Radeon in plain VGA mode. + + If M is selected, the module will be called radeon. + +source "drivers/gpu/drm/radeon/Kconfig" + +source "drivers/gpu/drm/nouveau/Kconfig" + +config DRM_I810 + tristate "Intel I810" + # !PREEMPT because of missing ioctl locking + depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) + help + Choose this option if you have an Intel I810 graphics card. If M is + selected, the module will be called i810. AGP support is required + for this driver to work. 
+ +source "drivers/gpu/drm/i915/Kconfig" + +config DRM_MGA + tristate "Matrox g200/g400" + depends on DRM && PCI + select FW_LOADER + help + Choose this option if you have a Matrox G200, G400 or G450 graphics + card. If M is selected, the module will be called mga. AGP + support is required for this driver to work. + +config DRM_SIS + tristate "SiS video cards" + depends on DRM && AGP + depends on FB_SIS || FB_SIS=n + help + Choose this option if you have a SiS 630 or compatible video + chipset. If M is selected the module will be called sis. AGP + support is required for this driver to work. + +config DRM_VIA + tristate "Via unichrome video cards" + depends on DRM && PCI + help + Choose this option if you have a Via unichrome or compatible video + chipset. If M is selected the module will be called via. + +config DRM_SAVAGE + tristate "Savage video cards" + depends on DRM && PCI + help + Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister + chipset. If M is selected the module will be called savage. + +config DRM_VGEM + tristate "Virtual GEM provider" + depends on DRM + help + Choose this option to get a virtual graphics memory manager, + as used by Mesa's software renderer for enhanced performance. + If M is selected the module will be called vgem. 
+ + +source "drivers/gpu/drm/exynos/Kconfig" + +source "drivers/gpu/drm/rockchip/Kconfig" + +source "drivers/gpu/drm/vmwgfx/Kconfig" + +source "drivers/gpu/drm/gma500/Kconfig" + +source "drivers/gpu/drm/udl/Kconfig" + +source "drivers/gpu/drm/ast/Kconfig" + +source "drivers/gpu/drm/mgag200/Kconfig" + +source "drivers/gpu/drm/cirrus/Kconfig" + +source "drivers/gpu/drm/armada/Kconfig" + +source "drivers/gpu/drm/atmel-hlcdc/Kconfig" + +source "drivers/gpu/drm/rcar-du/Kconfig" + +source "drivers/gpu/drm/shmobile/Kconfig" + +source "drivers/gpu/drm/omapdrm/Kconfig" + +source "drivers/gpu/drm/tilcdc/Kconfig" + +source "drivers/gpu/drm/qxl/Kconfig" + +source "drivers/gpu/drm/bochs/Kconfig" + +source "drivers/gpu/drm/msm/Kconfig" + +source "drivers/gpu/drm/tegra/Kconfig" + +source "drivers/gpu/drm/panel/Kconfig" + +source "drivers/gpu/drm/sti/Kconfig" + +source "drivers/gpu/drm/amd/amdkfd/Kconfig" + +source "drivers/gpu/drm/imx/Kconfig" diff --git a/kernel/drivers/gpu/drm/Makefile b/kernel/drivers/gpu/drm/Makefile new file mode 100644 index 000000000..7d4944e1a --- /dev/null +++ b/kernel/drivers/gpu/drm/Makefile @@ -0,0 +1,72 @@ +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
+ +ccflags-y := -Iinclude/drm + +drm-y := drm_auth.o drm_bufs.o drm_cache.o \ + drm_context.o drm_dma.o \ + drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ + drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ + drm_agpsupport.o drm_scatter.o drm_pci.o \ + drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ + drm_crtc.o drm_modes.o drm_edid.o \ + drm_info.o drm_debugfs.o drm_encoder_slave.o \ + drm_trace_points.o drm_global.o drm_prime.o \ + drm_rect.o drm_vma_manager.o drm_flip_work.o \ + drm_modeset_lock.o drm_atomic.o drm_bridge.o + +drm-$(CONFIG_COMPAT) += drm_ioc32.o +drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o +drm-$(CONFIG_PCI) += ati_pcigart.o +drm-$(CONFIG_DRM_PANEL) += drm_panel.o +drm-$(CONFIG_OF) += drm_of.o + +drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ + drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o +drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o +drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o +drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o + +obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o + +CFLAGS_drm_trace_points.o := -I$(src) + +obj-$(CONFIG_DRM) += drm.o +obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o +obj-$(CONFIG_DRM_TTM) += ttm/ +obj-$(CONFIG_DRM_TDFX) += tdfx/ +obj-$(CONFIG_DRM_R128) += r128/ +obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ +obj-$(CONFIG_DRM_RADEON)+= radeon/ +obj-$(CONFIG_DRM_MGA) += mga/ +obj-$(CONFIG_DRM_I810) += i810/ +obj-$(CONFIG_DRM_I915) += i915/ +obj-$(CONFIG_DRM_MGAG200) += mgag200/ +obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/ +obj-$(CONFIG_DRM_SIS) += sis/ +obj-$(CONFIG_DRM_SAVAGE)+= savage/ +obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/ +obj-$(CONFIG_DRM_VIA) +=via/ +obj-$(CONFIG_DRM_VGEM) += vgem/ +obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ +obj-$(CONFIG_DRM_EXYNOS) +=exynos/ +obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/ +obj-$(CONFIG_DRM_GMA500) += gma500/ +obj-$(CONFIG_DRM_UDL) += udl/ +obj-$(CONFIG_DRM_AST) += ast/ +obj-$(CONFIG_DRM_ARMADA) += 
armada/ +obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/ +obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/ +obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ +obj-$(CONFIG_DRM_OMAP) += omapdrm/ +obj-$(CONFIG_DRM_TILCDC) += tilcdc/ +obj-$(CONFIG_DRM_QXL) += qxl/ +obj-$(CONFIG_DRM_BOCHS) += bochs/ +obj-$(CONFIG_DRM_MSM) += msm/ +obj-$(CONFIG_DRM_TEGRA) += tegra/ +obj-$(CONFIG_DRM_STI) += sti/ +obj-$(CONFIG_DRM_IMX) += imx/ +obj-y += i2c/ +obj-y += panel/ +obj-y += bridge/ diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/Kconfig b/kernel/drivers/gpu/drm/amd/amdkfd/Kconfig new file mode 100644 index 000000000..8dfac37ff --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -0,0 +1,9 @@ +# +# Heterogenous system architecture configuration +# + +config HSA_AMD + tristate "HSA kernel driver for AMD GPU devices" + depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64 + help + Enable this if you want to use HSA features on AMD GPU devices. diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/Makefile b/kernel/drivers/gpu/drm/amd/amdkfd/Makefile new file mode 100644 index 000000000..0f4960148 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/Makefile @@ -0,0 +1,16 @@ +# +# Makefile for Heterogenous System Architecture support for AMD GPU devices +# + +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/ + +amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ + kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ + kfd_process.o kfd_queue.o kfd_mqd_manager.o \ + kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \ + kfd_kernel_queue.o kfd_kernel_queue_cik.o \ + kfd_kernel_queue_vi.o kfd_packet_manager.o \ + kfd_process_queue_manager.o kfd_device_queue_manager.o \ + kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \ + +obj-$(CONFIG_HSA_AMD) += amdkfd.o diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/kernel/drivers/gpu/drm/amd/amdkfd/cik_regs.h new file mode 100644 index 000000000..01ff332fa --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/cik_regs.h @@ -0,0 +1,234 
@@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef CIK_REGS_H +#define CIK_REGS_H + +#define IH_VMID_0_LUT 0x3D40u + +#define BIF_DOORBELL_CNTL 0x530Cu + +#define SRBM_GFX_CNTL 0xE44 +#define PIPEID(x) ((x) << 0) +#define MEID(x) ((x) << 2) +#define VMID(x) ((x) << 4) +#define QUEUEID(x) ((x) << 8) + +#define SQ_CONFIG 0x8C00 + +#define SH_MEM_BASES 0x8C28 +/* if PTR32, these are the bases for scratch and lds */ +#define PRIVATE_BASE(x) ((x) << 0) /* scratch */ +#define SHARED_BASE(x) ((x) << 16) /* LDS */ +#define SH_MEM_APE1_BASE 0x8C2C +/* if PTR32, this is the base location of GPUVM */ +#define SH_MEM_APE1_LIMIT 0x8C30 +/* if PTR32, this is the upper limit of GPUVM */ +#define SH_MEM_CONFIG 0x8C34 +#define PTR32 (1 << 0) +#define PRIVATE_ATC (1 << 1) +#define ALIGNMENT_MODE(x) ((x) << 2) +#define SH_MEM_ALIGNMENT_MODE_DWORD 0 +#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1 +#define SH_MEM_ALIGNMENT_MODE_STRICT 2 +#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3 +#define DEFAULT_MTYPE(x) ((x) << 4) +#define APE1_MTYPE(x) ((x) << 7) + +/* valid for both DEFAULT_MTYPE and APE1_MTYPE */ +#define MTYPE_CACHED 0 +#define MTYPE_NONCACHED 3 + + +#define SH_STATIC_MEM_CONFIG 0x9604u + +#define TC_CFG_L1_LOAD_POLICY0 0xAC68 +#define TC_CFG_L1_LOAD_POLICY1 0xAC6C +#define TC_CFG_L1_STORE_POLICY 0xAC70 +#define TC_CFG_L2_LOAD_POLICY0 0xAC74 +#define TC_CFG_L2_LOAD_POLICY1 0xAC78 +#define TC_CFG_L2_STORE_POLICY0 0xAC7C +#define TC_CFG_L2_STORE_POLICY1 0xAC80 +#define TC_CFG_L2_ATOMIC_POLICY 0xAC84 +#define TC_CFG_L1_VOLATILE 0xAC88 +#define TC_CFG_L2_VOLATILE 0xAC8C + +#define CP_PQ_WPTR_POLL_CNTL 0xC20C +#define WPTR_POLL_EN (1 << 31) + +#define CPC_INT_CNTL 0xC2D0 +#define CP_ME1_PIPE0_INT_CNTL 0xC214 +#define CP_ME1_PIPE1_INT_CNTL 0xC218 +#define CP_ME1_PIPE2_INT_CNTL 0xC21C +#define CP_ME1_PIPE3_INT_CNTL 0xC220 +#define CP_ME2_PIPE0_INT_CNTL 0xC224 +#define CP_ME2_PIPE1_INT_CNTL 0xC228 +#define CP_ME2_PIPE2_INT_CNTL 0xC22C +#define CP_ME2_PIPE3_INT_CNTL 0xC230 +#define DEQUEUE_REQUEST_INT_ENABLE (1 << 13) 
+#define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17) +#define PRIV_REG_INT_ENABLE (1 << 23) +#define TIME_STAMP_INT_ENABLE (1 << 26) +#define GENERIC2_INT_ENABLE (1 << 29) +#define GENERIC1_INT_ENABLE (1 << 30) +#define GENERIC0_INT_ENABLE (1 << 31) +#define CP_ME1_PIPE0_INT_STATUS 0xC214 +#define CP_ME1_PIPE1_INT_STATUS 0xC218 +#define CP_ME1_PIPE2_INT_STATUS 0xC21C +#define CP_ME1_PIPE3_INT_STATUS 0xC220 +#define CP_ME2_PIPE0_INT_STATUS 0xC224 +#define CP_ME2_PIPE1_INT_STATUS 0xC228 +#define CP_ME2_PIPE2_INT_STATUS 0xC22C +#define CP_ME2_PIPE3_INT_STATUS 0xC230 +#define DEQUEUE_REQUEST_INT_STATUS (1 << 13) +#define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17) +#define PRIV_REG_INT_STATUS (1 << 23) +#define TIME_STAMP_INT_STATUS (1 << 26) +#define GENERIC2_INT_STATUS (1 << 29) +#define GENERIC1_INT_STATUS (1 << 30) +#define GENERIC0_INT_STATUS (1 << 31) + +#define CP_HPD_EOP_BASE_ADDR 0xC904 +#define CP_HPD_EOP_BASE_ADDR_HI 0xC908 +#define CP_HPD_EOP_VMID 0xC90C +#define CP_HPD_EOP_CONTROL 0xC910 +#define EOP_SIZE(x) ((x) << 0) +#define EOP_SIZE_MASK (0x3f << 0) +#define CP_MQD_BASE_ADDR 0xC914 +#define CP_MQD_BASE_ADDR_HI 0xC918 +#define CP_HQD_ACTIVE 0xC91C +#define CP_HQD_VMID 0xC920 + +#define CP_HQD_PERSISTENT_STATE 0xC924u +#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8) +#define PRELOAD_REQ (1 << 0) + +#define CP_HQD_PIPE_PRIORITY 0xC928u +#define CP_HQD_QUEUE_PRIORITY 0xC92Cu +#define CP_HQD_QUANTUM 0xC930u +#define QUANTUM_EN 1U +#define QUANTUM_SCALE_1MS (1U << 4) +#define QUANTUM_DURATION(x) ((x) << 8) + +#define CP_HQD_PQ_BASE 0xC934 +#define CP_HQD_PQ_BASE_HI 0xC938 +#define CP_HQD_PQ_RPTR 0xC93C +#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940 +#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944 +#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948 +#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C +#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950 +#define DOORBELL_OFFSET(x) ((x) << 2) +#define DOORBELL_OFFSET_MASK (0x1fffff << 2) +#define DOORBELL_SOURCE (1 << 28) +#define DOORBELL_SCHD_HIT (1 << 
29) +#define DOORBELL_EN (1 << 30) +#define DOORBELL_HIT (1 << 31) +#define CP_HQD_PQ_WPTR 0xC954 +#define CP_HQD_PQ_CONTROL 0xC958 +#define QUEUE_SIZE(x) ((x) << 0) +#define QUEUE_SIZE_MASK (0x3f << 0) +#define RPTR_BLOCK_SIZE(x) ((x) << 8) +#define RPTR_BLOCK_SIZE_MASK (0x3f << 8) +#define MIN_AVAIL_SIZE(x) ((x) << 20) +#define PQ_ATC_EN (1 << 23) +#define PQ_VOLATILE (1 << 26) +#define NO_UPDATE_RPTR (1 << 27) +#define UNORD_DISPATCH (1 << 28) +#define ROQ_PQ_IB_FLIP (1 << 29) +#define PRIV_STATE (1 << 30) +#define KMD_QUEUE (1 << 31) + +#define DEFAULT_RPTR_BLOCK_SIZE RPTR_BLOCK_SIZE(5) +#define DEFAULT_MIN_AVAIL_SIZE MIN_AVAIL_SIZE(3) + +#define CP_HQD_IB_BASE_ADDR 0xC95Cu +#define CP_HQD_IB_BASE_ADDR_HI 0xC960u +#define CP_HQD_IB_RPTR 0xC964u +#define CP_HQD_IB_CONTROL 0xC968u +#define IB_ATC_EN (1U << 23) +#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20) + +#define AQL_ENABLE 1 + +#define CP_HQD_DEQUEUE_REQUEST 0xC974 +#define DEQUEUE_REQUEST_DRAIN 1 +#define DEQUEUE_REQUEST_RESET 2 +#define DEQUEUE_INT (1U << 8) + +#define CP_HQD_SEMA_CMD 0xC97Cu +#define CP_HQD_MSG_TYPE 0xC980u +#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u +#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u +#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu +#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u +#define CP_HQD_HQ_SCHEDULER0 0xC994u +#define CP_HQD_HQ_SCHEDULER1 0xC998u + + +#define CP_MQD_CONTROL 0xC99C +#define MQD_VMID(x) ((x) << 0) +#define MQD_VMID_MASK (0xf << 0) +#define MQD_CONTROL_PRIV_STATE_EN (1U << 8) + +#define SDMA_RB_VMID(x) (x << 24) +#define SDMA_RB_ENABLE (1 << 0) +#define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */ +#define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12) +#define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ +#define SDMA_OFFSET(x) (x << 0) +#define SDMA_DB_ENABLE (1 << 28) +#define SDMA_ATC (1 << 0) +#define SDMA_VA_PTR32 (1 << 4) +#define SDMA_VA_SHARED_BASE(x) (x << 8) + +#define GRBM_GFX_INDEX 0x30800 +#define INSTANCE_INDEX(x) ((x) << 0) +#define SH_INDEX(x) ((x) << 8) +#define 
SE_INDEX(x) ((x) << 16) +#define SH_BROADCAST_WRITES (1 << 29) +#define INSTANCE_BROADCAST_WRITES (1 << 30) +#define SE_BROADCAST_WRITES (1 << 31) + +#define SQC_CACHES 0x30d20 +#define SQC_POLICY 0x8C38u +#define SQC_VOLATILE 0x8C3Cu + +#define CP_PERFMON_CNTL 0x36020 + +#define ATC_VMID0_PASID_MAPPING 0x339Cu +#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u +#define ATC_VMID_PASID_MAPPING_VALID (1U << 31) + +#define ATC_VM_APERTURE0_CNTL 0x3310u +#define ATS_ACCESS_MODE_NEVER 0 +#define ATS_ACCESS_MODE_ALWAYS 1 + +#define ATC_VM_APERTURE0_CNTL2 0x3318u +#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u +#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u +#define ATC_VM_APERTURE1_CNTL 0x3314u +#define ATC_VM_APERTURE1_CNTL2 0x331Cu +#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu +#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u + +#endif diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c new file mode 100644 index 000000000..19a4fba46 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -0,0 +1,643 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kfd_priv.h" +#include "kfd_device_queue_manager.h" + +static long kfd_ioctl(struct file *, unsigned int, unsigned long); +static int kfd_open(struct inode *, struct file *); +static int kfd_mmap(struct file *, struct vm_area_struct *); + +static const char kfd_dev_name[] = "kfd"; + +static const struct file_operations kfd_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = kfd_ioctl, + .compat_ioctl = kfd_ioctl, + .open = kfd_open, + .mmap = kfd_mmap, +}; + +static int kfd_char_dev_major = -1; +static struct class *kfd_class; +struct device *kfd_device; + +int kfd_chardev_init(void) +{ + int err = 0; + + kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops); + err = kfd_char_dev_major; + if (err < 0) + goto err_register_chrdev; + + kfd_class = class_create(THIS_MODULE, kfd_dev_name); + err = PTR_ERR(kfd_class); + if (IS_ERR(kfd_class)) + goto err_class_create; + + kfd_device = device_create(kfd_class, NULL, + MKDEV(kfd_char_dev_major, 0), + NULL, kfd_dev_name); + err = PTR_ERR(kfd_device); + if (IS_ERR(kfd_device)) + goto err_device_create; + + return 0; + +err_device_create: + class_destroy(kfd_class); +err_class_create: + unregister_chrdev(kfd_char_dev_major, kfd_dev_name); +err_register_chrdev: + return err; +} + +void kfd_chardev_exit(void) +{ + device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0)); + class_destroy(kfd_class); + unregister_chrdev(kfd_char_dev_major, kfd_dev_name); +} + +struct device *kfd_chardev(void) +{ + return kfd_device; +} + + +static int kfd_open(struct inode *inode, struct file *filep) +{ + struct 
kfd_process *process; + bool is_32bit_user_mode; + + if (iminor(inode) != 0) + return -ENODEV; + + is_32bit_user_mode = is_compat_task(); + + if (is_32bit_user_mode == true) { + dev_warn(kfd_device, + "Process %d (32-bit) failed to open /dev/kfd\n" + "32-bit processes are not supported by amdkfd\n", + current->pid); + return -EPERM; + } + + process = kfd_create_process(current); + if (IS_ERR(process)) + return PTR_ERR(process); + + dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", + process->pasid, process->is_32bit_user_mode); + + return 0; +} + +static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, + void *data) +{ + struct kfd_ioctl_get_version_args *args = data; + int err = 0; + + args->major_version = KFD_IOCTL_MAJOR_VERSION; + args->minor_version = KFD_IOCTL_MINOR_VERSION; + + return err; +} + +static int set_queue_properties_from_user(struct queue_properties *q_properties, + struct kfd_ioctl_create_queue_args *args) +{ + if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { + pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); + return -EINVAL; + } + + if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { + pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); + return -EINVAL; + } + + if ((args->ring_base_address) && + (!access_ok(VERIFY_WRITE, + (const void __user *) args->ring_base_address, + sizeof(uint64_t)))) { + pr_err("kfd: can't access ring base address\n"); + return -EFAULT; + } + + if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { + pr_err("kfd: ring size must be a power of 2 or 0\n"); + return -EINVAL; + } + + if (!access_ok(VERIFY_WRITE, + (const void __user *) args->read_pointer_address, + sizeof(uint32_t))) { + pr_err("kfd: can't access read pointer\n"); + return -EFAULT; + } + + if (!access_ok(VERIFY_WRITE, + (const void __user *) args->write_pointer_address, + sizeof(uint32_t))) { + pr_err("kfd: can't access write pointer\n"); + 
return -EFAULT; + } + + if (args->eop_buffer_address && + !access_ok(VERIFY_WRITE, + (const void __user *) args->eop_buffer_address, + sizeof(uint32_t))) { + pr_debug("kfd: can't access eop buffer"); + return -EFAULT; + } + + if (args->ctx_save_restore_address && + !access_ok(VERIFY_WRITE, + (const void __user *) args->ctx_save_restore_address, + sizeof(uint32_t))) { + pr_debug("kfd: can't access ctx save restore buffer"); + return -EFAULT; + } + + q_properties->is_interop = false; + q_properties->queue_percent = args->queue_percentage; + q_properties->priority = args->queue_priority; + q_properties->queue_address = args->ring_base_address; + q_properties->queue_size = args->ring_size; + q_properties->read_ptr = (uint32_t *) args->read_pointer_address; + q_properties->write_ptr = (uint32_t *) args->write_pointer_address; + q_properties->eop_ring_buffer_address = args->eop_buffer_address; + q_properties->eop_ring_buffer_size = args->eop_buffer_size; + q_properties->ctx_save_restore_area_address = + args->ctx_save_restore_address; + q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; + if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || + args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) + q_properties->type = KFD_QUEUE_TYPE_COMPUTE; + else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA) + q_properties->type = KFD_QUEUE_TYPE_SDMA; + else + return -ENOTSUPP; + + if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) + q_properties->format = KFD_QUEUE_FORMAT_AQL; + else + q_properties->format = KFD_QUEUE_FORMAT_PM4; + + pr_debug("Queue Percentage (%d, %d)\n", + q_properties->queue_percent, args->queue_percentage); + + pr_debug("Queue Priority (%d, %d)\n", + q_properties->priority, args->queue_priority); + + pr_debug("Queue Address (0x%llX, 0x%llX)\n", + q_properties->queue_address, args->ring_base_address); + + pr_debug("Queue Size (0x%llX, %u)\n", + q_properties->queue_size, args->ring_size); + + pr_debug("Queue r/w Pointers (0x%llX, 
0x%llX)\n", + (uint64_t) q_properties->read_ptr, + (uint64_t) q_properties->write_ptr); + + pr_debug("Queue Format (%d)\n", q_properties->format); + + pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address); + + pr_debug("Queue CTX save arex (0x%llX)\n", + q_properties->ctx_save_restore_area_address); + + return 0; +} + +static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, + void *data) +{ + struct kfd_ioctl_create_queue_args *args = data; + struct kfd_dev *dev; + int err = 0; + unsigned int queue_id; + struct kfd_process_device *pdd; + struct queue_properties q_properties; + + memset(&q_properties, 0, sizeof(struct queue_properties)); + + pr_debug("kfd: creating queue ioctl\n"); + + err = set_queue_properties_from_user(&q_properties, args); + if (err) + return err; + + pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id); + dev = kfd_device_by_id(args->gpu_id); + if (dev == NULL) { + pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id); + return -EINVAL; + } + + mutex_lock(&p->mutex); + + pdd = kfd_bind_process_to_device(dev, p); + if (IS_ERR(pdd)) { + err = -ESRCH; + goto err_bind_process; + } + + pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n", + p->pasid, + dev->id); + + err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, + 0, q_properties.type, &queue_id); + if (err != 0) + goto err_create_queue; + + args->queue_id = queue_id; + + /* Return gpu_id as doorbell offset for mmap usage */ + args->doorbell_offset = args->gpu_id << PAGE_SHIFT; + + mutex_unlock(&p->mutex); + + pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); + + pr_debug("ring buffer address == 0x%016llX\n", + args->ring_base_address); + + pr_debug("read ptr address == 0x%016llX\n", + args->read_pointer_address); + + pr_debug("write ptr address == 0x%016llX\n", + args->write_pointer_address); + + return 0; + +err_create_queue: +err_bind_process: + mutex_unlock(&p->mutex); + return err; +} + +static int 
kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, + void *data) +{ + int retval; + struct kfd_ioctl_destroy_queue_args *args = data; + + pr_debug("kfd: destroying queue id %d for PASID %d\n", + args->queue_id, + p->pasid); + + mutex_lock(&p->mutex); + + retval = pqm_destroy_queue(&p->pqm, args->queue_id); + + mutex_unlock(&p->mutex); + return retval; +} + +static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, + void *data) +{ + int retval; + struct kfd_ioctl_update_queue_args *args = data; + struct queue_properties properties; + + if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { + pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); + return -EINVAL; + } + + if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { + pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); + return -EINVAL; + } + + if ((args->ring_base_address) && + (!access_ok(VERIFY_WRITE, + (const void __user *) args->ring_base_address, + sizeof(uint64_t)))) { + pr_err("kfd: can't access ring base address\n"); + return -EFAULT; + } + + if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { + pr_err("kfd: ring size must be a power of 2 or 0\n"); + return -EINVAL; + } + + properties.queue_address = args->ring_base_address; + properties.queue_size = args->ring_size; + properties.queue_percent = args->queue_percentage; + properties.priority = args->queue_priority; + + pr_debug("kfd: updating queue id %d for PASID %d\n", + args->queue_id, p->pasid); + + mutex_lock(&p->mutex); + + retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); + + mutex_unlock(&p->mutex); + + return retval; +} + +static int kfd_ioctl_set_memory_policy(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_set_memory_policy_args *args = data; + struct kfd_dev *dev; + int err = 0; + struct kfd_process_device *pdd; + enum cache_policy default_policy, alternate_policy; + + if (args->default_policy != 
KFD_IOC_CACHE_POLICY_COHERENT + && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { + return -EINVAL; + } + + if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT + && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { + return -EINVAL; + } + + dev = kfd_device_by_id(args->gpu_id); + if (dev == NULL) + return -EINVAL; + + mutex_lock(&p->mutex); + + pdd = kfd_bind_process_to_device(dev, p); + if (IS_ERR(pdd)) { + err = -ESRCH; + goto out; + } + + default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) + ? cache_policy_coherent : cache_policy_noncoherent; + + alternate_policy = + (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) + ? cache_policy_coherent : cache_policy_noncoherent; + + if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, + &pdd->qpd, + default_policy, + alternate_policy, + (void __user *)args->alternate_aperture_base, + args->alternate_aperture_size)) + err = -EINVAL; + +out: + mutex_unlock(&p->mutex); + + return err; +} + +static int kfd_ioctl_get_clock_counters(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_get_clock_counters_args *args = data; + struct kfd_dev *dev; + struct timespec64 time; + + dev = kfd_device_by_id(args->gpu_id); + if (dev == NULL) + return -EINVAL; + + /* Reading GPU clock counter from KGD */ + args->gpu_clock_counter = + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + + /* No access to rdtsc. 
Using raw monotonic time */ + getrawmonotonic64(&time); + args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time); + + get_monotonic_boottime64(&time); + args->system_clock_counter = (uint64_t)timespec64_to_ns(&time); + + /* Since the counter is in nano-seconds we use 1GHz frequency */ + args->system_clock_freq = 1000000000; + + return 0; +} + + +static int kfd_ioctl_get_process_apertures(struct file *filp, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_get_process_apertures_args *args = data; + struct kfd_process_device_apertures *pAperture; + struct kfd_process_device *pdd; + + dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + + args->num_of_nodes = 0; + + mutex_lock(&p->mutex); + + /*if the process-device list isn't empty*/ + if (kfd_has_process_device_data(p)) { + /* Run over all pdd of the process */ + pdd = kfd_get_first_process_device_data(p); + do { + pAperture = + &args->process_apertures[args->num_of_nodes]; + pAperture->gpu_id = pdd->dev->id; + pAperture->lds_base = pdd->lds_base; + pAperture->lds_limit = pdd->lds_limit; + pAperture->gpuvm_base = pdd->gpuvm_base; + pAperture->gpuvm_limit = pdd->gpuvm_limit; + pAperture->scratch_base = pdd->scratch_base; + pAperture->scratch_limit = pdd->scratch_limit; + + dev_dbg(kfd_device, + "node id %u\n", args->num_of_nodes); + dev_dbg(kfd_device, + "gpu id %u\n", pdd->dev->id); + dev_dbg(kfd_device, + "lds_base %llX\n", pdd->lds_base); + dev_dbg(kfd_device, + "lds_limit %llX\n", pdd->lds_limit); + dev_dbg(kfd_device, + "gpuvm_base %llX\n", pdd->gpuvm_base); + dev_dbg(kfd_device, + "gpuvm_limit %llX\n", pdd->gpuvm_limit); + dev_dbg(kfd_device, + "scratch_base %llX\n", pdd->scratch_base); + dev_dbg(kfd_device, + "scratch_limit %llX\n", pdd->scratch_limit); + + args->num_of_nodes++; + } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && + (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); + } + + mutex_unlock(&p->mutex); + + return 0; +} + +#define AMDKFD_IOCTL_DEF(ioctl, _func, 
_flags) \ + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} + +/** Ioctl table */ +static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, + kfd_ioctl_get_version, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, + kfd_ioctl_create_queue, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, + kfd_ioctl_destroy_queue, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, + kfd_ioctl_set_memory_policy, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, + kfd_ioctl_get_clock_counters, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, + kfd_ioctl_get_process_apertures, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, + kfd_ioctl_update_queue, 0), +}; + +#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) + +static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + struct kfd_process *process; + amdkfd_ioctl_t *func; + const struct amdkfd_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + char stack_kdata[128]; + char *kdata = NULL; + unsigned int usize, asize; + int retcode = -EINVAL; + + if (nr >= AMDKFD_CORE_IOCTL_COUNT) + goto err_i1; + + if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { + u32 amdkfd_size; + + ioctl = &amdkfd_ioctls[nr]; + + amdkfd_size = _IOC_SIZE(ioctl->cmd); + usize = asize = _IOC_SIZE(cmd); + if (amdkfd_size > asize) + asize = amdkfd_size; + + cmd = ioctl->cmd; + } else + goto err_i1; + + dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); + + process = kfd_get_process(current); + if (IS_ERR(process)) { + dev_dbg(kfd_device, "no process\n"); + goto err_i1; + } + + /* Do not trust userspace, use our own definition */ + func = ioctl->func; + + if (unlikely(!func)) { + dev_dbg(kfd_device, "no function\n"); + retcode = -EINVAL; + goto err_i1; + } + + if (cmd & (IOC_IN | IOC_OUT)) { + if (asize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = 
kmalloc(asize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto err_i1; + } + } + if (asize > usize) + memset(kdata + usize, 0, asize - usize); + } + + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { + retcode = -EFAULT; + goto err_i1; + } + } else if (cmd & IOC_OUT) { + memset(kdata, 0, usize); + } + + retcode = func(filep, process, kdata); + + if (cmd & IOC_OUT) + if (copy_to_user((void __user *)arg, kdata, usize) != 0) + retcode = -EFAULT; + +err_i1: + if (!ioctl) + dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), cmd, nr); + + if (kdata != stack_kdata) + kfree(kdata); + + if (retcode) + dev_dbg(kfd_device, "ret = %d\n", retcode); + + return retcode; +} + +static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct kfd_process *process; + + process = kfd_get_process(current); + if (IS_ERR(process)) + return PTR_ERR(process); + + return kfd_doorbell_mmap(process, vma); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_crat.h new file mode 100644 index 000000000..a374fa3d3 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -0,0 +1,294 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef KFD_CRAT_H_INCLUDED +#define KFD_CRAT_H_INCLUDED + +#include + +#pragma pack(1) + +/* + * 4CC signature values for the CRAT and CDIT ACPI tables + */ + +#define CRAT_SIGNATURE "CRAT" +#define CDIT_SIGNATURE "CDIT" + +/* + * Component Resource Association Table (CRAT) + */ + +#define CRAT_OEMID_LENGTH 6 +#define CRAT_OEMTABLEID_LENGTH 8 +#define CRAT_RESERVED_LENGTH 6 + +#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1) + +struct crat_header { + uint32_t signature; + uint32_t length; + uint8_t revision; + uint8_t checksum; + uint8_t oem_id[CRAT_OEMID_LENGTH]; + uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH]; + uint32_t oem_revision; + uint32_t creator_id; + uint32_t creator_revision; + uint32_t total_entries; + uint16_t num_domains; + uint8_t reserved[CRAT_RESERVED_LENGTH]; +}; + +/* + * The header structure is immediately followed by total_entries of the + * data definitions + */ + +/* + * The currently defined subtype entries in the CRAT + */ +#define CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY 0 +#define CRAT_SUBTYPE_MEMORY_AFFINITY 1 +#define CRAT_SUBTYPE_CACHE_AFFINITY 2 +#define CRAT_SUBTYPE_TLB_AFFINITY 3 +#define CRAT_SUBTYPE_CCOMPUTE_AFFINITY 4 +#define CRAT_SUBTYPE_IOLINK_AFFINITY 5 +#define CRAT_SUBTYPE_MAX 6 + +#define CRAT_SIBLINGMAP_SIZE 32 + +/* + * ComputeUnit Affinity structure and definitions + */ +#define CRAT_CU_FLAGS_ENABLED 0x00000001 +#define CRAT_CU_FLAGS_HOT_PLUGGABLE 0x00000002 +#define CRAT_CU_FLAGS_CPU_PRESENT 0x00000004 
+#define CRAT_CU_FLAGS_GPU_PRESENT 0x00000008 +#define CRAT_CU_FLAGS_IOMMU_PRESENT 0x00000010 +#define CRAT_CU_FLAGS_RESERVED 0xffffffe0 + +#define CRAT_COMPUTEUNIT_RESERVED_LENGTH 4 + +struct crat_subtype_computeunit { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; + uint32_t proximity_domain; + uint32_t processor_id_low; + uint16_t num_cpu_cores; + uint16_t num_simd_cores; + uint16_t max_waves_simd; + uint16_t io_count; + uint16_t hsa_capability; + uint16_t lds_size_in_kb; + uint8_t wave_front_size; + uint8_t num_banks; + uint16_t micro_engine_id; + uint8_t num_arrays; + uint8_t num_cu_per_array; + uint8_t num_simd_per_cu; + uint8_t max_slots_scatch_cu; + uint8_t reserved2[CRAT_COMPUTEUNIT_RESERVED_LENGTH]; +}; + +/* + * HSA Memory Affinity structure and definitions + */ +#define CRAT_MEM_FLAGS_ENABLED 0x00000001 +#define CRAT_MEM_FLAGS_HOT_PLUGGABLE 0x00000002 +#define CRAT_MEM_FLAGS_NON_VOLATILE 0x00000004 +#define CRAT_MEM_FLAGS_RESERVED 0xfffffff8 + +#define CRAT_MEMORY_RESERVED_LENGTH 8 + +struct crat_subtype_memory { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; + uint32_t promixity_domain; + uint32_t base_addr_low; + uint32_t base_addr_high; + uint32_t length_low; + uint32_t length_high; + uint32_t width; + uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH]; +}; + +/* + * HSA Cache Affinity structure and definitions + */ +#define CRAT_CACHE_FLAGS_ENABLED 0x00000001 +#define CRAT_CACHE_FLAGS_DATA_CACHE 0x00000002 +#define CRAT_CACHE_FLAGS_INST_CACHE 0x00000004 +#define CRAT_CACHE_FLAGS_CPU_CACHE 0x00000008 +#define CRAT_CACHE_FLAGS_SIMD_CACHE 0x00000010 +#define CRAT_CACHE_FLAGS_RESERVED 0xffffffe0 + +#define CRAT_CACHE_RESERVED_LENGTH 8 + +struct crat_subtype_cache { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; + uint32_t processor_id_low; + uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE]; + uint32_t cache_size; + uint8_t cache_level; + uint8_t lines_per_tag; + uint16_t cache_line_size; 
+ uint8_t associativity; + uint8_t cache_properties; + uint16_t cache_latency; + uint8_t reserved2[CRAT_CACHE_RESERVED_LENGTH]; +}; + +/* + * HSA TLB Affinity structure and definitions + */ +#define CRAT_TLB_FLAGS_ENABLED 0x00000001 +#define CRAT_TLB_FLAGS_DATA_TLB 0x00000002 +#define CRAT_TLB_FLAGS_INST_TLB 0x00000004 +#define CRAT_TLB_FLAGS_CPU_TLB 0x00000008 +#define CRAT_TLB_FLAGS_SIMD_TLB 0x00000010 +#define CRAT_TLB_FLAGS_RESERVED 0xffffffe0 + +#define CRAT_TLB_RESERVED_LENGTH 4 + +struct crat_subtype_tlb { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; + uint32_t processor_id_low; + uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE]; + uint32_t tlb_level; + uint8_t data_tlb_associativity_2mb; + uint8_t data_tlb_size_2mb; + uint8_t instruction_tlb_associativity_2mb; + uint8_t instruction_tlb_size_2mb; + uint8_t data_tlb_associativity_4k; + uint8_t data_tlb_size_4k; + uint8_t instruction_tlb_associativity_4k; + uint8_t instruction_tlb_size_4k; + uint8_t data_tlb_associativity_1gb; + uint8_t data_tlb_size_1gb; + uint8_t instruction_tlb_associativity_1gb; + uint8_t instruction_tlb_size_1gb; + uint8_t reserved2[CRAT_TLB_RESERVED_LENGTH]; +}; + +/* + * HSA CCompute/APU Affinity structure and definitions + */ +#define CRAT_CCOMPUTE_FLAGS_ENABLED 0x00000001 +#define CRAT_CCOMPUTE_FLAGS_RESERVED 0xfffffffe + +#define CRAT_CCOMPUTE_RESERVED_LENGTH 16 + +struct crat_subtype_ccompute { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; + uint32_t processor_id_low; + uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE]; + uint32_t apu_size; + uint8_t reserved2[CRAT_CCOMPUTE_RESERVED_LENGTH]; +}; + +/* + * HSA IO Link Affinity structure and definitions + */ +#define CRAT_IOLINK_FLAGS_ENABLED 0x00000001 +#define CRAT_IOLINK_FLAGS_COHERENCY 0x00000002 +#define CRAT_IOLINK_FLAGS_RESERVED 0xfffffffc + +/* + * IO interface types + */ +#define CRAT_IOLINK_TYPE_UNDEFINED 0 +#define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1 +#define CRAT_IOLINK_TYPE_PCIEXPRESS 
2 +#define CRAT_IOLINK_TYPE_OTHER 3 +#define CRAT_IOLINK_TYPE_MAX 255 + +#define CRAT_IOLINK_RESERVED_LENGTH 24 + +struct crat_subtype_iolink { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; + uint32_t proximity_domain_from; + uint32_t proximity_domain_to; + uint8_t io_interface_type; + uint8_t version_major; + uint16_t version_minor; + uint32_t minimum_latency; + uint32_t maximum_latency; + uint32_t minimum_bandwidth_mbs; + uint32_t maximum_bandwidth_mbs; + uint32_t recommended_transfer_size; + uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH]; +}; + +/* + * HSA generic sub-type header + */ + +#define CRAT_SUBTYPE_FLAGS_ENABLED 0x00000001 + +struct crat_subtype_generic { + uint8_t type; + uint8_t length; + uint16_t reserved; + uint32_t flags; +}; + +/* + * Component Locality Distance Information Table (CDIT) + */ +#define CDIT_OEMID_LENGTH 6 +#define CDIT_OEMTABLEID_LENGTH 8 + +struct cdit_header { + uint32_t signature; + uint32_t length; + uint8_t revision; + uint8_t checksum; + uint8_t oem_id[CDIT_OEMID_LENGTH]; + uint8_t oem_table_id[CDIT_OEMTABLEID_LENGTH]; + uint32_t oem_revision; + uint32_t creator_id; + uint32_t creator_revision; + uint32_t total_entries; + uint16_t num_domains; + uint8_t entry[1]; +}; + +#pragma pack() + +#endif /* KFD_CRAT_H_INCLUDED */ diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device.c new file mode 100644 index 000000000..ca7f2d3af --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -0,0 +1,522 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include "kfd_priv.h" +#include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers.h" + +#define MQD_SIZE_ALIGNED 768 + +static const struct kfd_device_info kaveri_device_info = { + .asic_family = CHIP_KAVERI, + .max_pasid_bits = 16, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .mqd_size_aligned = MQD_SIZE_ALIGNED +}; + +static const struct kfd_device_info carrizo_device_info = { + .asic_family = CHIP_CARRIZO, + .max_pasid_bits = 16, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED +}; + +struct kfd_deviceid { + unsigned short did; + const struct kfd_device_info *device_info; +}; + +/* Please keep this sorted by increasing device id. 
*/ +static const struct kfd_deviceid supported_devices[] = { + { 0x1304, &kaveri_device_info }, /* Kaveri */ + { 0x1305, &kaveri_device_info }, /* Kaveri */ + { 0x1306, &kaveri_device_info }, /* Kaveri */ + { 0x1307, &kaveri_device_info }, /* Kaveri */ + { 0x1309, &kaveri_device_info }, /* Kaveri */ + { 0x130A, &kaveri_device_info }, /* Kaveri */ + { 0x130B, &kaveri_device_info }, /* Kaveri */ + { 0x130C, &kaveri_device_info }, /* Kaveri */ + { 0x130D, &kaveri_device_info }, /* Kaveri */ + { 0x130E, &kaveri_device_info }, /* Kaveri */ + { 0x130F, &kaveri_device_info }, /* Kaveri */ + { 0x1310, &kaveri_device_info }, /* Kaveri */ + { 0x1311, &kaveri_device_info }, /* Kaveri */ + { 0x1312, &kaveri_device_info }, /* Kaveri */ + { 0x1313, &kaveri_device_info }, /* Kaveri */ + { 0x1315, &kaveri_device_info }, /* Kaveri */ + { 0x1316, &kaveri_device_info }, /* Kaveri */ + { 0x1317, &kaveri_device_info }, /* Kaveri */ + { 0x1318, &kaveri_device_info }, /* Kaveri */ + { 0x131B, &kaveri_device_info }, /* Kaveri */ + { 0x131C, &kaveri_device_info }, /* Kaveri */ + { 0x131D, &kaveri_device_info } /* Kaveri */ +}; + +static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, + unsigned int chunk_size); +static void kfd_gtt_sa_fini(struct kfd_dev *kfd); + +static const struct kfd_device_info *lookup_device_info(unsigned short did) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { + if (supported_devices[i].did == did) { + BUG_ON(supported_devices[i].device_info == NULL); + return supported_devices[i].device_info; + } + } + + return NULL; +} + +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, + struct pci_dev *pdev, const struct kfd2kgd_calls *f2g) +{ + struct kfd_dev *kfd; + + const struct kfd_device_info *device_info = + lookup_device_info(pdev->device); + + if (!device_info) + return NULL; + + kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); + if (!kfd) + return NULL; + + kfd->kgd = kgd; + kfd->device_info = device_info; + kfd->pdev = pdev; + 
kfd->init_complete = false; + kfd->kfd2kgd = f2g; + + mutex_init(&kfd->doorbell_mutex); + memset(&kfd->doorbell_available_index, 0, + sizeof(kfd->doorbell_available_index)); + + return kfd; +} + +static bool device_iommu_pasid_init(struct kfd_dev *kfd) +{ + const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP | + AMD_IOMMU_DEVICE_FLAG_PRI_SUP | + AMD_IOMMU_DEVICE_FLAG_PASID_SUP; + + struct amd_iommu_device_info iommu_info; + unsigned int pasid_limit; + int err; + + err = amd_iommu_device_info(kfd->pdev, &iommu_info); + if (err < 0) { + dev_err(kfd_device, + "error getting iommu info. is the iommu enabled?\n"); + return false; + } + + if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) { + dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n", + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0, + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0, + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0); + return false; + } + + pasid_limit = min_t(unsigned int, + (unsigned int)1 << kfd->device_info->max_pasid_bits, + iommu_info.max_pasids); + /* + * last pasid is used for kernel queues doorbells + * in the future the last pasid might be used for a kernel thread. 
+ */ + pasid_limit = min_t(unsigned int, + pasid_limit, + kfd->doorbell_process_limit - 1); + + err = amd_iommu_init_device(kfd->pdev, pasid_limit); + if (err < 0) { + dev_err(kfd_device, "error initializing iommu device\n"); + return false; + } + + if (!kfd_set_pasid_limit(pasid_limit)) { + dev_err(kfd_device, "error setting pasid limit\n"); + amd_iommu_free_device(kfd->pdev); + return false; + } + + return true; +} + +static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) +{ + struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); + + if (dev) + kfd_unbind_process_from_device(dev, pasid); +} + +bool kgd2kfd_device_init(struct kfd_dev *kfd, + const struct kgd2kfd_shared_resources *gpu_resources) +{ + unsigned int size; + + kfd->shared_resources = *gpu_resources; + + /* calculate max size of mqds needed for queues */ + size = max_num_of_queues_per_device * + kfd->device_info->mqd_size_aligned; + + /* + * calculate max size of runlist packet. + * There can be only 2 packets at once + */ + size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) + + max_num_of_queues_per_device * + sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2; + + /* Add size of HIQ & DIQ */ + size += KFD_KERNEL_QUEUE_SIZE * 2; + + /* add another 512KB for all other allocations on gart (HPD, fences) */ + size += 512 * 1024; + + if (kfd->kfd2kgd->init_gtt_mem_allocation( + kfd->kgd, size, &kfd->gtt_mem, + &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ + dev_err(kfd_device, + "Could not allocate %d bytes for device (%x:%x)\n", + size, kfd->pdev->vendor, kfd->pdev->device); + goto out; + } + + dev_info(kfd_device, + "Allocated %d bytes on gart for device(%x:%x)\n", + size, kfd->pdev->vendor, kfd->pdev->device); + + /* Initialize GTT sa with 512 byte chunk size */ + if (kfd_gtt_sa_init(kfd, size, 512) != 0) { + dev_err(kfd_device, + "Error initializing gtt sub-allocator\n"); + goto kfd_gtt_sa_init_error; + } + + kfd_doorbell_init(kfd); + + if 
(kfd_topology_add_device(kfd) != 0) { + dev_err(kfd_device, + "Error adding device (%x:%x) to topology\n", + kfd->pdev->vendor, kfd->pdev->device); + goto kfd_topology_add_device_error; + } + + if (!device_iommu_pasid_init(kfd)) { + dev_err(kfd_device, + "Error initializing iommuv2 for device (%x:%x)\n", + kfd->pdev->vendor, kfd->pdev->device); + goto device_iommu_pasid_error; + } + amd_iommu_set_invalidate_ctx_cb(kfd->pdev, + iommu_pasid_shutdown_callback); + + kfd->dqm = device_queue_manager_init(kfd); + if (!kfd->dqm) { + dev_err(kfd_device, + "Error initializing queue manager for device (%x:%x)\n", + kfd->pdev->vendor, kfd->pdev->device); + goto device_queue_manager_error; + } + + if (kfd->dqm->ops.start(kfd->dqm) != 0) { + dev_err(kfd_device, + "Error starting queuen manager for device (%x:%x)\n", + kfd->pdev->vendor, kfd->pdev->device); + goto dqm_start_error; + } + + kfd->init_complete = true; + dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor, + kfd->pdev->device); + + pr_debug("kfd: Starting kfd with the following scheduling policy %d\n", + sched_policy); + + goto out; + +dqm_start_error: + device_queue_manager_uninit(kfd->dqm); +device_queue_manager_error: + amd_iommu_free_device(kfd->pdev); +device_iommu_pasid_error: + kfd_topology_remove_device(kfd); +kfd_topology_add_device_error: + kfd_gtt_sa_fini(kfd); +kfd_gtt_sa_init_error: + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); + dev_err(kfd_device, + "device (%x:%x) NOT added due to errors\n", + kfd->pdev->vendor, kfd->pdev->device); +out: + return kfd->init_complete; +} + +void kgd2kfd_device_exit(struct kfd_dev *kfd) +{ + if (kfd->init_complete) { + device_queue_manager_uninit(kfd->dqm); + amd_iommu_free_device(kfd->pdev); + kfd_topology_remove_device(kfd); + kfd_gtt_sa_fini(kfd); + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); + } + + kfree(kfd); +} + +void kgd2kfd_suspend(struct kfd_dev *kfd) +{ + BUG_ON(kfd == NULL); + + if (kfd->init_complete) { + 
kfd->dqm->ops.stop(kfd->dqm); + amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); + amd_iommu_free_device(kfd->pdev); + } +} + +int kgd2kfd_resume(struct kfd_dev *kfd) +{ + unsigned int pasid_limit; + int err; + + BUG_ON(kfd == NULL); + + pasid_limit = kfd_get_pasid_limit(); + + if (kfd->init_complete) { + err = amd_iommu_init_device(kfd->pdev, pasid_limit); + if (err < 0) + return -ENXIO; + amd_iommu_set_invalidate_ctx_cb(kfd->pdev, + iommu_pasid_shutdown_callback); + kfd->dqm->ops.start(kfd->dqm); + } + + return 0; +} + +/* This is called directly from KGD at ISR. */ +void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) +{ + /* Process interrupts / schedule work as necessary */ +} + +static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, + unsigned int chunk_size) +{ + unsigned int num_of_bits; + + BUG_ON(!kfd); + BUG_ON(!kfd->gtt_mem); + BUG_ON(buf_size < chunk_size); + BUG_ON(buf_size == 0); + BUG_ON(chunk_size == 0); + + kfd->gtt_sa_chunk_size = chunk_size; + kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; + + num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE; + BUG_ON(num_of_bits == 0); + + kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL); + + if (!kfd->gtt_sa_bitmap) + return -ENOMEM; + + pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n", + kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); + + mutex_init(&kfd->gtt_sa_lock); + + return 0; + +} + +static void kfd_gtt_sa_fini(struct kfd_dev *kfd) +{ + mutex_destroy(&kfd->gtt_sa_lock); + kfree(kfd->gtt_sa_bitmap); +} + +static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr, + unsigned int bit_num, + unsigned int chunk_size) +{ + return start_addr + bit_num * chunk_size; +} + +static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, + unsigned int bit_num, + unsigned int chunk_size) +{ + return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); +} + +int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, + 
struct kfd_mem_obj **mem_obj) +{ + unsigned int found, start_search, cur_size; + + BUG_ON(!kfd); + + if (size == 0) + return -EINVAL; + + if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) + return -ENOMEM; + + *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); + if ((*mem_obj) == NULL) + return -ENOMEM; + + pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size); + + start_search = 0; + + mutex_lock(&kfd->gtt_sa_lock); + +kfd_gtt_restart_search: + /* Find the first chunk that is free */ + found = find_next_zero_bit(kfd->gtt_sa_bitmap, + kfd->gtt_sa_num_of_chunks, + start_search); + + pr_debug("kfd: found = %d\n", found); + + /* If there wasn't any free chunk, bail out */ + if (found == kfd->gtt_sa_num_of_chunks) + goto kfd_gtt_no_free_chunk; + + /* Update fields of mem_obj */ + (*mem_obj)->range_start = found; + (*mem_obj)->range_end = found; + (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr( + kfd->gtt_start_gpu_addr, + found, + kfd->gtt_sa_chunk_size); + (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr( + kfd->gtt_start_cpu_ptr, + found, + kfd->gtt_sa_chunk_size); + + pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n", + (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr); + + /* If we need only one chunk, mark it as allocated and get out */ + if (size <= kfd->gtt_sa_chunk_size) { + pr_debug("kfd: single bit\n"); + set_bit(found, kfd->gtt_sa_bitmap); + goto kfd_gtt_out; + } + + /* Otherwise, try to see if we have enough contiguous chunks */ + cur_size = size - kfd->gtt_sa_chunk_size; + do { + (*mem_obj)->range_end = + find_next_zero_bit(kfd->gtt_sa_bitmap, + kfd->gtt_sa_num_of_chunks, ++found); + /* + * If next free chunk is not contiguous than we need to + * restart our search from the last free chunk we found (which + * wasn't contiguous to the previous ones + */ + if ((*mem_obj)->range_end != found) { + start_search = found; + goto kfd_gtt_restart_search; + } + + /* + * If we reached end of buffer, bail out with error + */ + 
if (found == kfd->gtt_sa_num_of_chunks) + goto kfd_gtt_no_free_chunk; + + /* Check if we don't need another chunk */ + if (cur_size <= kfd->gtt_sa_chunk_size) + cur_size = 0; + else + cur_size -= kfd->gtt_sa_chunk_size; + + } while (cur_size > 0); + + pr_debug("kfd: range_start = %d, range_end = %d\n", + (*mem_obj)->range_start, (*mem_obj)->range_end); + + /* Mark the chunks as allocated */ + for (found = (*mem_obj)->range_start; + found <= (*mem_obj)->range_end; + found++) + set_bit(found, kfd->gtt_sa_bitmap); + +kfd_gtt_out: + mutex_unlock(&kfd->gtt_sa_lock); + return 0; + +kfd_gtt_no_free_chunk: + pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj); + mutex_unlock(&kfd->gtt_sa_lock); + kfree(mem_obj); + return -ENOMEM; +} + +int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) +{ + unsigned int bit; + + BUG_ON(!kfd); + + /* Act like kfree when trying to free a NULL object */ + if (!mem_obj) + return 0; + + pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n", + mem_obj, mem_obj->range_start, mem_obj->range_end); + + mutex_lock(&kfd->gtt_sa_lock); + + /* Mark the chunks as free */ + for (bit = mem_obj->range_start; + bit <= mem_obj->range_end; + bit++) + clear_bit(bit, kfd->gtt_sa_bitmap); + + mutex_unlock(&kfd->gtt_sa_lock); + + kfree(mem_obj); + return 0; +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c new file mode 100644 index 000000000..596ee5cd3 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -0,0 +1,1217 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include "kfd_priv.h" +#include "kfd_device_queue_manager.h" +#include "kfd_mqd_manager.h" +#include "cik_regs.h" +#include "kfd_kernel_queue.h" + +/* Size of the per-pipe EOP queue */ +#define CIK_HPD_EOP_BYTES_LOG2 11 +#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2) + +static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, + unsigned int pasid, unsigned int vmid); + +static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd); + +static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock); +static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock); + +static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd); + +static void deallocate_sdma_queue(struct device_queue_manager *dqm, + unsigned int sdma_queue_id); + +static inline +enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) +{ + if (type == KFD_QUEUE_TYPE_SDMA) + return KFD_MQD_TYPE_SDMA; + return KFD_MQD_TYPE_CP; +} + +unsigned int get_first_pipe(struct device_queue_manager *dqm) +{ + BUG_ON(!dqm || !dqm->dev); + return dqm->dev->shared_resources.first_compute_pipe; +} + +unsigned int get_pipes_num(struct device_queue_manager *dqm) +{ + BUG_ON(!dqm || !dqm->dev); + return dqm->dev->shared_resources.compute_pipe_count; +} + +static inline unsigned int get_pipes_num_cpsch(void) +{ + return PIPE_PER_ME_CP_SCHEDULING; +} + +void program_sh_mem_settings(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + return dqm->dev->kfd2kgd->program_sh_mem_settings( + dqm->dev->kgd, qpd->vmid, + qpd->sh_mem_config, + qpd->sh_mem_ape1_base, + qpd->sh_mem_ape1_limit, + qpd->sh_mem_bases); +} + +static int allocate_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + int bit, 
allocated_vmid; + + if (dqm->vmid_bitmap == 0) + return -ENOMEM; + + bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM); + clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap); + + /* Kaveri kfd vmid's starts from vmid 8 */ + allocated_vmid = bit + KFD_VMID_START_OFFSET; + pr_debug("kfd: vmid allocation %d\n", allocated_vmid); + qpd->vmid = allocated_vmid; + q->properties.vmid = allocated_vmid; + + set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); + program_sh_mem_settings(dqm, qpd); + + return 0; +} + +static void deallocate_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + int bit = qpd->vmid - KFD_VMID_START_OFFSET; + + /* Release the vmid mapping */ + set_pasid_vmid_mapping(dqm, 0, qpd->vmid); + + set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); + qpd->vmid = 0; + q->properties.vmid = 0; +} + +static int create_queue_nocpsch(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd, + int *allocated_vmid) +{ + int retval; + + BUG_ON(!dqm || !q || !qpd || !allocated_vmid); + + pr_debug("kfd: In func %s\n", __func__); + print_queue(q); + + mutex_lock(&dqm->lock); + + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + + if (list_empty(&qpd->queues_list)) { + retval = allocate_vmid(dqm, qpd, q); + if (retval != 0) { + mutex_unlock(&dqm->lock); + return retval; + } + } + *allocated_vmid = qpd->vmid; + q->properties.vmid = qpd->vmid; + + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) + retval = create_compute_queue_nocpsch(dqm, q, qpd); + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + retval = create_sdma_queue_nocpsch(dqm, q, qpd); + + if (retval != 0) { + if (list_empty(&qpd->queues_list)) { + deallocate_vmid(dqm, qpd, q); + *allocated_vmid = 0; + } + 
mutex_unlock(&dqm->lock); + return retval; + } + + list_add(&q->list, &qpd->queues_list); + if (q->properties.is_active) + dqm->queue_count++; + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count++; + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. + */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + + mutex_unlock(&dqm->lock); + return 0; +} + +static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) +{ + bool set; + int pipe, bit, i; + + set = false; + + for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm); + pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) { + if (dqm->allocated_queues[pipe] != 0) { + bit = find_first_bit( + (unsigned long *)&dqm->allocated_queues[pipe], + QUEUES_PER_PIPE); + + clear_bit(bit, + (unsigned long *)&dqm->allocated_queues[pipe]); + q->pipe = pipe; + q->queue = bit; + set = true; + break; + } + } + + if (set == false) + return -EBUSY; + + pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n", + __func__, q->pipe, q->queue); + /* horizontal hqd allocation */ + dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm); + + return 0; +} + +static inline void deallocate_hqd(struct device_queue_manager *dqm, + struct queue *q) +{ + set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); +} + +static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd) +{ + int retval; + struct mqd_manager *mqd; + + BUG_ON(!dqm || !q || !qpd); + + mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + if (mqd == NULL) + return -ENOMEM; + + retval = allocate_hqd(dqm, q); + if (retval != 0) + return retval; + + retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, + &q->gart_mqd_addr, &q->properties); + if (retval != 0) { + deallocate_hqd(dqm, q); + return retval; + } + + 
pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n", + q->pipe, + q->queue); + + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, + q->queue, (uint32_t __user *) q->properties.write_ptr); + if (retval != 0) { + deallocate_hqd(dqm, q); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + + return 0; +} + +static int destroy_queue_nocpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + int retval; + struct mqd_manager *mqd; + + BUG_ON(!dqm || !q || !q->mqd || !qpd); + + retval = 0; + + pr_debug("kfd: In Func %s\n", __func__); + + mutex_lock(&dqm->lock); + + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) { + mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + if (mqd == NULL) { + retval = -ENOMEM; + goto out; + } + deallocate_hqd(dqm, q); + } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { + mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); + if (mqd == NULL) { + retval = -ENOMEM; + goto out; + } + dqm->sdma_queue_count--; + deallocate_sdma_queue(dqm, q->sdma_id); + } else { + pr_debug("q->properties.type is invalid (%d)\n", + q->properties.type); + retval = -EINVAL; + goto out; + } + + retval = mqd->destroy_mqd(mqd, q->mqd, + KFD_PREEMPT_TYPE_WAVEFRONT_RESET, + QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, + q->pipe, q->queue); + + if (retval != 0) + goto out; + + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + + list_del(&q->list); + if (list_empty(&qpd->queues_list)) + deallocate_vmid(dqm, qpd, q); + if (q->properties.is_active) + dqm->queue_count--; + + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + +out: + mutex_unlock(&dqm->lock); + return retval; +} + +static int update_queue(struct device_queue_manager *dqm, struct queue *q) +{ + int retval; + struct mqd_manager *mqd; + bool prev_active = false; + + BUG_ON(!dqm || !q || !q->mqd); 
+ + mutex_lock(&dqm->lock); + mqd = dqm->ops.get_mqd_manager(dqm, + get_mqd_type_from_queue_type(q->properties.type)); + if (mqd == NULL) { + mutex_unlock(&dqm->lock); + return -ENOMEM; + } + + if (q->properties.is_active == true) + prev_active = true; + + /* + * + * check active state vs. the previous state + * and modify counter accordingly + */ + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if ((q->properties.is_active == true) && (prev_active == false)) + dqm->queue_count++; + else if ((q->properties.is_active == false) && (prev_active == true)) + dqm->queue_count--; + + if (sched_policy != KFD_SCHED_POLICY_NO_HWS) + retval = execute_queues_cpsch(dqm, false); + + mutex_unlock(&dqm->lock); + return retval; +} + +static struct mqd_manager *get_mqd_manager_nocpsch( + struct device_queue_manager *dqm, enum KFD_MQD_TYPE type) +{ + struct mqd_manager *mqd; + + BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX); + + pr_debug("kfd: In func %s mqd type %d\n", __func__, type); + + mqd = dqm->mqds[type]; + if (!mqd) { + mqd = mqd_manager_init(type, dqm->dev); + if (mqd == NULL) + pr_err("kfd: mqd manager is NULL"); + dqm->mqds[type] = mqd; + } + + return mqd; +} + +static int register_process_nocpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct device_process_node *n; + int retval; + + BUG_ON(!dqm || !qpd); + + pr_debug("kfd: In func %s\n", __func__); + + n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL); + if (!n) + return -ENOMEM; + + n->qpd = qpd; + + mutex_lock(&dqm->lock); + list_add(&n->list, &dqm->queues); + + retval = dqm->ops_asic_specific.register_process(dqm, qpd); + + dqm->processes_count++; + + mutex_unlock(&dqm->lock); + + return retval; +} + +static int unregister_process_nocpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int retval; + struct device_process_node *cur, *next; + + BUG_ON(!dqm || !qpd); + + pr_debug("In func %s\n", __func__); + + pr_debug("qpd->queues_list is %s\n", + 
list_empty(&qpd->queues_list) ? "empty" : "not empty"); + + retval = 0; + mutex_lock(&dqm->lock); + + list_for_each_entry_safe(cur, next, &dqm->queues, list) { + if (qpd == cur->qpd) { + list_del(&cur->list); + kfree(cur); + dqm->processes_count--; + goto out; + } + } + /* qpd not found in dqm list */ + retval = 1; +out: + mutex_unlock(&dqm->lock); + return retval; +} + +static int +set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid, + unsigned int vmid) +{ + uint32_t pasid_mapping; + + pasid_mapping = (pasid == 0) ? 0 : + (uint32_t)pasid | + ATC_VMID_PASID_MAPPING_VALID; + + return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( + dqm->dev->kgd, pasid_mapping, + vmid); +} + +int init_pipelines(struct device_queue_manager *dqm, + unsigned int pipes_num, unsigned int first_pipe) +{ + void *hpdptr; + struct mqd_manager *mqd; + unsigned int i, err, inx; + uint64_t pipe_hpd_addr; + + BUG_ON(!dqm || !dqm->dev); + + pr_debug("kfd: In func %s\n", __func__); + + /* + * Allocate memory for the HPDs. This is hardware-owned per-pipe data. + * The driver never accesses this memory after zeroing it. + * It doesn't even have to be saved/restored on suspend/resume + * because it contains no data when there are no active queues. 
+ */ + + err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num, + &dqm->pipeline_mem); + + if (err) { + pr_err("kfd: error allocate vidmem num pipes: %d\n", + pipes_num); + return -ENOMEM; + } + + hpdptr = dqm->pipeline_mem->cpu_ptr; + dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr; + + memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num); + + mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + if (mqd == NULL) { + kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); + return -ENOMEM; + } + + for (i = 0; i < pipes_num; i++) { + inx = i + first_pipe; + /* + * HPD buffer on GTT is allocated by amdkfd, no need to waste + * space in GTT for pipelines we don't initialize + */ + pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; + pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); + /* = log2(bytes/4)-1 */ + dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx, + CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); + } + + return 0; +} + +static int init_scheduler(struct device_queue_manager *dqm) +{ + int retval; + + BUG_ON(!dqm); + + pr_debug("kfd: In %s\n", __func__); + + retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); + return retval; +} + +static int initialize_nocpsch(struct device_queue_manager *dqm) +{ + int i; + + BUG_ON(!dqm); + + pr_debug("kfd: In func %s num of pipes: %d\n", + __func__, get_pipes_num(dqm)); + + mutex_init(&dqm->lock); + INIT_LIST_HEAD(&dqm->queues); + dqm->queue_count = dqm->next_pipe_to_allocate = 0; + dqm->sdma_queue_count = 0; + dqm->allocated_queues = kcalloc(get_pipes_num(dqm), + sizeof(unsigned int), GFP_KERNEL); + if (!dqm->allocated_queues) { + mutex_destroy(&dqm->lock); + return -ENOMEM; + } + + for (i = 0; i < get_pipes_num(dqm); i++) + dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1; + + dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1; + dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1; + + init_scheduler(dqm); + return 0; +} + +static void uninitialize_nocpsch(struct device_queue_manager 
*dqm) +{ + int i; + + BUG_ON(!dqm); + + BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0); + + kfree(dqm->allocated_queues); + for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) + kfree(dqm->mqds[i]); + mutex_destroy(&dqm->lock); + kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); +} + +static int start_nocpsch(struct device_queue_manager *dqm) +{ + return 0; +} + +static int stop_nocpsch(struct device_queue_manager *dqm) +{ + return 0; +} + +static int allocate_sdma_queue(struct device_queue_manager *dqm, + unsigned int *sdma_queue_id) +{ + int bit; + + if (dqm->sdma_bitmap == 0) + return -ENOMEM; + + bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap, + CIK_SDMA_QUEUES); + + clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap); + *sdma_queue_id = bit; + + return 0; +} + +static void deallocate_sdma_queue(struct device_queue_manager *dqm, + unsigned int sdma_queue_id) +{ + if (sdma_queue_id >= CIK_SDMA_QUEUES) + return; + set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap); +} + +static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, + struct qcm_process_device *qpd) +{ + uint32_t value = SDMA_ATC; + + if (q->process->is_32bit_user_mode) + value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd)); + else + value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64( + qpd_to_pdd(qpd))); + q->properties.sdma_vm_addr = value; +} + +static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd) +{ + struct mqd_manager *mqd; + int retval; + + mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); + if (!mqd) + return -ENOMEM; + + retval = allocate_sdma_queue(dqm, &q->sdma_id); + if (retval != 0) + return retval; + + q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE; + q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM; + + pr_debug("kfd: sdma id is: %d\n", q->sdma_id); + pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); + pr_debug(" sdma 
engine id: %d\n", q->properties.sdma_engine_id); + + init_sdma_vm(dqm, q, qpd); + retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, + &q->gart_mqd_addr, &q->properties); + if (retval != 0) { + deallocate_sdma_queue(dqm, q->sdma_id); + return retval; + } + + retval = mqd->load_mqd(mqd, q->mqd, 0, + 0, NULL); + if (retval != 0) { + deallocate_sdma_queue(dqm, q->sdma_id); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + + return 0; +} + +/* + * Device Queue Manager implementation for cp scheduler + */ + +static int set_sched_resources(struct device_queue_manager *dqm) +{ + struct scheduling_resources res; + unsigned int queue_num, queue_mask; + + BUG_ON(!dqm); + + pr_debug("kfd: In func %s\n", __func__); + + queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE; + queue_mask = (1 << queue_num) - 1; + res.vmid_mask = (1 << VMID_PER_DEVICE) - 1; + res.vmid_mask <<= KFD_VMID_START_OFFSET; + res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE); + res.gws_mask = res.oac_mask = res.gds_heap_base = + res.gds_heap_size = 0; + + pr_debug("kfd: scheduling resources:\n" + " vmid mask: 0x%8X\n" + " queue mask: 0x%8llX\n", + res.vmid_mask, res.queue_mask); + + return pm_send_set_resources(&dqm->packets, &res); +} + +static int initialize_cpsch(struct device_queue_manager *dqm) +{ + int retval; + + BUG_ON(!dqm); + + pr_debug("kfd: In func %s num of pipes: %d\n", + __func__, get_pipes_num_cpsch()); + + mutex_init(&dqm->lock); + INIT_LIST_HEAD(&dqm->queues); + dqm->queue_count = dqm->processes_count = 0; + dqm->sdma_queue_count = 0; + dqm->active_runlist = false; + retval = dqm->ops_asic_specific.initialize(dqm); + if (retval != 0) + goto fail_init_pipelines; + + return 0; + +fail_init_pipelines: + mutex_destroy(&dqm->lock); + return retval; +} + +static int start_cpsch(struct device_queue_manager *dqm) +{ + struct device_process_node *node; + int retval; + + BUG_ON(!dqm); + + retval = 0; + + retval = pm_init(&dqm->packets, dqm); + if (retval 
!= 0) + goto fail_packet_manager_init; + + retval = set_sched_resources(dqm); + if (retval != 0) + goto fail_set_sched_resources; + + pr_debug("kfd: allocating fence memory\n"); + + /* allocate fence memory on the gart */ + retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr), + &dqm->fence_mem); + + if (retval != 0) + goto fail_allocate_vidmem; + + dqm->fence_addr = dqm->fence_mem->cpu_ptr; + dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; + list_for_each_entry(node, &dqm->queues, list) + if (node->qpd->pqm->process && dqm->dev) + kfd_bind_process_to_device(dqm->dev, + node->qpd->pqm->process); + + execute_queues_cpsch(dqm, true); + + return 0; +fail_allocate_vidmem: +fail_set_sched_resources: + pm_uninit(&dqm->packets); +fail_packet_manager_init: + return retval; +} + +static int stop_cpsch(struct device_queue_manager *dqm) +{ + struct device_process_node *node; + struct kfd_process_device *pdd; + + BUG_ON(!dqm); + + destroy_queues_cpsch(dqm, true); + + list_for_each_entry(node, &dqm->queues, list) { + pdd = qpd_to_pdd(node->qpd); + pdd->bound = false; + } + kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); + pm_uninit(&dqm->packets); + + return 0; +} + +static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, + struct kernel_queue *kq, + struct qcm_process_device *qpd) +{ + BUG_ON(!dqm || !kq || !qpd); + + pr_debug("kfd: In func %s\n", __func__); + + mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. 
+ */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + + list_add(&kq->list, &qpd->priv_queue_list); + dqm->queue_count++; + qpd->is_debug = true; + execute_queues_cpsch(dqm, false); + mutex_unlock(&dqm->lock); + + return 0; +} + +static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, + struct kernel_queue *kq, + struct qcm_process_device *qpd) +{ + BUG_ON(!dqm || !kq); + + pr_debug("kfd: In %s\n", __func__); + + mutex_lock(&dqm->lock); + destroy_queues_cpsch(dqm, false); + list_del(&kq->list); + dqm->queue_count--; + qpd->is_debug = false; + execute_queues_cpsch(dqm, false); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type. + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); +} + +static void select_sdma_engine_id(struct queue *q) +{ + static int sdma_id; + + q->sdma_id = sdma_id; + sdma_id = (sdma_id + 1) % 2; +} + +static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, + struct qcm_process_device *qpd, int *allocate_vmid) +{ + int retval; + struct mqd_manager *mqd; + + BUG_ON(!dqm || !q || !qpd); + + retval = 0; + + if (allocate_vmid) + *allocate_vmid = 0; + + mutex_lock(&dqm->lock); + + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + retval = -EPERM; + goto out; + } + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + select_sdma_engine_id(q); + + mqd = dqm->ops.get_mqd_manager(dqm, + get_mqd_type_from_queue_type(q->properties.type)); + + if (mqd == NULL) { + mutex_unlock(&dqm->lock); + return -ENOMEM; + } + + init_sdma_vm(dqm, q, qpd); + + retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, + &q->gart_mqd_addr, &q->properties); + if (retval != 0) + goto out; + + list_add(&q->list, 
&qpd->queues_list); + if (q->properties.is_active) { + dqm->queue_count++; + retval = execute_queues_cpsch(dqm, false); + } + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count++; + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. + */ + dqm->total_queue_count++; + + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + +out: + mutex_unlock(&dqm->lock); + return retval; +} + +static int amdkfd_fence_wait_timeout(unsigned int *fence_addr, + unsigned int fence_value, + unsigned long timeout) +{ + BUG_ON(!fence_addr); + timeout += jiffies; + + while (*fence_addr != fence_value) { + if (time_after(jiffies, timeout)) { + pr_err("kfd: qcm fence wait loop timeout expired\n"); + return -ETIME; + } + schedule(); + } + + return 0; +} + +static int destroy_sdma_queues(struct device_queue_manager *dqm, + unsigned int sdma_engine) +{ + return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, + KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, + sdma_engine); +} + +static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock) +{ + int retval; + + BUG_ON(!dqm); + + retval = 0; + + if (lock) + mutex_lock(&dqm->lock); + if (dqm->active_runlist == false) + goto out; + + pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n", + dqm->sdma_queue_count); + + if (dqm->sdma_queue_count > 0) { + destroy_sdma_queues(dqm, 0); + destroy_sdma_queues(dqm, 1); + } + + retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, + KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0); + if (retval != 0) + goto out; + + *dqm->fence_addr = KFD_FENCE_INIT; + pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr, + KFD_FENCE_COMPLETED); + /* should be timed out */ + amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED, + QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS); + pm_release_ib(&dqm->packets); + dqm->active_runlist = false; + +out: + if (lock) + 
mutex_unlock(&dqm->lock); + return retval; +} + +static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock) +{ + int retval; + + BUG_ON(!dqm); + + if (lock) + mutex_lock(&dqm->lock); + + retval = destroy_queues_cpsch(dqm, false); + if (retval != 0) { + pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption"); + goto out; + } + + if (dqm->queue_count <= 0 || dqm->processes_count <= 0) { + retval = 0; + goto out; + } + + if (dqm->active_runlist) { + retval = 0; + goto out; + } + + retval = pm_send_runlist(&dqm->packets, &dqm->queues); + if (retval != 0) { + pr_err("kfd: failed to execute runlist"); + goto out; + } + dqm->active_runlist = true; + +out: + if (lock) + mutex_unlock(&dqm->lock); + return retval; +} + +static int destroy_queue_cpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + int retval; + struct mqd_manager *mqd; + + BUG_ON(!dqm || !qpd || !q); + + retval = 0; + + /* remove queue from list to prevent rescheduling after preemption */ + mutex_lock(&dqm->lock); + mqd = dqm->ops.get_mqd_manager(dqm, + get_mqd_type_from_queue_type(q->properties.type)); + if (!mqd) { + retval = -ENOMEM; + goto failed; + } + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count--; + + list_del(&q->list); + if (q->properties.is_active) + dqm->queue_count--; + + execute_queues_cpsch(dqm, false); + + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + + mutex_unlock(&dqm->lock); + + return 0; + +failed: + mutex_unlock(&dqm->lock); + return retval; +} + +/* + * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to + * stay in user mode. + */ +#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL +/* APE1 limit is inclusive and 64K aligned. 
*/ +#define APE1_LIMIT_ALIGNMENT 0xFFFF + +static bool set_cache_memory_policy(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) +{ + bool retval; + + pr_debug("kfd: In func %s\n", __func__); + + mutex_lock(&dqm->lock); + + if (alternate_aperture_size == 0) { + /* base > limit disables APE1 */ + qpd->sh_mem_ape1_base = 1; + qpd->sh_mem_ape1_limit = 0; + } else { + /* + * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, + * SH_MEM_APE1_BASE[31:0], 0x0000 } + * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, + * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } + * Verify that the base and size parameters can be + * represented in this format and convert them. + * Additionally restrict APE1 to user-mode addresses. + */ + + uint64_t base = (uintptr_t)alternate_aperture_base; + uint64_t limit = base + alternate_aperture_size - 1; + + if (limit <= base) + goto out; + + if ((base & APE1_FIXED_BITS_MASK) != 0) + goto out; + + if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) + goto out; + + qpd->sh_mem_ape1_base = base >> 16; + qpd->sh_mem_ape1_limit = limit >> 16; + } + + retval = dqm->ops_asic_specific.set_cache_memory_policy( + dqm, + qpd, + default_policy, + alternate_policy, + alternate_aperture_base, + alternate_aperture_size); + + if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) + program_sh_mem_settings(dqm, qpd); + + pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n", + qpd->sh_mem_config, qpd->sh_mem_ape1_base, + qpd->sh_mem_ape1_limit); + + mutex_unlock(&dqm->lock); + return retval; + +out: + mutex_unlock(&dqm->lock); + return false; +} + +struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) +{ + struct device_queue_manager *dqm; + + BUG_ON(!dev); + + pr_debug("kfd: loading device queue manager\n"); + + dqm = kzalloc(sizeof(struct 
device_queue_manager), GFP_KERNEL); + if (!dqm) + return NULL; + + dqm->dev = dev; + switch (sched_policy) { + case KFD_SCHED_POLICY_HWS: + case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: + /* initialize dqm for cp scheduling */ + dqm->ops.create_queue = create_queue_cpsch; + dqm->ops.initialize = initialize_cpsch; + dqm->ops.start = start_cpsch; + dqm->ops.stop = stop_cpsch; + dqm->ops.destroy_queue = destroy_queue_cpsch; + dqm->ops.update_queue = update_queue; + dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch; + dqm->ops.register_process = register_process_nocpsch; + dqm->ops.unregister_process = unregister_process_nocpsch; + dqm->ops.uninitialize = uninitialize_nocpsch; + dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; + dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; + dqm->ops.set_cache_memory_policy = set_cache_memory_policy; + break; + case KFD_SCHED_POLICY_NO_HWS: + /* initialize dqm for no cp scheduling */ + dqm->ops.start = start_nocpsch; + dqm->ops.stop = stop_nocpsch; + dqm->ops.create_queue = create_queue_nocpsch; + dqm->ops.destroy_queue = destroy_queue_nocpsch; + dqm->ops.update_queue = update_queue; + dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch; + dqm->ops.register_process = register_process_nocpsch; + dqm->ops.unregister_process = unregister_process_nocpsch; + dqm->ops.initialize = initialize_nocpsch; + dqm->ops.uninitialize = uninitialize_nocpsch; + dqm->ops.set_cache_memory_policy = set_cache_memory_policy; + break; + default: + BUG(); + break; + } + + switch (dev->device_info->asic_family) { + case CHIP_CARRIZO: + device_queue_manager_init_vi(&dqm->ops_asic_specific); + break; + + case CHIP_KAVERI: + device_queue_manager_init_cik(&dqm->ops_asic_specific); + break; + } + + if (dqm->ops.initialize(dqm) != 0) { + kfree(dqm); + return NULL; + } + + return dqm; +} + +void device_queue_manager_uninit(struct device_queue_manager *dqm) +{ + BUG_ON(!dqm); + + dqm->ops.uninitialize(dqm); + kfree(dqm); +} diff --git 
a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h new file mode 100644 index 000000000..488f51d19 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -0,0 +1,180 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef KFD_DEVICE_QUEUE_MANAGER_H_ +#define KFD_DEVICE_QUEUE_MANAGER_H_ + +#include +#include +#include "kfd_priv.h" +#include "kfd_mqd_manager.h" + +#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500) +#define QUEUES_PER_PIPE (8) +#define PIPE_PER_ME_CP_SCHEDULING (3) +#define CIK_VMID_NUM (8) +#define KFD_VMID_START_OFFSET (8) +#define VMID_PER_DEVICE CIK_VMID_NUM +#define KFD_DQM_FIRST_PIPE (0) +#define CIK_SDMA_QUEUES (4) +#define CIK_SDMA_QUEUES_PER_ENGINE (2) +#define CIK_SDMA_ENGINE_NUM (2) + +struct device_process_node { + struct qcm_process_device *qpd; + struct list_head list; +}; + +/** + * struct device_queue_manager_ops + * + * @create_queue: Queue creation routine. + * + * @destroy_queue: Queue destruction routine. + * + * @update_queue: Queue update routine. + * + * @get_mqd_manager: Returns the mqd manager according to the mqd type. + * + * @execute_queues: Dispatches the queues list to the H/W. + * + * @register_process: This routine associates a specific process with device. + * + * @unregister_process: destroys the association between the process and the device. + * + * @initialize: Initializes the pipelines and memory module for that device. + * + * @start: Initializes the resources/modules that the device needs for queues + * execution. This function is called on device initialization and after the + * system woke up after suspension. + * + * @stop: This routine stops execution of all the active queues running on the + * H/W and basically this function is called on system suspend. + * + * @uninitialize: Destroys all the device queue manager resources allocated in + * initialize routine. + * + * @create_kernel_queue: Creates kernel queue. Used for debug queue. + * + * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue. + * + * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the + * memory apertures. 
+ * + */ + +struct device_queue_manager_ops { + int (*create_queue)(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd, + int *allocate_vmid); + int (*destroy_queue)(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q); + int (*update_queue)(struct device_queue_manager *dqm, + struct queue *q); + + struct mqd_manager * (*get_mqd_manager) + (struct device_queue_manager *dqm, + enum KFD_MQD_TYPE type); + + int (*register_process)(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); + int (*unregister_process)(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); + int (*initialize)(struct device_queue_manager *dqm); + int (*start)(struct device_queue_manager *dqm); + int (*stop)(struct device_queue_manager *dqm); + void (*uninitialize)(struct device_queue_manager *dqm); + int (*create_kernel_queue)(struct device_queue_manager *dqm, + struct kernel_queue *kq, + struct qcm_process_device *qpd); + void (*destroy_kernel_queue)(struct device_queue_manager *dqm, + struct kernel_queue *kq, + struct qcm_process_device *qpd); + bool (*set_cache_memory_policy)(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size); +}; + +/** + * struct device_queue_manager + * + * This struct is a base class for the kfd queues scheduler in the + * device level. The device base class should expose the basic operations + * for queue creation and queue destruction. This base class hides the + * scheduling mode of the driver and the specific implementation of the + * concrete device. This class is the only class in the queues scheduler + * that configures the H/W. 
+ * + */ + +struct device_queue_manager { + struct device_queue_manager_ops ops; + struct device_queue_manager_ops ops_asic_specific; + + struct mqd_manager *mqds[KFD_MQD_TYPE_MAX]; + struct packet_manager packets; + struct kfd_dev *dev; + struct mutex lock; + struct list_head queues; + unsigned int processes_count; + unsigned int queue_count; + unsigned int sdma_queue_count; + unsigned int total_queue_count; + unsigned int next_pipe_to_allocate; + unsigned int *allocated_queues; + unsigned int sdma_bitmap; + unsigned int vmid_bitmap; + uint64_t pipelines_addr; + struct kfd_mem_obj *pipeline_mem; + uint64_t fence_gpu_addr; + unsigned int *fence_addr; + struct kfd_mem_obj *fence_mem; + bool active_runlist; +}; + +void device_queue_manager_init_cik(struct device_queue_manager_ops *ops); +void device_queue_manager_init_vi(struct device_queue_manager_ops *ops); +void program_sh_mem_settings(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +int init_pipelines(struct device_queue_manager *dqm, + unsigned int pipes_num, unsigned int first_pipe); +unsigned int get_first_pipe(struct device_queue_manager *dqm); +unsigned int get_pipes_num(struct device_queue_manager *dqm); + +extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) +{ + return (pdd->lds_base >> 16) & 0xFF; +} + +extern inline unsigned int +get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) +{ + return (pdd->lds_base >> 60) & 0x0E; +} + +#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c new file mode 100644 index 000000000..5469efe05 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -0,0 +1,135 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_device_queue_manager.h" +#include "cik_regs.h" + +static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size); +static int register_process_cik(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +static int initialize_cpsch_cik(struct device_queue_manager *dqm); + +void device_queue_manager_init_cik(struct device_queue_manager_ops *ops) +{ + ops->set_cache_memory_policy = set_cache_memory_policy_cik; + ops->register_process = register_process_cik; + ops->initialize = initialize_cpsch_cik; +} + +static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) +{ + /* In 64-bit mode, we can only control the top 3 bits of the LDS, + * scratch and GPUVM apertures. + * The hardware fills in the remaining 59 bits according to the + * following pattern: + * LDS: X0000000'00000000 - X0000001'00000000 (4GB) + * Scratch: X0000001'00000000 - X0000002'00000000 (4GB) + * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB) + * + * (where X/Y is the configurable nybble with the low-bit 0) + * + * LDS and scratch will have the same top nybble programmed in the + * top 3 bits of SH_MEM_BASES.PRIVATE_BASE. + * GPUVM can have a different top nybble programmed in the + * top 3 bits of SH_MEM_BASES.SHARED_BASE. + * We don't bother to support different top nybbles + * for LDS/Scratch and GPUVM. 
+ */ + + BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || + top_address_nybble == 0); + + return PRIVATE_BASE(top_address_nybble << 12) | + SHARED_BASE(top_address_nybble << 12); +} + +static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) +{ + uint32_t default_mtype; + uint32_t ape1_mtype; + + default_mtype = (default_policy == cache_policy_coherent) ? + MTYPE_NONCACHED : + MTYPE_CACHED; + + ape1_mtype = (alternate_policy == cache_policy_coherent) ? + MTYPE_NONCACHED : + MTYPE_CACHED; + + qpd->sh_mem_config = (qpd->sh_mem_config & PTR32) + | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) + | DEFAULT_MTYPE(default_mtype) + | APE1_MTYPE(ape1_mtype); + + return true; +} + +static int register_process_cik(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct kfd_process_device *pdd; + unsigned int temp; + + BUG_ON(!dqm || !qpd); + + pdd = qpd_to_pdd(qpd); + + /* check if sh_mem_config register already configured */ + if (qpd->sh_mem_config == 0) { + qpd->sh_mem_config = + ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) | + DEFAULT_MTYPE(MTYPE_NONCACHED) | + APE1_MTYPE(MTYPE_NONCACHED); + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + } + + if (qpd->pqm->process->is_32bit_user_mode) { + temp = get_sh_mem_bases_32(pdd); + qpd->sh_mem_bases = SHARED_BASE(temp); + qpd->sh_mem_config |= PTR32; + } else { + temp = get_sh_mem_bases_nybble_64(pdd); + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); + } + + pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", + qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); + + return 0; +} + +static int initialize_cpsch_cik(struct device_queue_manager *dqm) +{ + return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); +} diff 
--git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c new file mode 100644 index 000000000..20553dcd2 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -0,0 +1,64 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_device_queue_manager.h" + +static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size); +static int register_process_vi(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +static int initialize_cpsch_vi(struct device_queue_manager *dqm); + +void device_queue_manager_init_vi(struct device_queue_manager_ops *ops) +{ + pr_warn("amdkfd: VI DQM is not currently supported\n"); + + ops->set_cache_memory_policy = set_cache_memory_policy_vi; + ops->register_process = register_process_vi; + ops->initialize = initialize_cpsch_vi; +} + +static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) +{ + return false; +} + +static int register_process_vi(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + return -1; +} + +static int initialize_cpsch_vi(struct device_queue_manager *dqm) +{ + return 0; +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c new file mode 100644 index 000000000..17e56dcc8 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -0,0 +1,249 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "kfd_priv.h" +#include +#include +#include +#include + +/* + * This extension supports kernel-level doorbell management for + * the kernel queues. + * Basically the last doorbells page is devoted to kernel queues + * and that ensures that any user process won't get access to the + * kernel doorbells page + */ + +#define KERNEL_DOORBELL_PASID 1 +#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4 + +/* + * Each device exposes a doorbell aperture, a PCI MMIO aperture that + * receives 32-bit writes that are passed to queues as wptr values. + * The doorbells are intended to be written by applications as part + * of queueing work on user-mode queues. + * We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks. + * We map the doorbell address space into user-mode when a process creates + * its first queue on each device. 
+ * Although the mapping is done by KFD, it is equivalent to an mmap of + * the /dev/kfd with the particular device encoded in the mmap offset. + * There will be other uses for mmap of /dev/kfd, so only a range of + * offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells. + */ + +/* # of doorbell bytes allocated for each process. */ +static inline size_t doorbell_process_allocation(void) +{ + return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES * + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + PAGE_SIZE); +} + +/* Doorbell calculations for device init. */ +void kfd_doorbell_init(struct kfd_dev *kfd) +{ + size_t doorbell_start_offset; + size_t doorbell_aperture_size; + size_t doorbell_process_limit; + + /* + * We start with calculations in bytes because the input data might + * only be byte-aligned. + * Only after we have done the rounding can we assume any alignment. + */ + + doorbell_start_offset = + roundup(kfd->shared_resources.doorbell_start_offset, + doorbell_process_allocation()); + + doorbell_aperture_size = + rounddown(kfd->shared_resources.doorbell_aperture_size, + doorbell_process_allocation()); + + if (doorbell_aperture_size > doorbell_start_offset) + doorbell_process_limit = + (doorbell_aperture_size - doorbell_start_offset) / + doorbell_process_allocation(); + else + doorbell_process_limit = 0; + + kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address + + doorbell_start_offset; + + kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32); + kfd->doorbell_process_limit = doorbell_process_limit - 1; + + kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, + doorbell_process_allocation()); + + BUG_ON(!kfd->doorbell_kernel_ptr); + + pr_debug("kfd: doorbell initialization:\n"); + pr_debug("kfd: doorbell base == 0x%08lX\n", + (uintptr_t)kfd->doorbell_base); + + pr_debug("kfd: doorbell_id_offset == 0x%08lX\n", + kfd->doorbell_id_offset); + + pr_debug("kfd: doorbell_process_limit == 0x%08lX\n", + doorbell_process_limit); + + pr_debug("kfd: 
doorbell_kernel_offset == 0x%08lX\n", + (uintptr_t)kfd->doorbell_base); + + pr_debug("kfd: doorbell aperture size == 0x%08lX\n", + kfd->shared_resources.doorbell_aperture_size); + + pr_debug("kfd: doorbell kernel address == 0x%08lX\n", + (uintptr_t)kfd->doorbell_kernel_ptr); +} + +int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) +{ + phys_addr_t address; + struct kfd_dev *dev; + + /* + * For simplicitly we only allow mapping of the entire doorbell + * allocation of a single device & process. + */ + if (vma->vm_end - vma->vm_start != doorbell_process_allocation()) + return -EINVAL; + + /* Find kfd device according to gpu id */ + dev = kfd_device_by_id(vma->vm_pgoff); + if (dev == NULL) + return -EINVAL; + + /* Calculate physical address of doorbell */ + address = kfd_get_process_doorbells(dev, process); + + vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | + VM_DONTDUMP | VM_PFNMAP; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n" + " target user address == 0x%08llX\n" + " physical address == 0x%08llX\n" + " vm_flags == 0x%04lX\n" + " size == 0x%04lX\n", + (unsigned long long) vma->vm_start, address, vma->vm_flags, + doorbell_process_allocation()); + + + return io_remap_pfn_range(vma, + vma->vm_start, + address >> PAGE_SHIFT, + doorbell_process_allocation(), + vma->vm_page_prot); +} + + +/* get kernel iomem pointer for a doorbell */ +u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, + unsigned int *doorbell_off) +{ + u32 inx; + + BUG_ON(!kfd || !doorbell_off); + + mutex_lock(&kfd->doorbell_mutex); + inx = find_first_zero_bit(kfd->doorbell_available_index, + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); + + __set_bit(inx, kfd->doorbell_available_index); + mutex_unlock(&kfd->doorbell_mutex); + + if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) + return NULL; + + /* + * Calculating the kernel doorbell offset using "faked" kernel + * pasid that 
allocated for kernel queues only + */ + *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() / + sizeof(u32)) + inx; + + pr_debug("kfd: get kernel queue doorbell\n" + " doorbell offset == 0x%08d\n" + " kernel address == 0x%08lX\n", + *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); + + return kfd->doorbell_kernel_ptr + inx; +} + +void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) +{ + unsigned int inx; + + BUG_ON(!kfd || !db_addr); + + inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr); + + mutex_lock(&kfd->doorbell_mutex); + __clear_bit(inx, kfd->doorbell_available_index); + mutex_unlock(&kfd->doorbell_mutex); +} + +inline void write_kernel_doorbell(u32 __iomem *db, u32 value) +{ + if (db) { + writel(value, db); + pr_debug("writing %d to doorbell address 0x%p\n", value, db); + } +} + +/* + * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1 + * to doorbells with the process's doorbell page + */ +unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, + struct kfd_process *process, + unsigned int queue_id) +{ + /* + * doorbell_id_offset accounts for doorbells taken by KGD. 
+ * pasid * doorbell_process_allocation/sizeof(u32) adjusts + * to the process's doorbells + */ + return kfd->doorbell_id_offset + + process->pasid * (doorbell_process_allocation()/sizeof(u32)) + + queue_id; +} + +uint64_t kfd_get_number_elems(struct kfd_dev *kfd) +{ + uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size - + kfd->shared_resources.doorbell_start_offset) / + doorbell_process_allocation() + 1; + + return num_of_elems; + +} + +phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, + struct kfd_process *process) +{ + return dev->doorbell_base + + process->pasid * doorbell_process_allocation(); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c new file mode 100644 index 000000000..35b987574 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -0,0 +1,355 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kfd_priv.h" +#include +#include +#include + +/* + * The primary memory I/O features being added for revisions of gfxip + * beyond 7.0 (Kaveri) are: + * + * Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b + * + * “Flat” shader memory access – These are new shader vector memory + * operations that do not reference a T#/V# so a “pointer” is what is + * sourced from the vector gprs for direct access to memory. + * This pointer space has the Shared(LDS) and Private(Scratch) memory + * mapped into this pointer space as apertures. + * The hardware then determines how to direct the memory request + * based on what apertures the request falls in. + * + * Unaligned support and alignment check + * + * + * System Unified Address - SUA + * + * The standard usage for GPU virtual addresses are that they are mapped by + * a set of page tables we call GPUVM and these page tables are managed by + * a combination of vidMM/driver software components. The current virtual + * address (VA) range for GPUVM is 40b. + * + * As of gfxip7.1 and beyond we’re adding the ability for compute memory + * clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access + * the same page tables used by host x86 processors and that are managed by + * the operating system. This is via a technique and hardware called ATC/IOMMU. + * The GPU has the capability of accessing both the GPUVM and ATC address + * spaces for a given VMID (process) simultaneously and we call this feature + * system unified address (SUA). 
+ * + * There are three fundamental address modes of operation for a given VMID + * (process) on the GPU: + * + * HSA64 – 64b pointers and the default address space is ATC + * HSA32 – 32b pointers and the default address space is ATC + * GPUVM – 64b pointers and the default address space is GPUVM (driver + * model mode) + * + * + * HSA64 - ATC/IOMMU 64b + * + * A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized + * by the CPU so an AMD CPU can only access the high area + * (VA[63:47] == 0x1FFFF) and low area (VA[63:47 == 0) of the address space + * so the actual VA carried to translation is 48b. There is a “hole” in + * the middle of the 64b VA space. + * + * The GPU not only has access to all of the CPU accessible address space via + * ATC/IOMMU, but it also has access to the GPUVM address space. The “system + * unified address” feature (SUA) is the mapping of GPUVM and ATC address + * spaces into a unified pointer space. The method we take for 64b mode is + * to map the full 40b GPUVM address space into the hole of the 64b address + * space. + + * The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we + * direct requests to be translated via GPUVM page tables instead of the + * IOMMU path. + * + * + * 64b to 49b Address conversion + * + * Note that there are still significant portions of unused regions (holes) + * in the 64b address space even for the GPU. There are several places in + * the pipeline (sw and hw), we wish to compress the 64b virtual address + * to a 49b address. This 49b address is constituted of an “ATC” bit + * plus a 48b virtual address. This 49b address is what is passed to the + * translation hardware. ATC==0 means the 48b address is a GPUVM address + * (max of 2^40 – 1) intended to be translated via GPUVM page tables. + * ATC==1 means the 48b address is intended to be translated via IOMMU + * page tables. 
+ * + * A 64b pointer is compared to the apertures that are defined (Base/Limit), in + * this case the GPUVM aperture (red) is defined and if a pointer falls in this + * aperture, we subtract the GPUVM_Base address and set the ATC bit to zero + * as part of the 64b to 49b conversion. + * + * Where this 64b to 49b conversion is done is a function of the usage. + * Most GPU memory access is via memory objects where the driver builds + * a descriptor which consists of a base address and a memory access by + * the GPU usually consists of some kind of an offset or Cartesian coordinate + * that references this memory descriptor. This is the case for shader + * instructions that reference the T# or V# constants, or for specified + * locations of assets (ex. the shader program location). In these cases + * the driver is what handles the 64b to 49b conversion and the base + * address in the descriptor (ex. V# or T# or shader program location) + * is defined as a 48b address w/ an ATC bit. For this usage a given + * memory object cannot straddle multiple apertures in the 64b address + * space. For example a shader program cannot jump in/out between ATC + * and GPUVM space. + * + * In some cases we wish to pass a 64b pointer to the GPU hardware and + * the GPU hw does the 64b to 49b conversion before passing memory + * requests to the cache/memory system. This is the case for the + * S_LOAD and FLAT_* shader memory instructions where we have 64b pointers + * in scalar and vector GPRs respectively. + * + * In all cases (no matter where the 64b -> 49b conversion is done), the gfxip + * hardware sends a 48b address along w/ an ATC bit, to the memory controller + * on the memory request interfaces. + * + * _MC_rdreq_atc // read request ATC bit + * + * 0 : _MC_rdreq_addr is a GPUVM VA + * + * 1 : _MC_rdreq_addr is a ATC VA + * + * + * “Spare” aperture (APE1) + * + * We use the GPUVM aperture to differentiate ATC vs. 
GPUVM, but we also use + * apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the + * config tables for setting cache policies. The “spare” (APE1) aperture is + * motivated by getting a different Mtype from the default. + * The default aperture isn’t an actual base/limit aperture; it is just the + * address space that doesn’t hit any defined base/limit apertures. + * The following diagram is a complete picture of the gfxip7.x SUA apertures. + * The APE1 can be placed either below or above + * the hole (cannot be in the hole). + * + * + * General Aperture definitions and rules + * + * An aperture register definition consists of a Base, Limit, Mtype, and + * usually an ATC bit indicating which translation tables that aperture uses. + * In all cases (for SUA and DUA apertures discussed later), aperture base + * and limit definitions are 64KB aligned. + * + * _Base[63:0] = { _Base_register[63:16], 0x0000 } + * + * _Limit[63:0] = { _Limit_register[63:16], 0xFFFF } + * + * The base and limit are considered inclusive to an aperture so being + * inside an aperture means (address >= Base) AND (address <= Limit). + * + * In no case is a payload that straddles multiple apertures expected to work. + * For example a load_dword_x4 that starts in one aperture and ends in another, + * does not work. For the vector FLAT_* ops we have detection capability in + * the shader for reporting a “memory violation” back to the + * SQ block for use in traps. + * A memory violation results when an op falls into the hole, + * or a payload straddles multiple apertures. The S_LOAD instruction + * does not have this detection. + * + * Apertures cannot overlap. + * + * + * + * HSA32 - ATC/IOMMU 32b + * + * For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR + * instead of two for the S_LOAD and FLAT_* ops. 
The entire GPUVM space of 40b + * will not fit so there is only partial visibility to the GPUVM + * space (defined by the aperture) for S_LOAD and FLAT_* ops. + * There is no spare (APE1) aperture for HSA32 mode. + * + * + * GPUVM 64b mode (driver model) + * + * This mode is related to HSA64 in that the difference really is that + * the default aperture is GPUVM (ATC==0) and not ATC space. + * We have gfxip7.x hardware that has FLAT_* and S_LOAD support for + * SUA GPUVM mode, but does not support HSA32/HSA64. + * + * + * Device Unified Address - DUA + * + * Device unified address (DUA) is the name of the feature that maps the + * Shared(LDS) memory and Private(Scratch) memory into the overall address + * space for use by the new FLAT_* vector memory ops. The Shared and + * Private memories are mapped as apertures into the address space, + * and the hardware detects when a FLAT_* memory request is to be redirected + * to the LDS or Scratch memory when it falls into one of these apertures. + * Like the SUA apertures, the Shared/Private apertures are 64KB aligned and + * the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes, + * the Shared/Private apertures are always placed in a limited selection of + * options in the hole of the 64b address space. For HSA32 mode, the + * Shared/Private apertures can be placed anywhere in the 32b space + * except at 0. + * + * + * HSA64 Apertures for FLAT_* vector ops + * + * For HSA64 SUA mode, the Shared and Private apertures are always placed + * in the hole w/ a limited selection of possible locations. The requests + * that fall in the private aperture are expanded as a function of the + * work-item id (tid) and redirected to the location of the + * “hidden private memory”. The hidden private can be placed in either GPUVM + * or ATC space. The addresses that fall in the shared aperture are + * re-directed to the on-chip LDS memory hardware. 
+ * + * + * HSA32 Apertures for FLAT_* vector ops + * + * In HSA32 mode, the Private and Shared apertures can be placed anywhere + * in the 32b space except at 0 (Private or Shared Base at zero disables + * the apertures). If the base address of the apertures are non-zero + * (ie apertures exists), the size is always 64KB. + * + * + * GPUVM Apertures for FLAT_* vector ops + * + * In GPUVM mode, the Shared/Private apertures are specified identically + * to HSA64 mode where they are always in the hole at a limited selection + * of locations. + * + * + * Aperture Definitions for SUA and DUA + * + * The interpretation of the aperture register definitions for a given + * VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or + * GPUVM64 discussed in previous sections. The mode is first decoded, and + * then the remaining register decode is a function of the mode. + * + * + * SUA Mode Decode + * + * For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from + * the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and + * the SH_MEM_CONFIG:PTR32 bits. + * + * COMPUTE_DISPATCH_INITIATOR:DATA_ATC SH_MEM_CONFIG:PTR32 Mode + * + * 1 0 HSA64 + * + * 1 1 HSA32 + * + * 0 X GPUVM64 + * + * In general the hardware will ignore the PTR32 bit and treat + * as “0” whenever DATA_ATC = “0”, but sw should set PTR32=0 + * when DATA_ATC=0. + * + * The DATA_ATC bit is only set for compute dispatches. + * All “Draw” dispatches are hardcoded to GPUVM64 mode + * for FLAT_* / S_LOAD operations. 
+ */ + +#define MAKE_GPUVM_APP_BASE(gpu_num) \ + (((uint64_t)(gpu_num) << 61) + 0x1000000000000L) + +#define MAKE_GPUVM_APP_LIMIT(base) \ + (((uint64_t)(base) & \ + 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL) + +#define MAKE_SCRATCH_APP_BASE(gpu_num) \ + (((uint64_t)(gpu_num) << 61) + 0x100000000L) + +#define MAKE_SCRATCH_APP_LIMIT(base) \ + (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) + +#define MAKE_LDS_APP_BASE(gpu_num) \ + (((uint64_t)(gpu_num) << 61) + 0x0) +#define MAKE_LDS_APP_LIMIT(base) \ + (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) + +int kfd_init_apertures(struct kfd_process *process) +{ + uint8_t id = 0; + struct kfd_dev *dev; + struct kfd_process_device *pdd; + + /*Iterating over all devices*/ + while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL && + id < NUM_OF_SUPPORTED_GPUS) { + + pdd = kfd_create_process_device_data(dev, process); + if (pdd == NULL) { + pr_err("Failed to create process device data\n"); + return -1; + } + /* + * For 64 bit process aperture will be statically reserved in + * the x86_64 non canonical process address space + * amdkfd doesn't currently support apertures for 32 bit process + */ + if (process->is_32bit_user_mode) { + pdd->lds_base = pdd->lds_limit = 0; + pdd->gpuvm_base = pdd->gpuvm_limit = 0; + pdd->scratch_base = pdd->scratch_limit = 0; + } else { + /* + * node id couldn't be 0 - the three MSB bits of + * aperture shouldn't be 0 + */ + pdd->lds_base = MAKE_LDS_APP_BASE(id + 1); + + pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); + + pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1); + + pdd->gpuvm_limit = + MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base); + + pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1); + + pdd->scratch_limit = + MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); + } + + dev_dbg(kfd_device, "node id %u\n", id); + dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id); + dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base); + dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit); + 
dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base); + dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit); + dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base); + dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit); + + id++; + } + + return 0; +} + + diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c new file mode 100644 index 000000000..c7d298e62 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -0,0 +1,340 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include "kfd_kernel_queue.h" +#include "kfd_priv.h" +#include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers.h" +#include "kfd_pm4_opcodes.h" + +#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16) + +static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size) +{ + struct queue_properties prop; + int retval; + union PM4_MES_TYPE_3_HEADER nop; + + BUG_ON(!kq || !dev); + BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); + + pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", + __func__, KFD_QUEUE_TYPE_HIQ, queue_size); + + nop.opcode = IT_NOP; + nop.type = PM4_TYPE_3; + nop.u32all |= PM4_COUNT_ZERO; + + kq->dev = dev; + kq->nop_packet = nop.u32all; + switch (type) { + case KFD_QUEUE_TYPE_DIQ: + case KFD_QUEUE_TYPE_HIQ: + kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm, + KFD_MQD_TYPE_HIQ); + break; + default: + BUG(); + break; + } + + if (kq->mqd == NULL) + return false; + + prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); + + if (prop.doorbell_ptr == NULL) { + pr_err("amdkfd: error init doorbell"); + goto err_get_kernel_doorbell; + } + + retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); + if (retval != 0) { + pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); + goto err_pq_allocate_vidmem; + } + + kq->pq_kernel_addr = kq->pq->cpu_ptr; + kq->pq_gpu_addr = kq->pq->gpu_addr; + + retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size); + if (retval == false) + goto err_eop_allocate_vidmem; + + retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel), + &kq->rptr_mem); + + if (retval != 0) + goto err_rptr_allocate_vidmem; + + kq->rptr_kernel = kq->rptr_mem->cpu_ptr; + kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; + + retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel), + &kq->wptr_mem); + + if (retval != 0) + goto err_wptr_allocate_vidmem; + + 
kq->wptr_kernel = kq->wptr_mem->cpu_ptr; + kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr; + + memset(kq->pq_kernel_addr, 0, queue_size); + memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel)); + memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel)); + + prop.queue_size = queue_size; + prop.is_interop = false; + prop.priority = 1; + prop.queue_percent = 100; + prop.type = type; + prop.vmid = 0; + prop.queue_address = kq->pq_gpu_addr; + prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr; + prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr; + prop.eop_ring_buffer_address = kq->eop_gpu_addr; + prop.eop_ring_buffer_size = PAGE_SIZE; + + if (init_queue(&kq->queue, prop) != 0) + goto err_init_queue; + + kq->queue->device = dev; + kq->queue->process = kfd_get_process(current); + + retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd, + &kq->queue->mqd_mem_obj, + &kq->queue->gart_mqd_addr, + &kq->queue->properties); + if (retval != 0) + goto err_init_mqd; + + /* assign HIQ to HQD */ + if (type == KFD_QUEUE_TYPE_HIQ) { + pr_debug("assigning hiq to hqd\n"); + kq->queue->pipe = KFD_CIK_HIQ_PIPE; + kq->queue->queue = KFD_CIK_HIQ_QUEUE; + kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe, + kq->queue->queue, NULL); + } else { + /* allocate fence for DIQ */ + + retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t), + &kq->fence_mem_obj); + + if (retval != 0) + goto err_alloc_fence; + + kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr; + kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr; + } + + print_queue(kq->queue); + + return true; +err_alloc_fence: +err_init_mqd: + uninit_queue(kq->queue); +err_init_queue: + kfd_gtt_sa_free(dev, kq->wptr_mem); +err_wptr_allocate_vidmem: + kfd_gtt_sa_free(dev, kq->rptr_mem); +err_rptr_allocate_vidmem: + kfd_gtt_sa_free(dev, kq->eop_mem); +err_eop_allocate_vidmem: + kfd_gtt_sa_free(dev, kq->pq); +err_pq_allocate_vidmem: + kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); +err_get_kernel_doorbell: + return false; + +} + +static void 
uninitialize(struct kernel_queue *kq) +{ + BUG_ON(!kq); + + if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) + kq->mqd->destroy_mqd(kq->mqd, + NULL, + false, + QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, + kq->queue->pipe, + kq->queue->queue); + else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) + kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); + + kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); + + kfd_gtt_sa_free(kq->dev, kq->rptr_mem); + kfd_gtt_sa_free(kq->dev, kq->wptr_mem); + kq->ops_asic_specific.uninitialize(kq); + kfd_gtt_sa_free(kq->dev, kq->pq); + kfd_release_kernel_doorbell(kq->dev, + kq->queue->properties.doorbell_ptr); + uninit_queue(kq->queue); +} + +static int acquire_packet_buffer(struct kernel_queue *kq, + size_t packet_size_in_dwords, unsigned int **buffer_ptr) +{ + size_t available_size; + size_t queue_size_dwords; + uint32_t wptr, rptr; + unsigned int *queue_address; + + BUG_ON(!kq || !buffer_ptr); + + rptr = *kq->rptr_kernel; + wptr = *kq->wptr_kernel; + queue_address = (unsigned int *)kq->pq_kernel_addr; + queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); + + pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", + __func__, rptr, wptr, queue_address); + + available_size = (rptr - 1 - wptr + queue_size_dwords) % + queue_size_dwords; + + if (packet_size_in_dwords >= queue_size_dwords || + packet_size_in_dwords >= available_size) { + /* + * make sure calling functions know + * acquire_packet_buffer() failed + */ + *buffer_ptr = NULL; + return -ENOMEM; + } + + if (wptr + packet_size_in_dwords >= queue_size_dwords) { + while (wptr > 0) { + queue_address[wptr] = kq->nop_packet; + wptr = (wptr + 1) % queue_size_dwords; + } + } + + *buffer_ptr = &queue_address[wptr]; + kq->pending_wptr = wptr + packet_size_in_dwords; + + return 0; +} + +static void submit_packet(struct kernel_queue *kq) +{ +#ifdef DEBUG + int i; +#endif + + BUG_ON(!kq); + +#ifdef DEBUG + for (i = *kq->wptr_kernel; i < 
kq->pending_wptr; i++) { + pr_debug("0x%2X ", kq->pq_kernel_addr[i]); + if (i % 15 == 0) + pr_debug("\n"); + } + pr_debug("\n"); +#endif + + *kq->wptr_kernel = kq->pending_wptr; + write_kernel_doorbell(kq->queue->properties.doorbell_ptr, + kq->pending_wptr); +} + +static void rollback_packet(struct kernel_queue *kq) +{ + BUG_ON(!kq); + kq->pending_wptr = *kq->queue->properties.write_ptr; +} + +struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, + enum kfd_queue_type type) +{ + struct kernel_queue *kq; + + BUG_ON(!dev); + + kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL); + if (!kq) + return NULL; + + kq->ops.initialize = initialize; + kq->ops.uninitialize = uninitialize; + kq->ops.acquire_packet_buffer = acquire_packet_buffer; + kq->ops.submit_packet = submit_packet; + kq->ops.rollback_packet = rollback_packet; + + switch (dev->device_info->asic_family) { + case CHIP_CARRIZO: + kernel_queue_init_vi(&kq->ops_asic_specific); + break; + + case CHIP_KAVERI: + kernel_queue_init_cik(&kq->ops_asic_specific); + break; + } + + if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { + pr_err("amdkfd: failed to init kernel queue\n"); + kfree(kq); + return NULL; + } + return kq; +} + +void kernel_queue_uninit(struct kernel_queue *kq) +{ + BUG_ON(!kq); + + kq->ops.uninitialize(kq); + kfree(kq); +} + +static __attribute__((unused)) void test_kq(struct kfd_dev *dev) +{ + struct kernel_queue *kq; + uint32_t *buffer, i; + int retval; + + BUG_ON(!dev); + + pr_err("amdkfd: starting kernel queue test\n"); + + kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); + BUG_ON(!kq); + + retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); + BUG_ON(retval != 0); + for (i = 0; i < 5; i++) + buffer[i] = kq->nop_packet; + kq->ops.submit_packet(kq); + + pr_err("amdkfd: ending kernel queue test\n"); +} + + diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h new file mode 100644 index 
000000000..594053136 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -0,0 +1,101 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef KFD_KERNEL_QUEUE_H_ +#define KFD_KERNEL_QUEUE_H_ + +#include +#include +#include "kfd_priv.h" + +/** + * struct kernel_queue_ops + * + * @initialize: Initialize a kernel queue, including allocations of GART memory + * needed for the queue. + * + * @uninitialize: Uninitialize a kernel queue and free all its memory usages. + * + * @acquire_packet_buffer: Returns a pointer to the location in the kernel + * queue ring buffer where the calling function can write its packet. It is + * Guaranteed that there is enough space for that packet. 
It also updates the + * pending write pointer to that location so subsequent calls to + * acquire_packet_buffer will get a correct write pointer + * + * @submit_packet: Update the write pointer and doorbell of a kernel queue. + * + * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel + * queue are equal, which means the CP has read all the submitted packets. + * + * @rollback_packet: This routine is called if we failed to build an acquired + * packet for some reason. It just overwrites the pending wptr with the current + * one + * + */ +struct kernel_queue_ops { + bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size); + void (*uninitialize)(struct kernel_queue *kq); + int (*acquire_packet_buffer)(struct kernel_queue *kq, + size_t packet_size_in_dwords, + unsigned int **buffer_ptr); + + void (*submit_packet)(struct kernel_queue *kq); + void (*rollback_packet)(struct kernel_queue *kq); +}; + +struct kernel_queue { + struct kernel_queue_ops ops; + struct kernel_queue_ops ops_asic_specific; + + /* data */ + struct kfd_dev *dev; + struct mqd_manager *mqd; + struct queue *queue; + uint32_t pending_wptr; + unsigned int nop_packet; + + struct kfd_mem_obj *rptr_mem; + uint32_t *rptr_kernel; + uint64_t rptr_gpu_addr; + struct kfd_mem_obj *wptr_mem; + uint32_t *wptr_kernel; + uint64_t wptr_gpu_addr; + struct kfd_mem_obj *pq; + uint64_t pq_gpu_addr; + uint32_t *pq_kernel_addr; + struct kfd_mem_obj *eop_mem; + uint64_t eop_gpu_addr; + uint32_t *eop_kernel_addr; + + struct kfd_mem_obj *fence_mem_obj; + uint64_t fence_gpu_addr; + void *fence_kernel_address; + + struct list_head list; +}; + +void kernel_queue_init_cik(struct kernel_queue_ops *ops); +void kernel_queue_init_vi(struct kernel_queue_ops *ops); + +#endif /* KFD_KERNEL_QUEUE_H_ */ diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c new file mode 100644 
index 000000000..a90eb440b --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c @@ -0,0 +1,44 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_kernel_queue.h" + +static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size); +static void uninitialize_cik(struct kernel_queue *kq); + +void kernel_queue_init_cik(struct kernel_queue_ops *ops) +{ + ops->initialize = initialize_cik; + ops->uninitialize = uninitialize_cik; +} + +static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size) +{ + return true; +} + +static void uninitialize_cik(struct kernel_queue *kq) +{ +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c new file mode 100644 index 000000000..f1d48281e --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c @@ -0,0 +1,56 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_kernel_queue.h" + +static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size); +static void uninitialize_vi(struct kernel_queue *kq); + +void kernel_queue_init_vi(struct kernel_queue_ops *ops) +{ + ops->initialize = initialize_vi; + ops->uninitialize = uninitialize_vi; +} + +static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size) +{ + int retval; + + retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); + if (retval != 0) + return false; + + kq->eop_gpu_addr = kq->eop_mem->gpu_addr; + kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; + + memset(kq->eop_kernel_addr, 0, PAGE_SIZE); + + return true; +} + +static void uninitialize_vi(struct kernel_queue *kq) +{ + kfd_gtt_sa_free(kq->dev, kq->eop_mem); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_module.c new file mode 100644 index 000000000..4e0a68f13 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -0,0 +1,138 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include "kfd_priv.h" + +#define KFD_DRIVER_AUTHOR "AMD Inc. and others" + +#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs" +#define KFD_DRIVER_DATE "20150122" +#define KFD_DRIVER_MAJOR 0 +#define KFD_DRIVER_MINOR 7 +#define KFD_DRIVER_PATCHLEVEL 1 + +static const struct kgd2kfd_calls kgd2kfd = { + .exit = kgd2kfd_exit, + .probe = kgd2kfd_probe, + .device_init = kgd2kfd_device_init, + .device_exit = kgd2kfd_device_exit, + .interrupt = kgd2kfd_interrupt, + .suspend = kgd2kfd_suspend, + .resume = kgd2kfd_resume, +}; + +int sched_policy = KFD_SCHED_POLICY_HWS; +module_param(sched_policy, int, 0444); +MODULE_PARM_DESC(sched_policy, + "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); + +int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; +module_param(max_num_of_queues_per_device, int, 0444); +MODULE_PARM_DESC(max_num_of_queues_per_device, + "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); + +bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) +{ + /* + * Only one interface version is supported, + * no kfd/kgd version skew allowed. 
+ */ + if (interface_version != KFD_INTERFACE_VERSION) + return false; + + *g2f = &kgd2kfd; + + return true; +} +EXPORT_SYMBOL(kgd2kfd_init); + +void kgd2kfd_exit(void) +{ +} + +static int __init kfd_module_init(void) +{ + int err; + + /* Verify module parameters */ + if ((sched_policy < KFD_SCHED_POLICY_HWS) || + (sched_policy > KFD_SCHED_POLICY_NO_HWS)) { + pr_err("kfd: sched_policy has invalid value\n"); + return -1; + } + + /* Verify module parameters */ + if ((max_num_of_queues_per_device < 1) || + (max_num_of_queues_per_device > + KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { + pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); + return -1; + } + + err = kfd_pasid_init(); + if (err < 0) + goto err_pasid; + + err = kfd_chardev_init(); + if (err < 0) + goto err_ioctl; + + err = kfd_topology_init(); + if (err < 0) + goto err_topology; + + kfd_process_create_wq(); + + dev_info(kfd_device, "Initialized module\n"); + + return 0; + +err_topology: + kfd_chardev_exit(); +err_ioctl: + kfd_pasid_exit(); +err_pasid: + return err; +} + +static void __exit kfd_module_exit(void) +{ + kfd_process_destroy_wq(); + kfd_topology_shutdown(); + kfd_chardev_exit(); + kfd_pasid_exit(); + dev_info(kfd_device, "Removed module\n"); +} + +module_init(kfd_module_init); +module_exit(kfd_module_exit); + +MODULE_AUTHOR(KFD_DRIVER_AUTHOR); +MODULE_DESCRIPTION(KFD_DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); +MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "." + __stringify(KFD_DRIVER_MINOR) "." + __stringify(KFD_DRIVER_PATCHLEVEL)); diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c new file mode 100644 index 000000000..b1ef1368c --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -0,0 +1,37 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "kfd_priv.h" + +struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, + struct kfd_dev *dev) +{ + switch (dev->device_info->asic_family) { + case CHIP_KAVERI: + return mqd_manager_init_cik(type, dev); + case CHIP_CARRIZO: + return mqd_manager_init_vi(type, dev); + } + + return NULL; +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h new file mode 100644 index 000000000..213a71e0b --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -0,0 +1,91 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef KFD_MQD_MANAGER_H_ +#define KFD_MQD_MANAGER_H_ + +#include "kfd_priv.h" + +/** + * struct mqd_manager + * + * @init_mqd: Allocates the mqd buffer on local gpu memory and initialize it. + * + * @load_mqd: Loads the mqd to a concrete hqd slot. Used only for no cp + * scheduling mode. + * + * @update_mqd: Handles a update call for the MQD + * + * @destroy_mqd: Destroys the HQD slot and by that preempt the relevant queue. + * Used only for no cp scheduling. + * + * @uninit_mqd: Releases the mqd buffer from local gpu memory. + * + * @is_occupied: Checks if the relevant HQD slot is occupied. + * + * @mqd_mutex: Mqd manager mutex. + * + * @dev: The kfd device structure coupled with this module. + * + * MQD stands for Memory Queue Descriptor which represents the current queue + * state in the memory and initiate the HQD (Hardware Queue Descriptor) state. 
 + * This structure is actually a base class for the different types of MQDs + * structures for the variant ASICs that should be supported in the future. + * This base class also contains all the MQD specific operations. + * Another important thing to mention is that each queue has a MQD that keeps + * its state (or context) after each preemption or reassignment. + * Basically there are instances of the mqd manager class per MQD type per + * ASIC. Currently the kfd driver supports only Kaveri so there are instances + * per KFD_MQD_TYPE for each device. + * + */ + +struct mqd_manager { + int (*init_mqd)(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q); + + int (*load_mqd)(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr); + + int (*update_mqd)(struct mqd_manager *mm, void *mqd, + struct queue_properties *q); + + int (*destroy_mqd)(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id); + + void (*uninit_mqd)(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj); + + bool (*is_occupied)(struct mqd_manager *mm, void *mqd, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id); + + struct mutex mqd_mutex; + struct kfd_dev *dev; +}; + +#endif /* KFD_MQD_MANAGER_H_ */ diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c new file mode 100644 index 000000000..434979428 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -0,0 +1,451 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include "kfd_priv.h" +#include "kfd_mqd_manager.h" +#include "cik_regs.h" +#include "cik_structs.h" + +static inline struct cik_mqd *get_mqd(void *mqd) +{ + return (struct cik_mqd *)mqd; +} + +static int init_mqd(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + uint64_t addr; + struct cik_mqd *m; + int retval; + + BUG_ON(!mm || !q || !mqd); + + pr_debug("kfd: In func %s\n", __func__); + + retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), + mqd_mem_obj); + + if (retval != 0) + return -ENOMEM; + + m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; + addr = (*mqd_mem_obj)->gpu_addr; + + memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); + + m->header = 0xC0310800; + m->compute_pipelinestat_enable = 1; + m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; + + /* + * Make sure to use the last queue state saved on mqd when the cp + * reassigns the queue, so when queue is switched on/off (e.g over + * subscription or quantum timeout) the context will be consistent + */ + m->cp_hqd_persistent_state = + DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ; + + m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; + m->cp_mqd_base_addr_lo = lower_32_bits(addr); + m->cp_mqd_base_addr_hi = upper_32_bits(addr); + + m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN; + /* Although WinKFD writes this, I suspect it should not be necessary */ + m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE; + + m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | + QUANTUM_DURATION(10); + + /* + * Pipe Priority + * Identifies the pipe relative priority when this queue is connected + * to the pipeline. The pipe priority is against the GFX pipe and HP3D. + * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 
+ * 0 = CS_LOW (typically below GFX) + * 1 = CS_MEDIUM (typically between HP3D and GFX + * 2 = CS_HIGH (typically above HP3D) + */ + m->cp_hqd_pipe_priority = 1; + m->cp_hqd_queue_priority = 15; + + if (q->format == KFD_QUEUE_FORMAT_AQL) + m->cp_hqd_iq_rptr = AQL_ENABLE; + + *mqd = m; + if (gart_addr != NULL) + *gart_addr = addr; + retval = mm->update_mqd(mm, m, q); + + return retval; +} + +static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + int retval; + struct cik_sdma_rlc_registers *m; + + BUG_ON(!mm || !mqd || !mqd_mem_obj); + + retval = kfd_gtt_sa_allocate(mm->dev, + sizeof(struct cik_sdma_rlc_registers), + mqd_mem_obj); + + if (retval != 0) + return -ENOMEM; + + m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr; + + memset(m, 0, sizeof(struct cik_sdma_rlc_registers)); + + *mqd = m; + if (gart_addr != NULL) + *gart_addr = (*mqd_mem_obj)->gpu_addr; + + retval = mm->update_mqd(mm, m, q); + + return retval; +} + +static void uninit_mqd(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj) +{ + BUG_ON(!mm || !mqd); + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); +} + +static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj) +{ + BUG_ON(!mm || !mqd); + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); +} + +static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr) +{ + return mm->dev->kfd2kgd->hqd_load + (mm->dev->kgd, mqd, pipe_id, queue_id, wptr); +} + +static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr) +{ + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd); +} + +static int update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + struct cik_mqd *m; + + BUG_ON(!mm || !q || !mqd); + + pr_debug("kfd: In func %s\n", __func__); + + m = get_mqd(mqd); + 
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | + DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN; + + /* + * Calculating queue size which is log base 2 of actual queue size -1 + * dwords and another -1 for ffs + */ + m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) + - 1 - 1; + m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); + m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); + m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); + m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); + m->cp_hqd_pq_doorbell_control = DOORBELL_EN | + DOORBELL_OFFSET(q->doorbell_off); + + m->cp_hqd_vmid = q->vmid; + + if (q->format == KFD_QUEUE_FORMAT_AQL) { + m->cp_hqd_pq_control |= NO_UPDATE_RPTR; + } + + m->cp_hqd_active = 0; + q->is_active = false; + if (q->queue_size > 0 && + q->queue_address != 0 && + q->queue_percent > 0) { + m->cp_hqd_active = 1; + q->is_active = true; + } + + return 0; +} + +static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + struct cik_sdma_rlc_registers *m; + + BUG_ON(!mm || !mqd || !q); + + m = get_sdma_mqd(mqd); + m->sdma_rlc_rb_cntl = + SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) | + SDMA_RB_VMID(q->vmid) | + SDMA_RPTR_WRITEBACK_ENABLE | + SDMA_RPTR_WRITEBACK_TIMER(6); + + m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8); + m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8); + m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); + m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); + m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE; + m->sdma_rlc_virtual_addr = q->sdma_vm_addr; + + m->sdma_engine_id = q->sdma_engine_id; + m->sdma_queue_id = q->sdma_queue_id; + + q->is_active = false; + if (q->queue_size > 0 && + q->queue_address != 0 && + q->queue_percent > 0) { + m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE; + q->is_active = true; + } + + 
return 0; +} + +static int destroy_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout, + pipe_id, queue_id); +} + +/* + * preempt type here is ignored because there is only one way + * to preempt sdma queue + */ +static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); +} + +static bool is_occupied(struct mqd_manager *mm, void *mqd, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) +{ + + return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, + pipe_id, queue_id); + +} + +static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); +} + +/* + * HIQ MQD Implementation, concrete implementation for HIQ MQD implementation. + * The HIQ queue in Kaveri is using the same MQD structure as all the user mode + * queues but with different initial values. 
+ */
+
+/* Allocate and program the HIQ MQD (kernel-owned scheduler queue). */
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+		struct queue_properties *q)
+{
+	uint64_t addr;
+	struct cik_mqd *m;
+	int retval;
+
+	BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+					mqd_mem_obj);
+
+	if (retval != 0)
+		return -ENOMEM;
+
+	m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+	addr = (*mqd_mem_obj)->gpu_addr;
+
+	memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+	m->header = 0xC0310800;
+	m->compute_pipelinestat_enable = 1;
+	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+	m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+					PRELOAD_REQ;
+	m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+				QUANTUM_DURATION(10);
+
+	m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+	m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+	/* NOTE: unlike init_mqd(), the HIQ does not set IB_ATC_EN here. */
+	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+
+	/*
+	 * Pipe Priority
+	 * Identifies the pipe relative priority when this queue is connected
+	 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+	 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+	 * 0 = CS_LOW (typically below GFX)
+	 * 1 = CS_MEDIUM (typically between HP3D and GFX
+	 * 2 = CS_HIGH (typically above HP3D)
+	 */
+	m->cp_hqd_pipe_priority = 1;
+	m->cp_hqd_queue_priority = 15;
+
+	*mqd = m;
+	if (gart_addr)
+		*gart_addr = addr;
+	retval = mm->update_mqd(mm, m, q);
+
+	return retval;
+}
+
+/* Program dynamic HIQ MQD fields; PRIV_STATE/KMD_QUEUE mark it kernel-owned. */
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+				struct queue_properties *q)
+{
+	struct cik_mqd *m;
+
+	BUG_ON(!mm || !q || !mqd);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	m = get_mqd(mqd);
+	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+				DEFAULT_MIN_AVAIL_SIZE |
+				PRIV_STATE |
+				KMD_QUEUE;
+
+	/*
+	 * Calculating queue size which is log base 2 of actual queue
+	 * size -1 dwords
+	 */
+	m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+								- 1 - 1;
+	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+					DOORBELL_OFFSET(q->doorbell_off);
+
+	m->cp_hqd_vmid = q->vmid;
+
+	/* Active only when the queue has a ring, a size and a quota. */
+	m->cp_hqd_active = 0;
+	q->is_active = false;
+	if (q->queue_size > 0 &&
+			q->queue_address != 0 &&
+			q->queue_percent > 0) {
+		m->cp_hqd_active = 1;
+		q->is_active = true;
+	}
+
+	return 0;
+}
+
+/* Non-static: also used by the device queue manager (see kfd_priv.h). */
+struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	struct cik_sdma_rlc_registers *m;
+
+	BUG_ON(!mqd);
+
+	m = (struct cik_sdma_rlc_registers *)mqd;
+
+	return m;
+}
+
+/*
+ * Factory for CIK (Kaveri) MQD managers: binds the per-type operation
+ * set onto a freshly allocated struct mqd_manager. Returns NULL on
+ * allocation failure or unknown type.
+ */
+struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+		struct kfd_dev *dev)
+{
+	struct mqd_manager *mqd;
+
+	BUG_ON(!dev);
+	BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+	if (!mqd)
+		return NULL;
+
+	mqd->dev = dev;
+
+	switch (type) {
+	case KFD_MQD_TYPE_CP:
+		/* fallthrough: CP and COMPUTE share the same operations */
+	case KFD_MQD_TYPE_COMPUTE:
+		mqd->init_mqd = init_mqd;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_HIQ:
+		mqd->init_mqd = init_mqd_hiq;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd_hiq;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_SDMA:
+		mqd->init_mqd = init_mqd_sdma;
+		mqd->uninit_mqd = uninit_mqd_sdma;
+		mqd->load_mqd = load_mqd_sdma;
+		mqd->update_mqd = update_mqd_sdma;
+		mqd->destroy_mqd = destroy_mqd_sdma;
+		mqd->is_occupied = is_occupied_sdma;
+		break;
+	default:
+		kfree(mqd);
+		return NULL;
+	}
+
+	return mqd;
+}
+
diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
new file mode 100644
index 000000000..b3a7e3ba1
--- /dev/null
+++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include "kfd_priv.h" +#include "kfd_mqd_manager.h" + +struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, + struct kfd_dev *dev) +{ + pr_warn("amdkfd: VI MQD is not currently supported\n"); + return NULL; +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c new file mode 100644 index 000000000..e2533d875 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -0,0 +1,557 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include "kfd_device_queue_manager.h" +#include "kfd_kernel_queue.h" +#include "kfd_priv.h" +#include "kfd_pm4_headers.h" +#include "kfd_pm4_opcodes.h" + +static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, + unsigned int buffer_size_bytes) +{ + unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t); + + BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes); + *wptr = temp; +} + +static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size) +{ + union PM4_MES_TYPE_3_HEADER header; + + header.u32all = 0; + header.opcode = opcode; + header.count = packet_size/sizeof(uint32_t) - 2; + header.type = PM4_TYPE_3; + + return header.u32all; +} + +static void pm_calc_rlib_size(struct packet_manager *pm, + unsigned int *rlib_size, + bool *over_subscription) +{ + unsigned int process_count, queue_count; + + BUG_ON(!pm || !rlib_size || !over_subscription); + + process_count = pm->dqm->processes_count; + queue_count = pm->dqm->queue_count; + + /* check if there is over subscription*/ + *over_subscription = false; + if ((process_count > 1) || + queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) { + *over_subscription = true; + pr_debug("kfd: over subscribed runlist\n"); + } + + /* calculate run list ib allocation size */ + *rlib_size = process_count * sizeof(struct pm4_map_process) + + queue_count * sizeof(struct pm4_map_queues); + + /* + * Increase the allocation size in case we need a chained run list + * when over subscription + */ + if (*over_subscription) + *rlib_size += sizeof(struct pm4_runlist); + + pr_debug("kfd: runlist ib size %d\n", *rlib_size); +} + +static int pm_allocate_runlist_ib(struct packet_manager *pm, + unsigned int **rl_buffer, + uint64_t *rl_gpu_buffer, + unsigned int *rl_buffer_size, + bool *is_over_subscription) +{ + int retval; + + BUG_ON(!pm); + BUG_ON(pm->allocated == true); + BUG_ON(is_over_subscription == NULL); + + pm_calc_rlib_size(pm, rl_buffer_size, 
is_over_subscription); + + retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, + &pm->ib_buffer_obj); + + if (retval != 0) { + pr_err("kfd: failed to allocate runlist IB\n"); + return retval; + } + + *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr; + *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr; + + memset(*rl_buffer, 0, *rl_buffer_size); + pm->allocated = true; + return retval; +} + +static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer, + uint64_t ib, size_t ib_size_in_dwords, bool chain) +{ + struct pm4_runlist *packet; + + BUG_ON(!pm || !buffer || !ib); + + packet = (struct pm4_runlist *)buffer; + + memset(buffer, 0, sizeof(struct pm4_runlist)); + packet->header.u32all = build_pm4_header(IT_RUN_LIST, + sizeof(struct pm4_runlist)); + + packet->bitfields4.ib_size = ib_size_in_dwords; + packet->bitfields4.chain = chain ? 1 : 0; + packet->bitfields4.offload_polling = 0; + packet->bitfields4.valid = 1; + packet->ordinal2 = lower_32_bits(ib); + packet->bitfields3.ib_base_hi = upper_32_bits(ib); + + return 0; +} + +static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer, + struct qcm_process_device *qpd) +{ + struct pm4_map_process *packet; + struct queue *cur; + uint32_t num_queues; + + BUG_ON(!pm || !buffer || !qpd); + + packet = (struct pm4_map_process *)buffer; + + pr_debug("kfd: In func %s\n", __func__); + + memset(buffer, 0, sizeof(struct pm4_map_process)); + + packet->header.u32all = build_pm4_header(IT_MAP_PROCESS, + sizeof(struct pm4_map_process)); + packet->bitfields2.diq_enable = (qpd->is_debug) ? 
1 : 0; + packet->bitfields2.process_quantum = 1; + packet->bitfields2.pasid = qpd->pqm->process->pasid; + packet->bitfields3.page_table_base = qpd->page_table_base; + packet->bitfields10.gds_size = qpd->gds_size; + packet->bitfields10.num_gws = qpd->num_gws; + packet->bitfields10.num_oac = qpd->num_oac; + num_queues = 0; + list_for_each_entry(cur, &qpd->queues_list, list) + num_queues++; + packet->bitfields10.num_queues = num_queues; + + packet->sh_mem_config = qpd->sh_mem_config; + packet->sh_mem_bases = qpd->sh_mem_bases; + packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; + packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; + + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); + + return 0; +} + +static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer, + struct queue *q) +{ + struct pm4_map_queues *packet; + + BUG_ON(!pm || !buffer || !q); + + pr_debug("kfd: In func %s\n", __func__); + + packet = (struct pm4_map_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_map_queues)); + + packet->header.u32all = build_pm4_header(IT_MAP_QUEUES, + sizeof(struct pm4_map_queues)); + packet->bitfields2.alloc_format = + alloc_format__mes_map_queues__one_per_pipe; + packet->bitfields2.num_queues = 1; + packet->bitfields2.queue_sel = + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots; + + packet->bitfields2.vidmem = (q->properties.is_interop) ? 
+ vidmem__mes_map_queues__uses_video_memory : + vidmem__mes_map_queues__uses_no_video_memory; + + switch (q->properties.type) { + case KFD_QUEUE_TYPE_COMPUTE: + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__compute; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__sdma0; + break; + default: + BUG(); + break; + } + + packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset = + q->properties.doorbell_off; + + packet->mes_map_queues_ordinals[0].mqd_addr_lo = + lower_32_bits(q->gart_mqd_addr); + + packet->mes_map_queues_ordinals[0].mqd_addr_hi = + upper_32_bits(q->gart_mqd_addr); + + packet->mes_map_queues_ordinals[0].wptr_addr_lo = + lower_32_bits((uint64_t)q->properties.write_ptr); + + packet->mes_map_queues_ordinals[0].wptr_addr_hi = + upper_32_bits((uint64_t)q->properties.write_ptr); + + return 0; +} + +static int pm_create_runlist_ib(struct packet_manager *pm, + struct list_head *queues, + uint64_t *rl_gpu_addr, + size_t *rl_size_bytes) +{ + unsigned int alloc_size_bytes; + unsigned int *rl_buffer, rl_wptr, i; + int retval, proccesses_mapped; + struct device_process_node *cur; + struct qcm_process_device *qpd; + struct queue *q; + struct kernel_queue *kq; + bool is_over_subscription; + + BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr); + + rl_wptr = retval = proccesses_mapped = 0; + + retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr, + &alloc_size_bytes, &is_over_subscription); + if (retval != 0) + return retval; + + *rl_size_bytes = alloc_size_bytes; + + pr_debug("kfd: In func %s\n", __func__); + pr_debug("kfd: building runlist ib process count: %d queues count %d\n", + pm->dqm->processes_count, pm->dqm->queue_count); + + /* build the run list ib packet */ + list_for_each_entry(cur, queues, list) { + qpd = cur->qpd; + /* build map process packet */ + if (proccesses_mapped >= pm->dqm->processes_count) { + pr_debug("kfd: not enough space left in 
runlist IB\n"); + pm_release_ib(pm); + return -ENOMEM; + } + retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd); + if (retval != 0) + return retval; + proccesses_mapped++; + inc_wptr(&rl_wptr, sizeof(struct pm4_map_process), + alloc_size_bytes); + + list_for_each_entry(kq, &qpd->priv_queue_list, list) { + if (kq->queue->properties.is_active != true) + continue; + retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr], + kq->queue); + if (retval != 0) + return retval; + inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues), + alloc_size_bytes); + } + + list_for_each_entry(q, &qpd->queues_list, list) { + if (q->properties.is_active != true) + continue; + retval = pm_create_map_queue(pm, + &rl_buffer[rl_wptr], q); + if (retval != 0) + return retval; + inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues), + alloc_size_bytes); + } + } + + pr_debug("kfd: finished map process and queues to runlist\n"); + + if (is_over_subscription) + pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr, + alloc_size_bytes / sizeof(uint32_t), true); + + for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++) + pr_debug("0x%2X ", rl_buffer[i]); + pr_debug("\n"); + + return 0; +} + +int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) +{ + BUG_ON(!dqm); + + pm->dqm = dqm; + mutex_init(&pm->lock); + pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ); + if (pm->priv_queue == NULL) { + mutex_destroy(&pm->lock); + return -ENOMEM; + } + pm->allocated = false; + + return 0; +} + +void pm_uninit(struct packet_manager *pm) +{ + BUG_ON(!pm); + + mutex_destroy(&pm->lock); + kernel_queue_uninit(pm->priv_queue); +} + +int pm_send_set_resources(struct packet_manager *pm, + struct scheduling_resources *res) +{ + struct pm4_set_resources *packet; + + BUG_ON(!pm || !res); + + pr_debug("kfd: In func %s\n", __func__); + + mutex_lock(&pm->lock); + pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + sizeof(*packet) / sizeof(uint32_t), + (unsigned int **)&packet); + 
if (packet == NULL) { + mutex_unlock(&pm->lock); + pr_err("kfd: failed to allocate buffer on kernel queue\n"); + return -ENOMEM; + } + + memset(packet, 0, sizeof(struct pm4_set_resources)); + packet->header.u32all = build_pm4_header(IT_SET_RESOURCES, + sizeof(struct pm4_set_resources)); + + packet->bitfields2.queue_type = + queue_type__mes_set_resources__hsa_interface_queue_hiq; + packet->bitfields2.vmid_mask = res->vmid_mask; + packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY; + packet->bitfields7.oac_mask = res->oac_mask; + packet->bitfields8.gds_heap_base = res->gds_heap_base; + packet->bitfields8.gds_heap_size = res->gds_heap_size; + + packet->gws_mask_lo = lower_32_bits(res->gws_mask); + packet->gws_mask_hi = upper_32_bits(res->gws_mask); + + packet->queue_mask_lo = lower_32_bits(res->queue_mask); + packet->queue_mask_hi = upper_32_bits(res->queue_mask); + + pm->priv_queue->ops.submit_packet(pm->priv_queue); + + mutex_unlock(&pm->lock); + + return 0; +} + +int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) +{ + uint64_t rl_gpu_ib_addr; + uint32_t *rl_buffer; + size_t rl_ib_size, packet_size_dwords; + int retval; + + BUG_ON(!pm || !dqm_queues); + + retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr, + &rl_ib_size); + if (retval != 0) + goto fail_create_runlist_ib; + + pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr); + + packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t); + mutex_lock(&pm->lock); + + retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + packet_size_dwords, &rl_buffer); + if (retval != 0) + goto fail_acquire_packet_buffer; + + retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr, + rl_ib_size / sizeof(uint32_t), false); + if (retval != 0) + goto fail_create_runlist; + + pm->priv_queue->ops.submit_packet(pm->priv_queue); + + mutex_unlock(&pm->lock); + + return retval; + +fail_create_runlist: + pm->priv_queue->ops.rollback_packet(pm->priv_queue); 
+fail_acquire_packet_buffer: + mutex_unlock(&pm->lock); +fail_create_runlist_ib: + if (pm->allocated == true) + pm_release_ib(pm); + return retval; +} + +int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, + uint32_t fence_value) +{ + int retval; + struct pm4_query_status *packet; + + BUG_ON(!pm || !fence_address); + + mutex_lock(&pm->lock); + retval = pm->priv_queue->ops.acquire_packet_buffer( + pm->priv_queue, + sizeof(struct pm4_query_status) / sizeof(uint32_t), + (unsigned int **)&packet); + if (retval != 0) + goto fail_acquire_packet_buffer; + + packet->header.u32all = build_pm4_header(IT_QUERY_STATUS, + sizeof(struct pm4_query_status)); + + packet->bitfields2.context_id = 0; + packet->bitfields2.interrupt_sel = + interrupt_sel__mes_query_status__completion_status; + packet->bitfields2.command = + command__mes_query_status__fence_only_after_write_ack; + + packet->addr_hi = upper_32_bits((uint64_t)fence_address); + packet->addr_lo = lower_32_bits((uint64_t)fence_address); + packet->data_hi = upper_32_bits((uint64_t)fence_value); + packet->data_lo = lower_32_bits((uint64_t)fence_value); + + pm->priv_queue->ops.submit_packet(pm->priv_queue); + mutex_unlock(&pm->lock); + + return 0; + +fail_acquire_packet_buffer: + mutex_unlock(&pm->lock); + return retval; +} + +int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, + enum kfd_preempt_type_filter mode, + uint32_t filter_param, bool reset, + unsigned int sdma_engine) +{ + int retval; + uint32_t *buffer; + struct pm4_unmap_queues *packet; + + BUG_ON(!pm); + + mutex_lock(&pm->lock); + retval = pm->priv_queue->ops.acquire_packet_buffer( + pm->priv_queue, + sizeof(struct pm4_unmap_queues) / sizeof(uint32_t), + &buffer); + if (retval != 0) + goto err_acquire_packet_buffer; + + packet = (struct pm4_unmap_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_unmap_queues)); + + packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES, + sizeof(struct pm4_unmap_queues)); + 
switch (type) { + case KFD_QUEUE_TYPE_COMPUTE: + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__compute; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + break; + default: + BUG(); + break; + } + + if (reset) + packet->bitfields2.action = + action__mes_unmap_queues__reset_queues; + else + packet->bitfields2.action = + action__mes_unmap_queues__preempt_queues; + + switch (mode) { + case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_specified_queues; + packet->bitfields2.num_queues = 1; + packet->bitfields3b.doorbell_offset0 = filter_param; + break; + case KFD_PREEMPT_TYPE_FILTER_BY_PASID: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; + packet->bitfields3a.pasid = filter_param; + break; + case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_all_active_queues; + break; + default: + BUG(); + break; + }; + + pm->priv_queue->ops.submit_packet(pm->priv_queue); + + mutex_unlock(&pm->lock); + return 0; + +err_acquire_packet_buffer: + mutex_unlock(&pm->lock); + return retval; +} + +void pm_release_ib(struct packet_manager *pm) +{ + BUG_ON(!pm); + + mutex_lock(&pm->lock); + if (pm->allocated) { + kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj); + pm->allocated = false; + } + mutex_unlock(&pm->lock); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c new file mode 100644 index 000000000..6cfe7f1f1 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -0,0 +1,96 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "kfd_priv.h" + +static unsigned long *pasid_bitmap; +static unsigned int pasid_limit; +static DEFINE_MUTEX(pasid_mutex); + +int kfd_pasid_init(void) +{ + pasid_limit = KFD_MAX_NUM_OF_PROCESSES; + + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); + if (!pasid_bitmap) + return -ENOMEM; + + set_bit(0, pasid_bitmap); /* PASID 0 is reserved. 
*/ + + return 0; +} + +void kfd_pasid_exit(void) +{ + kfree(pasid_bitmap); +} + +bool kfd_set_pasid_limit(unsigned int new_limit) +{ + if (new_limit < pasid_limit) { + bool ok; + + mutex_lock(&pasid_mutex); + + /* ensure that no pasids >= new_limit are in-use */ + ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) == + pasid_limit); + if (ok) + pasid_limit = new_limit; + + mutex_unlock(&pasid_mutex); + + return ok; + } + + return true; +} + +inline unsigned int kfd_get_pasid_limit(void) +{ + return pasid_limit; +} + +unsigned int kfd_pasid_alloc(void) +{ + unsigned int found; + + mutex_lock(&pasid_mutex); + + found = find_first_zero_bit(pasid_bitmap, pasid_limit); + if (found == pasid_limit) + found = 0; + else + set_bit(found, pasid_bitmap); + + mutex_unlock(&pasid_mutex); + + return found; +} + +void kfd_pasid_free(unsigned int pasid) +{ + BUG_ON(pasid == 0 || pasid >= pasid_limit); + clear_bit(pasid, pasid_bitmap); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h new file mode 100644 index 000000000..071ad5724 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h @@ -0,0 +1,405 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_PM4_HEADERS_H_
#define KFD_PM4_HEADERS_H_

/*
 * PM4 "MES" packet layouts that amdkfd submits to the command processor.
 * Every bitfield layout below mirrors a hardware packet format and is
 * therefore ABI: do not reorder or resize fields.
 */

#ifndef PM4_MES_HEADER_DEFINED
#define PM4_MES_HEADER_DEFINED
/* Dword 0 of every PM4 type-3 packet. */
union PM4_MES_TYPE_3_HEADER {
	struct {
		uint32_t reserved1:8;	/* < reserved */
		uint32_t opcode:8;	/* < IT opcode */
		uint32_t count:14;	/* < number of DWORDs - 1
					 * in the information body.
					 */
		uint32_t type:2;	/* < packet identifier.
					 * It should be 3 for type 3 packets
					 */
	};
	uint32_t u32all;
};
#endif /* PM4_MES_HEADER_DEFINED */

/* --------------------MES_SET_RESOURCES-------------------- */

#ifndef PM4_MES_SET_RESOURCES_DEFINED
#define PM4_MES_SET_RESOURCES_DEFINED
enum set_resources_queue_type_enum {
	queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
	queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
	queue_type__mes_set_resources__hsa_debug_interface_queue = 4
};

/* Payload of the SET_RESOURCES packet (IT_SET_RESOURCES). */
struct pm4_set_resources {
	union {
		union PM4_MES_TYPE_3_HEADER header;	/* header */
		uint32_t ordinal1;
	};

	union {
		struct {
			uint32_t vmid_mask:16;
			uint32_t unmap_latency:8;
			uint32_t reserved1:5;
			enum set_resources_queue_type_enum queue_type:3;
		} bitfields2;
		uint32_t ordinal2;
	};

	uint32_t queue_mask_lo;
	uint32_t queue_mask_hi;
	uint32_t gws_mask_lo;
	uint32_t gws_mask_hi;

	union {
		struct {
			uint32_t oac_mask:16;
			uint32_t reserved2:16;
		} bitfields7;
		uint32_t ordinal7;
	};

	union {
		struct {
			uint32_t gds_heap_base:6;
			uint32_t reserved3:5;
			uint32_t gds_heap_size:6;
			uint32_t reserved4:15;
		} bitfields8;
		uint32_t ordinal8;
	};

};
#endif

/*--------------------MES_RUN_LIST-------------------- */

#ifndef PM4_MES_RUN_LIST_DEFINED
#define PM4_MES_RUN_LIST_DEFINED

/* Payload of the RUN_LIST packet (IT_RUN_LIST). */
struct pm4_runlist {
	union {
		union PM4_MES_TYPE_3_HEADER header;	/* header */
		uint32_t ordinal1;
	};

	union {
		struct {
			uint32_t reserved1:2;
			uint32_t ib_base_lo:30;
		} bitfields2;
		uint32_t ordinal2;
	};

	union {
		struct {
			uint32_t ib_base_hi:16;
			uint32_t reserved2:16;
		} bitfields3;
		uint32_t ordinal3;
	};

	union {
		struct {
			uint32_t ib_size:20;
			uint32_t chain:1;
			uint32_t offload_polling:1;
			uint32_t reserved3:1;
			uint32_t valid:1;
			uint32_t reserved4:8;
		} bitfields4;
		uint32_t ordinal4;
	};

};
#endif

/*--------------------MES_MAP_PROCESS-------------------- */

#ifndef PM4_MES_MAP_PROCESS_DEFINED
#define PM4_MES_MAP_PROCESS_DEFINED

/* Payload of the MAP_PROCESS packet (IT_MAP_PROCESS). */
struct pm4_map_process {
	union {
		union PM4_MES_TYPE_3_HEADER header;	/* header */
		uint32_t ordinal1;
	};

	union {
		struct {
			uint32_t pasid:16;
			uint32_t reserved1:8;
			uint32_t diq_enable:1;
			uint32_t process_quantum:7;
		} bitfields2;
		uint32_t ordinal2;
	};

	union {
		struct {
			uint32_t page_table_base:28;
			uint32_t reserved3:4;
		} bitfields3;
		uint32_t ordinal3;
	};

	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t sh_mem_config;
	uint32_t gds_addr_lo;
	uint32_t gds_addr_hi;

	union {
		struct {
			uint32_t num_gws:6;
			uint32_t reserved4:2;
			uint32_t num_oac:4;
			uint32_t reserved5:4;
			uint32_t gds_size:6;
			uint32_t num_queues:10;
		} bitfields10;
		uint32_t ordinal10;
	};

};
#endif

/*--------------------MES_MAP_QUEUES--------------------*/

#ifndef PM4_MES_MAP_QUEUES_DEFINED
#define PM4_MES_MAP_QUEUES_DEFINED
enum map_queues_queue_sel_enum {
	queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
	queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
	queue_sel__mes_map_queues__enable_process_queues = 2
};

enum map_queues_vidmem_enum {
	vidmem__mes_map_queues__uses_no_video_memory = 0,
	vidmem__mes_map_queues__uses_video_memory = 1
};

enum map_queues_alloc_format_enum {
	alloc_format__mes_map_queues__one_per_pipe = 0,
	alloc_format__mes_map_queues__all_on_one_pipe = 1
};

enum map_queues_engine_sel_enum {
	engine_sel__mes_map_queues__compute = 0,
	engine_sel__mes_map_queues__sdma0 = 2,
	engine_sel__mes_map_queues__sdma1 = 3
};

/* Payload of the MAP_QUEUES packet (IT_MAP_QUEUES). */
struct pm4_map_queues {
	union {
		union PM4_MES_TYPE_3_HEADER header;	/* header */
		uint32_t ordinal1;
	};

	union {
		struct {
			uint32_t reserved1:4;
			enum map_queues_queue_sel_enum queue_sel:2;
			uint32_t reserved2:2;
			uint32_t vmid:4;
			uint32_t reserved3:4;
			enum map_queues_vidmem_enum vidmem:2;
			uint32_t reserved4:6;
			enum map_queues_alloc_format_enum alloc_format:2;
			enum map_queues_engine_sel_enum engine_sel:3;
			uint32_t num_queues:3;
		} bitfields2;
		uint32_t ordinal2;
	};

	/* One group per mapped queue; the packet carries num_queues of
	 * these back to back (flexible trailing data, declared as [1]). */
	struct {
		union {
			struct {
				uint32_t reserved5:2;
				uint32_t doorbell_offset:21;
				uint32_t reserved6:3;
				uint32_t queue:6;
			} bitfields3;
			uint32_t ordinal3;
		};

		uint32_t mqd_addr_lo;
		uint32_t mqd_addr_hi;
		uint32_t wptr_addr_lo;
		uint32_t wptr_addr_hi;

	} mes_map_queues_ordinals[1];	/* 1..N of these ordinal groups */

};
#endif

/*--------------------MES_QUERY_STATUS--------------------*/

#ifndef PM4_MES_QUERY_STATUS_DEFINED
#define PM4_MES_QUERY_STATUS_DEFINED
enum query_status_interrupt_sel_enum {
	interrupt_sel__mes_query_status__completion_status = 0,
	interrupt_sel__mes_query_status__process_status = 1,
	interrupt_sel__mes_query_status__queue_status = 2
};

enum query_status_command_enum {
	command__mes_query_status__interrupt_only = 0,
	command__mes_query_status__fence_only_immediate = 1,
	command__mes_query_status__fence_only_after_write_ack = 2,
	command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
};

enum query_status_engine_sel_enum {
	engine_sel__mes_query_status__compute = 0,
	engine_sel__mes_query_status__sdma0_queue = 2,
	engine_sel__mes_query_status__sdma1_queue = 3
};

/* Payload of the QUERY_STATUS packet (IT_QUERY_STATUS).
 * Ordinal 3 is interpreted per-pasid (3a) or per-doorbell (3b). */
struct pm4_query_status {
	union {
		union PM4_MES_TYPE_3_HEADER header;	/* header */
		uint32_t ordinal1;
	};

	union {
		struct {
			uint32_t context_id:28;
			enum query_status_interrupt_sel_enum interrupt_sel:2;
			enum query_status_command_enum command:2;
		} bitfields2;
		uint32_t ordinal2;
	};

	union {
		struct {
			uint32_t pasid:16;
			uint32_t reserved1:16;
		} bitfields3a;
		struct {
			uint32_t reserved2:2;
			uint32_t doorbell_offset:21;
			uint32_t reserved3:3;
			enum query_status_engine_sel_enum engine_sel:3;
			uint32_t reserved4:3;
		} bitfields3b;
		uint32_t ordinal3;
	};

	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t data_lo;
	uint32_t data_hi;
};
#endif

/*--------------------MES_UNMAP_QUEUES--------------------*/

#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
#define PM4_MES_UNMAP_QUEUES_DEFINED
enum unmap_queues_action_enum {
	action__mes_unmap_queues__preempt_queues = 0,
	action__mes_unmap_queues__reset_queues = 1,
	action__mes_unmap_queues__disable_process_queues = 2
};

enum unmap_queues_queue_sel_enum {
	queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
	queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
	queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2
};

enum unmap_queues_engine_sel_enum {
	engine_sel__mes_unmap_queues__compute = 0,
	engine_sel__mes_unmap_queues__sdma0 = 2,
	engine_sel__mes_unmap_queues__sdma1 = 3
};

/* Payload of the UNMAP_QUEUES packet (IT_UNMAP_QUEUES).
 * Ordinal 3 is interpreted per-pasid (3a) or per-doorbell (3b). */
struct pm4_unmap_queues {
	union {
		union PM4_MES_TYPE_3_HEADER header;	/* header */
		uint32_t ordinal1;
	};

	union {
		struct {
			enum unmap_queues_action_enum action:2;
			uint32_t reserved1:2;
			enum unmap_queues_queue_sel_enum queue_sel:2;
			uint32_t reserved2:20;
			enum unmap_queues_engine_sel_enum engine_sel:3;
			uint32_t num_queues:3;
		} bitfields2;
		uint32_t ordinal2;
	};

	union {
		struct {
			uint32_t pasid:16;
			uint32_t reserved3:16;
		} bitfields3a;
		struct {
			uint32_t reserved4:2;
			uint32_t doorbell_offset0:21;
			uint32_t reserved5:9;
		} bitfields3b;
		uint32_t ordinal3;
	};

	union {
		struct {
			uint32_t reserved6:2;
			uint32_t doorbell_offset1:21;
			uint32_t reserved7:9;
		} bitfields4;
		uint32_t ordinal4;
	};

	union {
		struct {
			uint32_t reserved8:2;
			uint32_t doorbell_offset2:21;
			uint32_t reserved9:9;
		} bitfields5;
		uint32_t ordinal5;
	};

	union {
		struct {
			uint32_t reserved10:2;
			uint32_t doorbell_offset3:21;
			uint32_t reserved11:9;
		} bitfields6;
		uint32_t ordinal6;
	};

};
#endif

enum {
	CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
};

#endif /* KFD_PM4_HEADERS_H_ */
diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
new file mode 100644
index 000000000..b72fa3b8c
--- /dev/null
+++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
@@ -0,0 +1,107 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */


#ifndef KFD_PM4_OPCODES_H
#define KFD_PM4_OPCODES_H

/*
 * IT (instruction type) opcodes for PM4 type-3 packets; these values go
 * in the opcode field of union PM4_MES_TYPE_3_HEADER. The values are
 * defined by the command-processor microcode and must not be changed.
 */
enum it_opcode_type {
	IT_NOP                               = 0x10,
	IT_SET_BASE                          = 0x11,
	IT_CLEAR_STATE                       = 0x12,
	IT_INDEX_BUFFER_SIZE                 = 0x13,
	IT_DISPATCH_DIRECT                   = 0x15,
	IT_DISPATCH_INDIRECT                 = 0x16,
	IT_ATOMIC_GDS                        = 0x1D,
	IT_OCCLUSION_QUERY                   = 0x1F,
	IT_SET_PREDICATION                   = 0x20,
	IT_REG_RMW                           = 0x21,
	IT_COND_EXEC                         = 0x22,
	IT_PRED_EXEC                         = 0x23,
	IT_DRAW_INDIRECT                     = 0x24,
	IT_DRAW_INDEX_INDIRECT               = 0x25,
	IT_INDEX_BASE                        = 0x26,
	IT_DRAW_INDEX_2                      = 0x27,
	IT_CONTEXT_CONTROL                   = 0x28,
	IT_INDEX_TYPE                        = 0x2A,
	IT_DRAW_INDIRECT_MULTI               = 0x2C,
	IT_DRAW_INDEX_AUTO                   = 0x2D,
	IT_NUM_INSTANCES                     = 0x2F,
	IT_DRAW_INDEX_MULTI_AUTO             = 0x30,
	IT_INDIRECT_BUFFER_CNST              = 0x33,
	IT_STRMOUT_BUFFER_UPDATE             = 0x34,
	IT_DRAW_INDEX_OFFSET_2               = 0x35,
	IT_DRAW_PREAMBLE                     = 0x36,
	IT_WRITE_DATA                        = 0x37,
	IT_DRAW_INDEX_INDIRECT_MULTI         = 0x38,
	IT_MEM_SEMAPHORE                     = 0x39,
	IT_COPY_DW                           = 0x3B,
	IT_WAIT_REG_MEM                      = 0x3C,
	IT_INDIRECT_BUFFER                   = 0x3F,
	IT_COPY_DATA                         = 0x40,
	IT_PFP_SYNC_ME                       = 0x42,
	IT_SURFACE_SYNC                      = 0x43,
	IT_COND_WRITE                        = 0x45,
	IT_EVENT_WRITE                       = 0x46,
	IT_EVENT_WRITE_EOP                   = 0x47,
	IT_EVENT_WRITE_EOS                   = 0x48,
	IT_RELEASE_MEM                       = 0x49,
	IT_PREAMBLE_CNTL                     = 0x4A,
	IT_DMA_DATA                          = 0x50,
	IT_ACQUIRE_MEM                       = 0x58,
	IT_REWIND                            = 0x59,
	IT_LOAD_UCONFIG_REG                  = 0x5E,
	IT_LOAD_SH_REG                       = 0x5F,
	IT_LOAD_CONFIG_REG                   = 0x60,
	IT_LOAD_CONTEXT_REG                  = 0x61,
	IT_SET_CONFIG_REG                    = 0x68,
	IT_SET_CONTEXT_REG                   = 0x69,
	IT_SET_CONTEXT_REG_INDIRECT          = 0x73,
	IT_SET_SH_REG                        = 0x76,
	IT_SET_SH_REG_OFFSET                 = 0x77,
	IT_SET_QUEUE_REG                     = 0x78,
	IT_SET_UCONFIG_REG                   = 0x79,
	IT_SCRATCH_RAM_WRITE                 = 0x7D,
	IT_SCRATCH_RAM_READ                  = 0x7E,
	IT_LOAD_CONST_RAM                    = 0x80,
	IT_WRITE_CONST_RAM                   = 0x81,
	IT_DUMP_CONST_RAM                    = 0x83,
	IT_INCREMENT_CE_COUNTER              = 0x84,
	IT_INCREMENT_DE_COUNTER              = 0x85,
	IT_WAIT_ON_CE_COUNTER                = 0x86,
	IT_WAIT_ON_DE_COUNTER_DIFF           = 0x88,
	IT_SWITCH_BUFFER                     = 0x8B,
	IT_SET_RESOURCES                     = 0xA0,
	IT_MAP_PROCESS                       = 0xA1,
	IT_MAP_QUEUES                        = 0xA2,
	IT_UNMAP_QUEUES                      = 0xA3,
	IT_QUERY_STATUS                      = 0xA4,
	IT_RUN_LIST                          = 0xA5,
};

/* Values for the 2-bit "type" field of a PM4 packet header. */
#define PM4_TYPE_0 0
#define PM4_TYPE_2 2
#define PM4_TYPE_3 3

#endif /* KFD_PM4_OPCODES_H */

diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
new file mode 100644
index 000000000..f21fccebd
--- /dev/null
+++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -0,0 +1,645 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef KFD_PRIV_H_INCLUDED +#define KFD_PRIV_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KFD_SYSFS_FILE_MODE 0444 + +/* + * When working with cp scheduler we should assign the HIQ manually or via + * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot + * definitions for Kaveri. In Kaveri only the first ME queues participates + * in the cp scheduling taking that in mind we set the HIQ slot in the + * second ME. + */ +#define KFD_CIK_HIQ_PIPE 4 +#define KFD_CIK_HIQ_QUEUE 0 + +/* GPU ID hash width in bits */ +#define KFD_GPU_ID_HASH_WIDTH 16 + +/* Macro for allocating structures */ +#define kfd_alloc_struct(ptr_to_struct) \ + ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) + +#define KFD_MAX_NUM_OF_PROCESSES 512 +#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 + +/* + * Kernel module parameter to specify maximum number of supported queues per + * device + */ +extern int max_num_of_queues_per_device; + +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ + (KFD_MAX_NUM_OF_PROCESSES * \ + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) + +#define KFD_KERNEL_QUEUE_SIZE 2048 + +/* Kernel module parameter to specify the scheduling policy */ +extern int sched_policy; + +/** + * enum kfd_sched_policy + * + * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp) + * scheduling. In this scheduling mode we're using the firmware code to + * schedule the user mode queues and kernel queues such as HIQ and DIQ. + * the HIQ queue is used as a special queue that dispatches the configuration + * to the cp and the user mode queues list that are currently running. + * the DIQ queue is a debugging queue that dispatches debugging commands to the + * firmware. + * in this scheduling mode user mode queues over subscription feature is + * enabled. 
+ * + * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over + * subscription feature disabled. + * + * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly + * set the command processor registers and sets the queues "manually". This + * mode is used *ONLY* for debugging proposes. + * + */ +enum kfd_sched_policy { + KFD_SCHED_POLICY_HWS = 0, + KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION, + KFD_SCHED_POLICY_NO_HWS +}; + +enum cache_policy { + cache_policy_coherent, + cache_policy_noncoherent +}; + +enum asic_family_type { + CHIP_KAVERI = 0, + CHIP_CARRIZO +}; + +struct kfd_device_info { + unsigned int asic_family; + unsigned int max_pasid_bits; + size_t ih_ring_entry_size; + uint8_t num_of_watch_points; + uint16_t mqd_size_aligned; +}; + +struct kfd_mem_obj { + uint32_t range_start; + uint32_t range_end; + uint64_t gpu_addr; + uint32_t *cpu_ptr; +}; + +struct kfd_dev { + struct kgd_dev *kgd; + + const struct kfd_device_info *device_info; + struct pci_dev *pdev; + + unsigned int id; /* topology stub index */ + + phys_addr_t doorbell_base; /* Start of actual doorbells used by + * KFD. It is aligned for mapping + * into user mode + */ + size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell + * to HW doorbell, GFX reserved some + * at the start) + */ + size_t doorbell_process_limit; /* Number of processes we have doorbell + * space for. 
+ */ + u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells + * page used by kernel queue + */ + + struct kgd2kfd_shared_resources shared_resources; + + const struct kfd2kgd_calls *kfd2kgd; + struct mutex doorbell_mutex; + unsigned long doorbell_available_index[DIV_ROUND_UP( + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)]; + + void *gtt_mem; + uint64_t gtt_start_gpu_addr; + void *gtt_start_cpu_ptr; + void *gtt_sa_bitmap; + struct mutex gtt_sa_lock; + unsigned int gtt_sa_chunk_size; + unsigned int gtt_sa_num_of_chunks; + + /* QCM Device instance */ + struct device_queue_manager *dqm; + + bool init_complete; +}; + +/* KGD2KFD callbacks */ +void kgd2kfd_exit(void); +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, + struct pci_dev *pdev, const struct kfd2kgd_calls *f2g); +bool kgd2kfd_device_init(struct kfd_dev *kfd, + const struct kgd2kfd_shared_resources *gpu_resources); +void kgd2kfd_device_exit(struct kfd_dev *kfd); + +enum kfd_mempool { + KFD_MEMPOOL_SYSTEM_CACHEABLE = 1, + KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2, + KFD_MEMPOOL_FRAMEBUFFER = 3, +}; + +/* Character device interface */ +int kfd_chardev_init(void); +void kfd_chardev_exit(void); +struct device *kfd_chardev(void); + +/** + * enum kfd_preempt_type_filter + * + * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue. + * + * @KFD_PRERMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the + * running queues list. + * + * @KFD_PRERMPT_TYPE_FILTER_BY_PASID: Preempts queues that belongs to + * specific process. + * + */ +enum kfd_preempt_type_filter { + KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE, + KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, + KFD_PREEMPT_TYPE_FILTER_BY_PASID +}; + +enum kfd_preempt_type { + KFD_PREEMPT_TYPE_WAVEFRONT, + KFD_PREEMPT_TYPE_WAVEFRONT_RESET +}; + +/** + * enum kfd_queue_type + * + * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type. + * + * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type. + * + * @KFD_QUEUE_TYPE_HIQ: HIQ queue type. 
+ * + * @KFD_QUEUE_TYPE_DIQ: DIQ queue type. + */ +enum kfd_queue_type { + KFD_QUEUE_TYPE_COMPUTE, + KFD_QUEUE_TYPE_SDMA, + KFD_QUEUE_TYPE_HIQ, + KFD_QUEUE_TYPE_DIQ +}; + +enum kfd_queue_format { + KFD_QUEUE_FORMAT_PM4, + KFD_QUEUE_FORMAT_AQL +}; + +/** + * struct queue_properties + * + * @type: The queue type. + * + * @queue_id: Queue identifier. + * + * @queue_address: Queue ring buffer address. + * + * @queue_size: Queue ring buffer size. + * + * @priority: Defines the queue priority relative to other queues in the + * process. + * This is just an indication and HW scheduling may override the priority as + * necessary while keeping the relative prioritization. + * the priority granularity is from 0 to f which f is the highest priority. + * currently all queues are initialized with the highest priority. + * + * @queue_percent: This field is partially implemented and currently a zero in + * this field defines that the queue is non active. + * + * @read_ptr: User space address which points to the number of dwords the + * cp read from the ring buffer. This field updates automatically by the H/W. + * + * @write_ptr: Defines the number of dwords written to the ring buffer. + * + * @doorbell_ptr: This field aim is to notify the H/W of new packet written to + * the queue ring buffer. This field should be similar to write_ptr and the user + * should update this field after he updated the write_ptr. + * + * @doorbell_off: The doorbell offset in the doorbell pci-bar. + * + * @is_interop: Defines if this is a interop queue. Interop queue means that the + * queue can access both graphics and compute resources. + * + * @is_active: Defines if the queue is active or not. + * + * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid + * of the queue. + * + * This structure represents the queue properties for each queue no matter if + * it's user mode or kernel mode queue. 
+ * + */ +struct queue_properties { + enum kfd_queue_type type; + enum kfd_queue_format format; + unsigned int queue_id; + uint64_t queue_address; + uint64_t queue_size; + uint32_t priority; + uint32_t queue_percent; + uint32_t *read_ptr; + uint32_t *write_ptr; + uint32_t __iomem *doorbell_ptr; + uint32_t doorbell_off; + bool is_interop; + bool is_active; + /* Not relevant for user mode queues in cp scheduling */ + unsigned int vmid; + /* Relevant only for sdma queues*/ + uint32_t sdma_engine_id; + uint32_t sdma_queue_id; + uint32_t sdma_vm_addr; + /* Relevant only for VI */ + uint64_t eop_ring_buffer_address; + uint32_t eop_ring_buffer_size; + uint64_t ctx_save_restore_area_address; + uint32_t ctx_save_restore_area_size; +}; + +/** + * struct queue + * + * @list: Queue linked list. + * + * @mqd: The queue MQD. + * + * @mqd_mem_obj: The MQD local gpu memory object. + * + * @gart_mqd_addr: The MQD gart mc address. + * + * @properties: The queue properties. + * + * @mec: Used only in no cp scheduling mode and identifies to micro engine id + * that the queue should be execute on. + * + * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id. + * + * @queue: Used only in no cp scheduliong mode and identifies the queue's slot. + * + * @process: The kfd process that created this queue. + * + * @device: The kfd device that created this queue. + * + * This structure represents user mode compute queues. + * It contains all the necessary data to handle such queues. + * + */ + +struct queue { + struct list_head list; + void *mqd; + struct kfd_mem_obj *mqd_mem_obj; + uint64_t gart_mqd_addr; + struct queue_properties properties; + + uint32_t mec; + uint32_t pipe; + uint32_t queue; + + unsigned int sdma_id; + + struct kfd_process *process; + struct kfd_dev *device; +}; + +/* + * Please read the kfd_mqd_manager.h description. 
+ */ +enum KFD_MQD_TYPE { + KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */ + KFD_MQD_TYPE_HIQ, /* for hiq */ + KFD_MQD_TYPE_CP, /* for cp queues and diq */ + KFD_MQD_TYPE_SDMA, /* for sdma queues */ + KFD_MQD_TYPE_MAX +}; + +struct scheduling_resources { + unsigned int vmid_mask; + enum kfd_queue_type type; + uint64_t queue_mask; + uint64_t gws_mask; + uint32_t oac_mask; + uint32_t gds_heap_base; + uint32_t gds_heap_size; +}; + +struct process_queue_manager { + /* data */ + struct kfd_process *process; + unsigned int num_concurrent_processes; + struct list_head queues; + unsigned long *queue_slot_bitmap; +}; + +struct qcm_process_device { + /* The Device Queue Manager that owns this data */ + struct device_queue_manager *dqm; + struct process_queue_manager *pqm; + /* Queues list */ + struct list_head queues_list; + struct list_head priv_queue_list; + + unsigned int queue_count; + unsigned int vmid; + bool is_debug; + /* + * All the memory management data should be here too + */ + uint64_t gds_context_area; + uint32_t sh_mem_config; + uint32_t sh_mem_bases; + uint32_t sh_mem_ape1_base; + uint32_t sh_mem_ape1_limit; + uint32_t page_table_base; + uint32_t gds_size; + uint32_t num_gws; + uint32_t num_oac; +}; + +/* Data that is per-process-per device. */ +struct kfd_process_device { + /* + * List of all per-device data for a process. + * Starts from kfd_process.per_device_data. + */ + struct list_head per_device_list; + + /* The device that owns this data. */ + struct kfd_dev *dev; + + + /* per-process-per device QCM data structure */ + struct qcm_process_device qpd; + + /*Apertures*/ + uint64_t lds_base; + uint64_t lds_limit; + uint64_t gpuvm_base; + uint64_t gpuvm_limit; + uint64_t scratch_base; + uint64_t scratch_limit; + + /* Is this process/pasid bound to this device? 
(amd_iommu_bind_pasid) */ + bool bound; +}; + +#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd) + +/* Process data */ +struct kfd_process { + /* + * kfd_process are stored in an mm_struct*->kfd_process* + * hash table (kfd_processes in kfd_process.c) + */ + struct hlist_node kfd_processes; + + struct mm_struct *mm; + + struct mutex mutex; + + /* + * In any process, the thread that started main() is the lead + * thread and outlives the rest. + * It is here because amd_iommu_bind_pasid wants a task_struct. + */ + struct task_struct *lead_thread; + + /* We want to receive a notification when the mm_struct is destroyed */ + struct mmu_notifier mmu_notifier; + + /* Use for delayed freeing of kfd_process structure */ + struct rcu_head rcu; + + unsigned int pasid; + + /* + * List of kfd_process_device structures, + * one for each device the process is using. + */ + struct list_head per_device_data; + + struct process_queue_manager pqm; + + /* The process's queues. */ + size_t queue_array_size; + + /* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */ + struct kfd_queue **queues; + + unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)]; + + /*Is the user space process 32 bit?*/ + bool is_32bit_user_mode; +}; + +/** + * Ioctl function type. + * + * \param filep pointer to file structure. + * \param p amdkfd process pointer. + * \param data pointer to arg that was copied from user. 
+ */ +typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, + void *data); + +struct amdkfd_ioctl_desc { + unsigned int cmd; + int flags; + amdkfd_ioctl_t *func; + unsigned int cmd_drv; + const char *name; +}; + +void kfd_process_create_wq(void); +void kfd_process_destroy_wq(void); +struct kfd_process *kfd_create_process(const struct task_struct *); +struct kfd_process *kfd_get_process(const struct task_struct *); + +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, + struct kfd_process *p); +void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid); +struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, + struct kfd_process *p); +struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, + struct kfd_process *p); + +/* Process device data iterator */ +struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p); +struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p, + struct kfd_process_device *pdd); +bool kfd_has_process_device_data(struct kfd_process *p); + +/* PASIDs */ +int kfd_pasid_init(void); +void kfd_pasid_exit(void); +bool kfd_set_pasid_limit(unsigned int new_limit); +unsigned int kfd_get_pasid_limit(void); +unsigned int kfd_pasid_alloc(void); +void kfd_pasid_free(unsigned int pasid); + +/* Doorbells */ +void kfd_doorbell_init(struct kfd_dev *kfd); +int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma); +u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, + unsigned int *doorbell_off); +void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); +u32 read_kernel_doorbell(u32 __iomem *db); +void write_kernel_doorbell(u32 __iomem *db, u32 value); +unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, + struct kfd_process *process, + unsigned int queue_id); + +/* GTT Sub-Allocator */ + +int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, + 
struct kfd_mem_obj **mem_obj); + +int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj); + +extern struct device *kfd_device; + +/* Topology */ +int kfd_topology_init(void); +void kfd_topology_shutdown(void); +int kfd_topology_add_device(struct kfd_dev *gpu); +int kfd_topology_remove_device(struct kfd_dev *gpu); +struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); +struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); +struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx); + +/* Interrupts */ +void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); + +/* Power Management */ +void kgd2kfd_suspend(struct kfd_dev *kfd); +int kgd2kfd_resume(struct kfd_dev *kfd); + +/* amdkfd Apertures */ +int kfd_init_apertures(struct kfd_process *process); + +/* Queue Context Management */ +inline uint32_t lower_32(uint64_t x); +inline uint32_t upper_32(uint64_t x); +struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd); +inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m); + +int init_queue(struct queue **q, struct queue_properties properties); +void uninit_queue(struct queue *q); +void print_queue_properties(struct queue_properties *q); +void print_queue(struct queue *q); + +struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, + struct kfd_dev *dev); +struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, + struct kfd_dev *dev); +struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, + struct kfd_dev *dev); +struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); +void device_queue_manager_uninit(struct device_queue_manager *dqm); +struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, + enum kfd_queue_type type); +void kernel_queue_uninit(struct kernel_queue *kq); + +/* Process Queue Manager */ +struct process_queue_node { + struct queue *q; + struct kernel_queue *kq; + struct list_head process_queue_list; +}; + +int pqm_init(struct process_queue_manager *pqm, 
struct kfd_process *p); +void pqm_uninit(struct process_queue_manager *pqm); +int pqm_create_queue(struct process_queue_manager *pqm, + struct kfd_dev *dev, + struct file *f, + struct queue_properties *properties, + unsigned int flags, + enum kfd_queue_type type, + unsigned int *qid); +int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid); +int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, + struct queue_properties *p); + +/* Packet Manager */ + +#define KFD_HIQ_TIMEOUT (500) + +#define KFD_FENCE_COMPLETED (100) +#define KFD_FENCE_INIT (10) +#define KFD_UNMAP_LATENCY (150) + +struct packet_manager { + struct device_queue_manager *dqm; + struct kernel_queue *priv_queue; + struct mutex lock; + bool allocated; + struct kfd_mem_obj *ib_buffer_obj; +}; + +int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); +void pm_uninit(struct packet_manager *pm); +int pm_send_set_resources(struct packet_manager *pm, + struct scheduling_resources *res); +int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); +int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, + uint32_t fence_value); + +int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, + enum kfd_preempt_type_filter mode, + uint32_t filter_param, bool reset, + unsigned int sdma_engine); + +void pm_release_ib(struct packet_manager *pm); + +uint64_t kfd_get_number_elems(struct kfd_dev *kfd); +phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, + struct kfd_process *process); + +#endif diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_process.c new file mode 100644 index 000000000..945d6226d --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -0,0 +1,433 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include + +struct mm_struct; + +#include "kfd_priv.h" + +/* + * Initial size for the array of queues. + * The allocated size is doubled each time + * it is exceeded up to MAX_PROCESS_QUEUES. + */ +#define INITIAL_QUEUE_ARRAY_SIZE 16 + +/* + * List of struct kfd_process (field kfd_process). 
+ * Unique/indexed by mm_struct* + */ +#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ +static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); +static DEFINE_MUTEX(kfd_processes_mutex); + +DEFINE_STATIC_SRCU(kfd_processes_srcu); + +static struct workqueue_struct *kfd_process_wq; + +struct kfd_process_release_work { + struct work_struct kfd_work; + struct kfd_process *p; +}; + +static struct kfd_process *find_process(const struct task_struct *thread); +static struct kfd_process *create_process(const struct task_struct *thread); + +void kfd_process_create_wq(void) +{ + if (!kfd_process_wq) + kfd_process_wq = create_workqueue("kfd_process_wq"); +} + +void kfd_process_destroy_wq(void) +{ + if (kfd_process_wq) { + flush_workqueue(kfd_process_wq); + destroy_workqueue(kfd_process_wq); + kfd_process_wq = NULL; + } +} + +struct kfd_process *kfd_create_process(const struct task_struct *thread) +{ + struct kfd_process *process; + + BUG_ON(!kfd_process_wq); + + if (thread->mm == NULL) + return ERR_PTR(-EINVAL); + + /* Only the pthreads threading model is supported. */ + if (thread->group_leader->mm != thread->mm) + return ERR_PTR(-EINVAL); + + /* Take mmap_sem because we call __mmu_notifier_register inside */ + down_write(&thread->mm->mmap_sem); + + /* + * take kfd processes mutex before starting of process creation + * so there won't be a case where two threads of the same process + * create two kfd_process structures + */ + mutex_lock(&kfd_processes_mutex); + + /* A prior open of /dev/kfd could have already created the process. 
*/ + process = find_process(thread); + if (process) + pr_debug("kfd: process already found\n"); + + if (!process) + process = create_process(thread); + + mutex_unlock(&kfd_processes_mutex); + + up_write(&thread->mm->mmap_sem); + + return process; +} + +struct kfd_process *kfd_get_process(const struct task_struct *thread) +{ + struct kfd_process *process; + + if (thread->mm == NULL) + return ERR_PTR(-EINVAL); + + /* Only the pthreads threading model is supported. */ + if (thread->group_leader->mm != thread->mm) + return ERR_PTR(-EINVAL); + + process = find_process(thread); + + return process; +} + +static struct kfd_process *find_process_by_mm(const struct mm_struct *mm) +{ + struct kfd_process *process; + + hash_for_each_possible_rcu(kfd_processes_table, process, + kfd_processes, (uintptr_t)mm) + if (process->mm == mm) + return process; + + return NULL; +} + +static struct kfd_process *find_process(const struct task_struct *thread) +{ + struct kfd_process *p; + int idx; + + idx = srcu_read_lock(&kfd_processes_srcu); + p = find_process_by_mm(thread->mm); + srcu_read_unlock(&kfd_processes_srcu, idx); + + return p; +} + +static void kfd_process_wq_release(struct work_struct *work) +{ + struct kfd_process_release_work *my_work; + struct kfd_process_device *pdd, *temp; + struct kfd_process *p; + + my_work = (struct kfd_process_release_work *) work; + + p = my_work->p; + + pr_debug("Releasing process (pasid %d) in workqueue\n", + p->pasid); + + mutex_lock(&p->mutex); + + list_for_each_entry_safe(pdd, temp, &p->per_device_data, + per_device_list) { + pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n", + pdd->dev->id, p->pasid); + + amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid); + list_del(&pdd->per_device_list); + + kfree(pdd); + } + + kfd_pasid_free(p->pasid); + + mutex_unlock(&p->mutex); + + mutex_destroy(&p->mutex); + + kfree(p->queues); + + kfree(p); + + kfree((void *)work); +} + +static void kfd_process_destroy_delayed(struct rcu_head 
*rcu) +{ + struct kfd_process_release_work *work; + struct kfd_process *p; + + BUG_ON(!kfd_process_wq); + + p = container_of(rcu, struct kfd_process, rcu); + BUG_ON(atomic_read(&p->mm->mm_count) <= 0); + + mmdrop(p->mm); + + work = (struct kfd_process_release_work *) + kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC); + + if (work) { + INIT_WORK((struct work_struct *) work, kfd_process_wq_release); + work->p = p; + queue_work(kfd_process_wq, (struct work_struct *) work); + } +} + +static void kfd_process_notifier_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct kfd_process *p; + + /* + * The kfd_process structure can not be free because the + * mmu_notifier srcu is read locked + */ + p = container_of(mn, struct kfd_process, mmu_notifier); + BUG_ON(p->mm != mm); + + mutex_lock(&kfd_processes_mutex); + hash_del_rcu(&p->kfd_processes); + mutex_unlock(&kfd_processes_mutex); + synchronize_srcu(&kfd_processes_srcu); + + mutex_lock(&p->mutex); + + /* In case our notifier is called before IOMMU notifier */ + pqm_uninit(&p->pqm); + + mutex_unlock(&p->mutex); + + /* + * Because we drop mm_count inside kfd_process_destroy_delayed + * and because the mmu_notifier_unregister function also drop + * mm_count we need to take an extra count here. 
+ */ + atomic_inc(&p->mm->mm_count); + mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm); + mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed); +} + +static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { + .release = kfd_process_notifier_release, +}; + +static struct kfd_process *create_process(const struct task_struct *thread) +{ + struct kfd_process *process; + int err = -ENOMEM; + + process = kzalloc(sizeof(*process), GFP_KERNEL); + + if (!process) + goto err_alloc_process; + + process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE, + sizeof(process->queues[0]), GFP_KERNEL); + if (!process->queues) + goto err_alloc_queues; + + process->pasid = kfd_pasid_alloc(); + if (process->pasid == 0) + goto err_alloc_pasid; + + mutex_init(&process->mutex); + + process->mm = thread->mm; + + /* register notifier */ + process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops; + err = __mmu_notifier_register(&process->mmu_notifier, process->mm); + if (err) + goto err_mmu_notifier; + + hash_add_rcu(kfd_processes_table, &process->kfd_processes, + (uintptr_t)process->mm); + + process->lead_thread = thread->group_leader; + + process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE; + + INIT_LIST_HEAD(&process->per_device_data); + + err = pqm_init(&process->pqm, process); + if (err != 0) + goto err_process_pqm_init; + + /* init process apertures*/ + process->is_32bit_user_mode = is_compat_task(); + if (kfd_init_apertures(process) != 0) + goto err_init_apretures; + + return process; + +err_init_apretures: + pqm_uninit(&process->pqm); +err_process_pqm_init: + hash_del_rcu(&process->kfd_processes); + synchronize_rcu(); + mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm); +err_mmu_notifier: + kfd_pasid_free(process->pasid); +err_alloc_pasid: + kfree(process->queues); +err_alloc_queues: + kfree(process); +err_alloc_process: + return ERR_PTR(err); +} + +struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, + 
struct kfd_process *p) +{ + struct kfd_process_device *pdd = NULL; + + list_for_each_entry(pdd, &p->per_device_data, per_device_list) + if (pdd->dev == dev) + break; + + return pdd; +} + +struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, + struct kfd_process *p) +{ + struct kfd_process_device *pdd = NULL; + + pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); + if (pdd != NULL) { + pdd->dev = dev; + INIT_LIST_HEAD(&pdd->qpd.queues_list); + INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); + pdd->qpd.dqm = dev->dqm; + list_add(&pdd->per_device_list, &p->per_device_data); + } + + return pdd; +} + +/* + * Direct the IOMMU to bind the process (specifically the pasid->mm) + * to the device. + * Unbinding occurs when the process dies or the device is removed. + * + * Assumes that the process lock is held. + */ +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, + struct kfd_process *p) +{ + struct kfd_process_device *pdd; + int err; + + pdd = kfd_get_process_device_data(dev, p); + if (!pdd) { + pr_err("Process device data doesn't exist\n"); + return ERR_PTR(-ENOMEM); + } + + if (pdd->bound) + return pdd; + + err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread); + if (err < 0) + return ERR_PTR(err); + + pdd->bound = true; + + return pdd; +} + +void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) +{ + struct kfd_process *p; + struct kfd_process_device *pdd; + int idx, i; + + BUG_ON(dev == NULL); + + idx = srcu_read_lock(&kfd_processes_srcu); + + hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) + if (p->pasid == pasid) + break; + + srcu_read_unlock(&kfd_processes_srcu, idx); + + BUG_ON(p->pasid != pasid); + + mutex_lock(&p->mutex); + + pqm_uninit(&p->pqm); + + pdd = kfd_get_process_device_data(dev, p); + + /* + * Just mark pdd as unbound, because we still need it to call + * amd_iommu_unbind_pasid() in when the process exits. 
+ * We don't call amd_iommu_unbind_pasid() here + * because the IOMMU called us. + */ + if (pdd) + pdd->bound = false; + + mutex_unlock(&p->mutex); +} + +struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) +{ + return list_first_entry(&p->per_device_data, + struct kfd_process_device, + per_device_list); +} + +struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p, + struct kfd_process_device *pdd) +{ + if (list_is_last(&pdd->per_device_list, &p->per_device_data)) + return NULL; + return list_next_entry(pdd, per_device_list); +} + +bool kfd_has_process_device_data(struct kfd_process *p) +{ + return !(list_empty(&p->per_device_data)); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c new file mode 100644 index 000000000..530b82c4e --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -0,0 +1,359 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include +#include "kfd_device_queue_manager.h" +#include "kfd_priv.h" +#include "kfd_kernel_queue.h" + +static inline struct process_queue_node *get_queue_by_qid( + struct process_queue_manager *pqm, unsigned int qid) +{ + struct process_queue_node *pqn; + + BUG_ON(!pqm); + + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + if (pqn->q && pqn->q->properties.queue_id == qid) + return pqn; + if (pqn->kq && pqn->kq->queue->properties.queue_id == qid) + return pqn; + } + + return NULL; +} + +static int find_available_queue_slot(struct process_queue_manager *pqm, + unsigned int *qid) +{ + unsigned long found; + + BUG_ON(!pqm || !qid); + + pr_debug("kfd: in %s\n", __func__); + + found = find_first_zero_bit(pqm->queue_slot_bitmap, + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); + + pr_debug("kfd: the new slot id %lu\n", found); + + if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { + pr_info("amdkfd: Can not open more queues for process with pasid %d\n", + pqm->process->pasid); + return -ENOMEM; + } + + set_bit(found, pqm->queue_slot_bitmap); + *qid = found; + + return 0; +} + +int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) +{ + BUG_ON(!pqm); + + INIT_LIST_HEAD(&pqm->queues); + pqm->queue_slot_bitmap = + kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + BITS_PER_BYTE), GFP_KERNEL); + if (pqm->queue_slot_bitmap == NULL) + return -ENOMEM; + pqm->process = p; + + return 0; +} + +void pqm_uninit(struct process_queue_manager *pqm) +{ + int retval; + struct process_queue_node *pqn, *next; + + BUG_ON(!pqm); + + pr_debug("In func %s\n", __func__); + + list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { + retval = 
pqm_destroy_queue( + pqm, + (pqn->q != NULL) ? + pqn->q->properties.queue_id : + pqn->kq->queue->properties.queue_id); + + if (retval != 0) { + pr_err("kfd: failed to destroy queue\n"); + return; + } + } + kfree(pqm->queue_slot_bitmap); + pqm->queue_slot_bitmap = NULL; +} + +static int create_cp_queue(struct process_queue_manager *pqm, + struct kfd_dev *dev, struct queue **q, + struct queue_properties *q_properties, + struct file *f, unsigned int qid) +{ + int retval; + + retval = 0; + + /* Doorbell initialized in user space*/ + q_properties->doorbell_ptr = NULL; + + q_properties->doorbell_off = + kfd_queue_id_to_doorbell(dev, pqm->process, qid); + + /* let DQM handle it*/ + q_properties->vmid = 0; + q_properties->queue_id = qid; + + retval = init_queue(q, *q_properties); + if (retval != 0) + goto err_init_queue; + + (*q)->device = dev; + (*q)->process = pqm->process; + + pr_debug("kfd: PQM After init queue"); + + return retval; + +err_init_queue: + return retval; +} + +int pqm_create_queue(struct process_queue_manager *pqm, + struct kfd_dev *dev, + struct file *f, + struct queue_properties *properties, + unsigned int flags, + enum kfd_queue_type type, + unsigned int *qid) +{ + int retval; + struct kfd_process_device *pdd; + struct queue_properties q_properties; + struct queue *q; + struct process_queue_node *pqn; + struct kernel_queue *kq; + + BUG_ON(!pqm || !dev || !properties || !qid); + + memset(&q_properties, 0, sizeof(struct queue_properties)); + memcpy(&q_properties, properties, sizeof(struct queue_properties)); + q = NULL; + kq = NULL; + + pdd = kfd_get_process_device_data(dev, pqm->process); + if (!pdd) { + pr_err("Process device data doesn't exist\n"); + return -1; + } + + retval = find_available_queue_slot(pqm, qid); + if (retval != 0) + return retval; + + if (list_empty(&pqm->queues)) { + pdd->qpd.pqm = pqm; + dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); + } + + pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL); + if (!pqn) { + retval 
= -ENOMEM; + goto err_allocate_pqn; + } + + switch (type) { + case KFD_QUEUE_TYPE_SDMA: + case KFD_QUEUE_TYPE_COMPUTE: + /* check if there is over subscription */ + if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && + ((dev->dqm->processes_count >= VMID_PER_DEVICE) || + (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) { + pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n"); + retval = -EPERM; + goto err_create_queue; + } + + retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid); + if (retval != 0) + goto err_create_queue; + pqn->q = q; + pqn->kq = NULL; + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, + &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); + print_queue(q); + break; + case KFD_QUEUE_TYPE_DIQ: + kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ); + if (kq == NULL) { + retval = -ENOMEM; + goto err_create_queue; + } + kq->queue->properties.queue_id = *qid; + pqn->kq = kq; + pqn->q = NULL; + retval = dev->dqm->ops.create_kernel_queue(dev->dqm, + kq, &pdd->qpd); + break; + default: + BUG(); + break; + } + + if (retval != 0) { + pr_debug("Error dqm create queue\n"); + goto err_create_queue; + } + + pr_debug("kfd: PQM After DQM create queue\n"); + + list_add(&pqn->process_queue_list, &pqm->queues); + + if (q) { + *properties = q->properties; + pr_debug("kfd: PQM done creating queue\n"); + print_queue_properties(properties); + } + + return retval; + +err_create_queue: + kfree(pqn); +err_allocate_pqn: + /* check if queues list is empty unregister process from device */ + clear_bit(*qid, pqm->queue_slot_bitmap); + if (list_empty(&pqm->queues)) + dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); + return retval; +} + +int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) +{ + struct process_queue_node *pqn; + struct kfd_process_device *pdd; + struct device_queue_manager *dqm; + struct kfd_dev *dev; + int retval; + + dqm = 
NULL; + + BUG_ON(!pqm); + retval = 0; + + pr_debug("kfd: In Func %s\n", __func__); + + pqn = get_queue_by_qid(pqm, qid); + if (pqn == NULL) { + pr_err("kfd: queue id does not match any known queue\n"); + return -EINVAL; + } + + dev = NULL; + if (pqn->kq) + dev = pqn->kq->dev; + if (pqn->q) + dev = pqn->q->device; + BUG_ON(!dev); + + pdd = kfd_get_process_device_data(dev, pqm->process); + if (!pdd) { + pr_err("Process device data doesn't exist\n"); + return -1; + } + + if (pqn->kq) { + /* destroy kernel queue (DIQ) */ + dqm = pqn->kq->dev->dqm; + dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); + kernel_queue_uninit(pqn->kq); + } + + if (pqn->q) { + dqm = pqn->q->device->dqm; + retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); + if (retval != 0) + return retval; + + uninit_queue(pqn->q); + } + + list_del(&pqn->process_queue_list); + kfree(pqn); + clear_bit(qid, pqm->queue_slot_bitmap); + + if (list_empty(&pqm->queues)) + dqm->ops.unregister_process(dqm, &pdd->qpd); + + return retval; +} + +int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, + struct queue_properties *p) +{ + int retval; + struct process_queue_node *pqn; + + BUG_ON(!pqm); + + pqn = get_queue_by_qid(pqm, qid); + if (!pqn) { + pr_debug("amdkfd: No queue %d exists for update operation\n", + qid); + return -EFAULT; + } + + pqn->q->properties.queue_address = p->queue_address; + pqn->q->properties.queue_size = p->queue_size; + pqn->q->properties.queue_percent = p->queue_percent; + pqn->q->properties.priority = p->priority; + + retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, + pqn->q); + if (retval != 0) + return retval; + + return 0; +} + +static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue( + struct process_queue_manager *pqm, + unsigned int qid) +{ + struct process_queue_node *pqn; + + BUG_ON(!pqm); + + pqn = get_queue_by_qid(pqm, qid); + if (pqn && pqn->kq) + return pqn->kq; + + return NULL; +} + + diff --git 
a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_queue.c new file mode 100644 index 000000000..9a0c90b07 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -0,0 +1,85 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include "kfd_priv.h" + +void print_queue_properties(struct queue_properties *q) +{ + if (!q) + return; + + pr_debug("Printing queue properties:\n"); + pr_debug("Queue Type: %u\n", q->type); + pr_debug("Queue Size: %llu\n", q->queue_size); + pr_debug("Queue percent: %u\n", q->queue_percent); + pr_debug("Queue Address: 0x%llX\n", q->queue_address); + pr_debug("Queue Id: %u\n", q->queue_id); + pr_debug("Queue Process Vmid: %u\n", q->vmid); + pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr); + pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr); + pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr); + pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off); +} + +void print_queue(struct queue *q) +{ + if (!q) + return; + pr_debug("Printing queue:\n"); + pr_debug("Queue Type: %u\n", q->properties.type); + pr_debug("Queue Size: %llu\n", q->properties.queue_size); + pr_debug("Queue percent: %u\n", q->properties.queue_percent); + pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address); + pr_debug("Queue Id: %u\n", q->properties.queue_id); + pr_debug("Queue Process Vmid: %u\n", q->properties.vmid); + pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr); + pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr); + pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr); + pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off); + pr_debug("Queue MQD Address: 0x%p\n", q->mqd); + pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr); + pr_debug("Queue Process Address: 0x%p\n", q->process); + pr_debug("Queue Device Address: 0x%p\n", q->device); +} + +int init_queue(struct queue **q, struct queue_properties properties) +{ + struct queue *tmp; + + BUG_ON(!q); + + tmp = kzalloc(sizeof(struct queue), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + memcpy(&tmp->properties, &properties, sizeof(struct queue_properties)); + + *q = tmp; + return 0; +} + +void uninit_queue(struct queue *q) +{ + 
kfree(q); +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.c new file mode 100644 index 000000000..c25728bc3 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -0,0 +1,1254 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kfd_priv.h" +#include "kfd_crat.h" +#include "kfd_topology.h" + +static struct list_head topology_device_list; +static int topology_crat_parsed; +static struct kfd_system_properties sys_props; + +static DECLARE_RWSEM(topology_lock); + +struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +{ + struct kfd_topology_device *top_dev; + struct kfd_dev *device = NULL; + + down_read(&topology_lock); + + list_for_each_entry(top_dev, &topology_device_list, list) + if (top_dev->gpu_id == gpu_id) { + device = top_dev->gpu; + break; + } + + up_read(&topology_lock); + + return device; +} + +struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) +{ + struct kfd_topology_device *top_dev; + struct kfd_dev *device = NULL; + + down_read(&topology_lock); + + list_for_each_entry(top_dev, &topology_device_list, list) + if (top_dev->gpu->pdev == pdev) { + device = top_dev->gpu; + break; + } + + up_read(&topology_lock); + + return device; +} + +static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size) +{ + struct acpi_table_header *crat_table; + acpi_status status; + + if (!size) + return -EINVAL; + + /* + * Fetch the CRAT table from ACPI + */ + status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); + if (status == AE_NOT_FOUND) { + pr_warn("CRAT table not found\n"); + return -ENODATA; + } else if (ACPI_FAILURE(status)) { + const char *err = acpi_format_exception(status); + + pr_err("CRAT table error: %s\n", err); + return -EINVAL; + } + + if (*size >= crat_table->length && crat_image != NULL) + memcpy(crat_image, crat_table, crat_table->length); + + *size = crat_table->length; + + return 0; +} + +static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev, + struct crat_subtype_computeunit *cu) +{ + BUG_ON(!dev); + BUG_ON(!cu); + + dev->node_props.cpu_cores_count = cu->num_cpu_cores; + dev->node_props.cpu_core_id_base = cu->processor_id_low; + if 
(cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT) + dev->node_props.capability |= HSA_CAP_ATS_PRESENT; + + pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores, + cu->processor_id_low); +} + +static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev, + struct crat_subtype_computeunit *cu) +{ + BUG_ON(!dev); + BUG_ON(!cu); + + dev->node_props.simd_id_base = cu->processor_id_low; + dev->node_props.simd_count = cu->num_simd_cores; + dev->node_props.lds_size_in_kb = cu->lds_size_in_kb; + dev->node_props.max_waves_per_simd = cu->max_waves_simd; + dev->node_props.wave_front_size = cu->wave_front_size; + dev->node_props.mem_banks_count = cu->num_banks; + dev->node_props.array_count = cu->num_arrays; + dev->node_props.cu_per_simd_array = cu->num_cu_per_array; + dev->node_props.simd_per_cu = cu->num_simd_per_cu; + dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu; + if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE) + dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE; + pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores, + cu->processor_id_low); +} + +/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */ +static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu) +{ + struct kfd_topology_device *dev; + int i = 0; + + BUG_ON(!cu); + + pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n", + cu->proximity_domain, cu->hsa_capability); + list_for_each_entry(dev, &topology_device_list, list) { + if (cu->proximity_domain == i) { + if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT) + kfd_populated_cu_info_cpu(dev, cu); + + if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT) + kfd_populated_cu_info_gpu(dev, cu); + break; + } + i++; + } + + return 0; +} + +/* + * kfd_parse_subtype_mem is called when the topology mutex is + * already acquired + */ +static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem) +{ + struct kfd_mem_properties *props; + struct kfd_topology_device *dev; + int i = 0; 
+ + BUG_ON(!mem); + + pr_info("Found memory entry in CRAT table with proximity_domain=%d\n", + mem->promixity_domain); + list_for_each_entry(dev, &topology_device_list, list) { + if (mem->promixity_domain == i) { + props = kfd_alloc_struct(props); + if (props == NULL) + return -ENOMEM; + + if (dev->node_props.cpu_cores_count == 0) + props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE; + else + props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM; + + if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE) + props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE; + if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE) + props->flags |= HSA_MEM_FLAGS_NON_VOLATILE; + + props->size_in_bytes = + ((uint64_t)mem->length_high << 32) + + mem->length_low; + props->width = mem->width; + + dev->mem_bank_count++; + list_add_tail(&props->list, &dev->mem_props); + + break; + } + i++; + } + + return 0; +} + +/* + * kfd_parse_subtype_cache is called when the topology mutex + * is already acquired + */ +static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache) +{ + struct kfd_cache_properties *props; + struct kfd_topology_device *dev; + uint32_t id; + + BUG_ON(!cache); + + id = cache->processor_id_low; + + pr_info("Found cache entry in CRAT table with processor_id=%d\n", id); + list_for_each_entry(dev, &topology_device_list, list) + if (id == dev->node_props.cpu_core_id_base || + id == dev->node_props.simd_id_base) { + props = kfd_alloc_struct(props); + if (props == NULL) + return -ENOMEM; + + props->processor_id_low = id; + props->cache_level = cache->cache_level; + props->cache_size = cache->cache_size; + props->cacheline_size = cache->cache_line_size; + props->cachelines_per_tag = cache->lines_per_tag; + props->cache_assoc = cache->associativity; + props->cache_latency = cache->cache_latency; + + if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE) + props->cache_type |= HSA_CACHE_TYPE_DATA; + if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE) + props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION; + if (cache->flags & 
CRAT_CACHE_FLAGS_CPU_CACHE) + props->cache_type |= HSA_CACHE_TYPE_CPU; + if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE) + props->cache_type |= HSA_CACHE_TYPE_HSACU; + + dev->cache_count++; + dev->node_props.caches_count++; + list_add_tail(&props->list, &dev->cache_props); + + break; + } + + return 0; +} + +/* + * kfd_parse_subtype_iolink is called when the topology mutex + * is already acquired + */ +static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink) +{ + struct kfd_iolink_properties *props; + struct kfd_topology_device *dev; + uint32_t i = 0; + uint32_t id_from; + uint32_t id_to; + + BUG_ON(!iolink); + + id_from = iolink->proximity_domain_from; + id_to = iolink->proximity_domain_to; + + pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from); + list_for_each_entry(dev, &topology_device_list, list) { + if (id_from == i) { + props = kfd_alloc_struct(props); + if (props == NULL) + return -ENOMEM; + + props->node_from = id_from; + props->node_to = id_to; + props->ver_maj = iolink->version_major; + props->ver_min = iolink->version_minor; + + /* + * weight factor (derived from CDIR), currently always 1 + */ + props->weight = 1; + + props->min_latency = iolink->minimum_latency; + props->max_latency = iolink->maximum_latency; + props->min_bandwidth = iolink->minimum_bandwidth_mbs; + props->max_bandwidth = iolink->maximum_bandwidth_mbs; + props->rec_transfer_size = + iolink->recommended_transfer_size; + + dev->io_link_count++; + dev->node_props.io_links_count++; + list_add_tail(&props->list, &dev->io_link_props); + + break; + } + i++; + } + + return 0; +} + +static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr) +{ + struct crat_subtype_computeunit *cu; + struct crat_subtype_memory *mem; + struct crat_subtype_cache *cache; + struct crat_subtype_iolink *iolink; + int ret = 0; + + BUG_ON(!sub_type_hdr); + + switch (sub_type_hdr->type) { + case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY: + cu = (struct crat_subtype_computeunit 
*)sub_type_hdr; + ret = kfd_parse_subtype_cu(cu); + break; + case CRAT_SUBTYPE_MEMORY_AFFINITY: + mem = (struct crat_subtype_memory *)sub_type_hdr; + ret = kfd_parse_subtype_mem(mem); + break; + case CRAT_SUBTYPE_CACHE_AFFINITY: + cache = (struct crat_subtype_cache *)sub_type_hdr; + ret = kfd_parse_subtype_cache(cache); + break; + case CRAT_SUBTYPE_TLB_AFFINITY: + /* + * For now, nothing to do here + */ + pr_info("Found TLB entry in CRAT table (not processing)\n"); + break; + case CRAT_SUBTYPE_CCOMPUTE_AFFINITY: + /* + * For now, nothing to do here + */ + pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n"); + break; + case CRAT_SUBTYPE_IOLINK_AFFINITY: + iolink = (struct crat_subtype_iolink *)sub_type_hdr; + ret = kfd_parse_subtype_iolink(iolink); + break; + default: + pr_warn("Unknown subtype (%d) in CRAT\n", + sub_type_hdr->type); + } + + return ret; +} + +static void kfd_release_topology_device(struct kfd_topology_device *dev) +{ + struct kfd_mem_properties *mem; + struct kfd_cache_properties *cache; + struct kfd_iolink_properties *iolink; + + BUG_ON(!dev); + + list_del(&dev->list); + + while (dev->mem_props.next != &dev->mem_props) { + mem = container_of(dev->mem_props.next, + struct kfd_mem_properties, list); + list_del(&mem->list); + kfree(mem); + } + + while (dev->cache_props.next != &dev->cache_props) { + cache = container_of(dev->cache_props.next, + struct kfd_cache_properties, list); + list_del(&cache->list); + kfree(cache); + } + + while (dev->io_link_props.next != &dev->io_link_props) { + iolink = container_of(dev->io_link_props.next, + struct kfd_iolink_properties, list); + list_del(&iolink->list); + kfree(iolink); + } + + kfree(dev); + + sys_props.num_devices--; +} + +static void kfd_release_live_view(void) +{ + struct kfd_topology_device *dev; + + while (topology_device_list.next != &topology_device_list) { + dev = container_of(topology_device_list.next, + struct kfd_topology_device, list); + kfd_release_topology_device(dev); +} + + 
memset(&sys_props, 0, sizeof(sys_props)); +} + +static struct kfd_topology_device *kfd_create_topology_device(void) +{ + struct kfd_topology_device *dev; + + dev = kfd_alloc_struct(dev); + if (dev == NULL) { + pr_err("No memory to allocate a topology device"); + return NULL; + } + + INIT_LIST_HEAD(&dev->mem_props); + INIT_LIST_HEAD(&dev->cache_props); + INIT_LIST_HEAD(&dev->io_link_props); + + list_add_tail(&dev->list, &topology_device_list); + sys_props.num_devices++; + + return dev; +} + +static int kfd_parse_crat_table(void *crat_image) +{ + struct kfd_topology_device *top_dev; + struct crat_subtype_generic *sub_type_hdr; + uint16_t node_id; + int ret; + struct crat_header *crat_table = (struct crat_header *)crat_image; + uint16_t num_nodes; + uint32_t image_len; + + if (!crat_image) + return -EINVAL; + + num_nodes = crat_table->num_domains; + image_len = crat_table->length; + + pr_info("Parsing CRAT table with %d nodes\n", num_nodes); + + for (node_id = 0; node_id < num_nodes; node_id++) { + top_dev = kfd_create_topology_device(); + if (!top_dev) { + kfd_release_live_view(); + return -ENOMEM; + } + } + + sys_props.platform_id = + (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK; + sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id); + sys_props.platform_rev = crat_table->revision; + + sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1); + while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) < + ((char *)crat_image) + image_len) { + if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) { + ret = kfd_parse_subtype(sub_type_hdr); + if (ret != 0) { + kfd_release_live_view(); + return ret; + } + } + + sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + + sub_type_hdr->length); + } + + sys_props.generation_count++; + topology_crat_parsed = 1; + + return 0; +} + + +#define sysfs_show_gen_prop(buffer, fmt, ...) 
\ + snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__) +#define sysfs_show_32bit_prop(buffer, name, value) \ + sysfs_show_gen_prop(buffer, "%s %u\n", name, value) +#define sysfs_show_64bit_prop(buffer, name, value) \ + sysfs_show_gen_prop(buffer, "%s %llu\n", name, value) +#define sysfs_show_32bit_val(buffer, value) \ + sysfs_show_gen_prop(buffer, "%u\n", value) +#define sysfs_show_str_val(buffer, value) \ + sysfs_show_gen_prop(buffer, "%s\n", value) + +static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + ssize_t ret; + + /* Making sure that the buffer is an empty string */ + buffer[0] = 0; + + if (attr == &sys_props.attr_genid) { + ret = sysfs_show_32bit_val(buffer, sys_props.generation_count); + } else if (attr == &sys_props.attr_props) { + sysfs_show_64bit_prop(buffer, "platform_oem", + sys_props.platform_oem); + sysfs_show_64bit_prop(buffer, "platform_id", + sys_props.platform_id); + ret = sysfs_show_64bit_prop(buffer, "platform_rev", + sys_props.platform_rev); + } else { + ret = -EINVAL; + } + + return ret; +} + +static const struct sysfs_ops sysprops_ops = { + .show = sysprops_show, +}; + +static struct kobj_type sysprops_type = { + .sysfs_ops = &sysprops_ops, +}; + +static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + ssize_t ret; + struct kfd_iolink_properties *iolink; + + /* Making sure that the buffer is an empty string */ + buffer[0] = 0; + + iolink = container_of(attr, struct kfd_iolink_properties, attr); + sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type); + sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj); + sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min); + sysfs_show_32bit_prop(buffer, "node_from", iolink->node_from); + sysfs_show_32bit_prop(buffer, "node_to", iolink->node_to); + sysfs_show_32bit_prop(buffer, "weight", iolink->weight); + sysfs_show_32bit_prop(buffer, "min_latency", iolink->min_latency); + 
sysfs_show_32bit_prop(buffer, "max_latency", iolink->max_latency); + sysfs_show_32bit_prop(buffer, "min_bandwidth", iolink->min_bandwidth); + sysfs_show_32bit_prop(buffer, "max_bandwidth", iolink->max_bandwidth); + sysfs_show_32bit_prop(buffer, "recommended_transfer_size", + iolink->rec_transfer_size); + ret = sysfs_show_32bit_prop(buffer, "flags", iolink->flags); + + return ret; +} + +static const struct sysfs_ops iolink_ops = { + .show = iolink_show, +}; + +static struct kobj_type iolink_type = { + .sysfs_ops = &iolink_ops, +}; + +static ssize_t mem_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + ssize_t ret; + struct kfd_mem_properties *mem; + + /* Making sure that the buffer is an empty string */ + buffer[0] = 0; + + mem = container_of(attr, struct kfd_mem_properties, attr); + sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type); + sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes); + sysfs_show_32bit_prop(buffer, "flags", mem->flags); + sysfs_show_32bit_prop(buffer, "width", mem->width); + ret = sysfs_show_32bit_prop(buffer, "mem_clk_max", mem->mem_clk_max); + + return ret; +} + +static const struct sysfs_ops mem_ops = { + .show = mem_show, +}; + +static struct kobj_type mem_type = { + .sysfs_ops = &mem_ops, +}; + +static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + ssize_t ret; + uint32_t i; + struct kfd_cache_properties *cache; + + /* Making sure that the buffer is an empty string */ + buffer[0] = 0; + + cache = container_of(attr, struct kfd_cache_properties, attr); + sysfs_show_32bit_prop(buffer, "processor_id_low", + cache->processor_id_low); + sysfs_show_32bit_prop(buffer, "level", cache->cache_level); + sysfs_show_32bit_prop(buffer, "size", cache->cache_size); + sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size); + sysfs_show_32bit_prop(buffer, "cache_lines_per_tag", + cache->cachelines_per_tag); + sysfs_show_32bit_prop(buffer, "association", 
cache->cache_assoc); + sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency); + sysfs_show_32bit_prop(buffer, "type", cache->cache_type); + snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer); + for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++) + ret = snprintf(buffer, PAGE_SIZE, "%s%d%s", + buffer, cache->sibling_map[i], + (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ? + "\n" : ","); + + return ret; +} + +static const struct sysfs_ops cache_ops = { + .show = kfd_cache_show, +}; + +static struct kobj_type cache_type = { + .sysfs_ops = &cache_ops, +}; + +static ssize_t node_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + struct kfd_topology_device *dev; + char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; + uint32_t i; + uint32_t log_max_watch_addr; + + /* Making sure that the buffer is an empty string */ + buffer[0] = 0; + + if (strcmp(attr->name, "gpu_id") == 0) { + dev = container_of(attr, struct kfd_topology_device, + attr_gpuid); + return sysfs_show_32bit_val(buffer, dev->gpu_id); + } + + if (strcmp(attr->name, "name") == 0) { + dev = container_of(attr, struct kfd_topology_device, + attr_name); + for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) { + public_name[i] = + (char)dev->node_props.marketing_name[i]; + if (dev->node_props.marketing_name[i] == 0) + break; + } + public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0; + return sysfs_show_str_val(buffer, public_name); + } + + dev = container_of(attr, struct kfd_topology_device, + attr_props); + sysfs_show_32bit_prop(buffer, "cpu_cores_count", + dev->node_props.cpu_cores_count); + sysfs_show_32bit_prop(buffer, "simd_count", + dev->node_props.simd_count); + + if (dev->mem_bank_count < dev->node_props.mem_banks_count) { + pr_warn("kfd: mem_banks_count truncated from %d to %d\n", + dev->node_props.mem_banks_count, + dev->mem_bank_count); + sysfs_show_32bit_prop(buffer, "mem_banks_count", + dev->mem_bank_count); + } else { + sysfs_show_32bit_prop(buffer, "mem_banks_count", + 
dev->node_props.mem_banks_count); + } + + sysfs_show_32bit_prop(buffer, "caches_count", + dev->node_props.caches_count); + sysfs_show_32bit_prop(buffer, "io_links_count", + dev->node_props.io_links_count); + sysfs_show_32bit_prop(buffer, "cpu_core_id_base", + dev->node_props.cpu_core_id_base); + sysfs_show_32bit_prop(buffer, "simd_id_base", + dev->node_props.simd_id_base); + sysfs_show_32bit_prop(buffer, "max_waves_per_simd", + dev->node_props.max_waves_per_simd); + sysfs_show_32bit_prop(buffer, "lds_size_in_kb", + dev->node_props.lds_size_in_kb); + sysfs_show_32bit_prop(buffer, "gds_size_in_kb", + dev->node_props.gds_size_in_kb); + sysfs_show_32bit_prop(buffer, "wave_front_size", + dev->node_props.wave_front_size); + sysfs_show_32bit_prop(buffer, "array_count", + dev->node_props.array_count); + sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine", + dev->node_props.simd_arrays_per_engine); + sysfs_show_32bit_prop(buffer, "cu_per_simd_array", + dev->node_props.cu_per_simd_array); + sysfs_show_32bit_prop(buffer, "simd_per_cu", + dev->node_props.simd_per_cu); + sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu", + dev->node_props.max_slots_scratch_cu); + sysfs_show_32bit_prop(buffer, "vendor_id", + dev->node_props.vendor_id); + sysfs_show_32bit_prop(buffer, "device_id", + dev->node_props.device_id); + sysfs_show_32bit_prop(buffer, "location_id", + dev->node_props.location_id); + + if (dev->gpu) { + log_max_watch_addr = + __ilog2_u32(dev->gpu->device_info->num_of_watch_points); + + if (log_max_watch_addr) { + dev->node_props.capability |= + HSA_CAP_WATCH_POINTS_SUPPORTED; + + dev->node_props.capability |= + ((log_max_watch_addr << + HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) & + HSA_CAP_WATCH_POINTS_TOTALBITS_MASK); + } + + sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute", + dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz( + dev->gpu->kgd)); + + sysfs_show_64bit_prop(buffer, "local_mem_size", + (unsigned long long int) 0); + + sysfs_show_32bit_prop(buffer, 
"fw_version", + dev->gpu->kfd2kgd->get_fw_version( + dev->gpu->kgd, + KGD_ENGINE_MEC1)); + sysfs_show_32bit_prop(buffer, "capability", + dev->node_props.capability); + } + + return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", + cpufreq_quick_get_max(0)/1000); +} + +static const struct sysfs_ops node_ops = { + .show = node_show, +}; + +static struct kobj_type node_type = { + .sysfs_ops = &node_ops, +}; + +static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr) +{ + sysfs_remove_file(kobj, attr); + kobject_del(kobj); + kobject_put(kobj); +} + +static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev) +{ + struct kfd_iolink_properties *iolink; + struct kfd_cache_properties *cache; + struct kfd_mem_properties *mem; + + BUG_ON(!dev); + + if (dev->kobj_iolink) { + list_for_each_entry(iolink, &dev->io_link_props, list) + if (iolink->kobj) { + kfd_remove_sysfs_file(iolink->kobj, + &iolink->attr); + iolink->kobj = NULL; + } + kobject_del(dev->kobj_iolink); + kobject_put(dev->kobj_iolink); + dev->kobj_iolink = NULL; + } + + if (dev->kobj_cache) { + list_for_each_entry(cache, &dev->cache_props, list) + if (cache->kobj) { + kfd_remove_sysfs_file(cache->kobj, + &cache->attr); + cache->kobj = NULL; + } + kobject_del(dev->kobj_cache); + kobject_put(dev->kobj_cache); + dev->kobj_cache = NULL; + } + + if (dev->kobj_mem) { + list_for_each_entry(mem, &dev->mem_props, list) + if (mem->kobj) { + kfd_remove_sysfs_file(mem->kobj, &mem->attr); + mem->kobj = NULL; + } + kobject_del(dev->kobj_mem); + kobject_put(dev->kobj_mem); + dev->kobj_mem = NULL; + } + + if (dev->kobj_node) { + sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid); + sysfs_remove_file(dev->kobj_node, &dev->attr_name); + sysfs_remove_file(dev->kobj_node, &dev->attr_props); + kobject_del(dev->kobj_node); + kobject_put(dev->kobj_node); + dev->kobj_node = NULL; + } +} + +static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, + uint32_t id) +{ + struct 
kfd_iolink_properties *iolink; + struct kfd_cache_properties *cache; + struct kfd_mem_properties *mem; + int ret; + uint32_t i; + + BUG_ON(!dev); + + /* + * Creating the sysfs folders + */ + BUG_ON(dev->kobj_node); + dev->kobj_node = kfd_alloc_struct(dev->kobj_node); + if (!dev->kobj_node) + return -ENOMEM; + + ret = kobject_init_and_add(dev->kobj_node, &node_type, + sys_props.kobj_nodes, "%d", id); + if (ret < 0) + return ret; + + dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); + if (!dev->kobj_mem) + return -ENOMEM; + + dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node); + if (!dev->kobj_cache) + return -ENOMEM; + + dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node); + if (!dev->kobj_iolink) + return -ENOMEM; + + /* + * Creating sysfs files for node properties + */ + dev->attr_gpuid.name = "gpu_id"; + dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&dev->attr_gpuid); + dev->attr_name.name = "name"; + dev->attr_name.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&dev->attr_name); + dev->attr_props.name = "properties"; + dev->attr_props.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&dev->attr_props); + ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid); + if (ret < 0) + return ret; + ret = sysfs_create_file(dev->kobj_node, &dev->attr_name); + if (ret < 0) + return ret; + ret = sysfs_create_file(dev->kobj_node, &dev->attr_props); + if (ret < 0) + return ret; + + i = 0; + list_for_each_entry(mem, &dev->mem_props, list) { + mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (!mem->kobj) + return -ENOMEM; + ret = kobject_init_and_add(mem->kobj, &mem_type, + dev->kobj_mem, "%d", i); + if (ret < 0) + return ret; + + mem->attr.name = "properties"; + mem->attr.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&mem->attr); + ret = sysfs_create_file(mem->kobj, &mem->attr); + if (ret < 0) + return ret; + i++; + } + + i = 0; + list_for_each_entry(cache, &dev->cache_props, list) { + cache->kobj = 
kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (!cache->kobj) + return -ENOMEM; + ret = kobject_init_and_add(cache->kobj, &cache_type, + dev->kobj_cache, "%d", i); + if (ret < 0) + return ret; + + cache->attr.name = "properties"; + cache->attr.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&cache->attr); + ret = sysfs_create_file(cache->kobj, &cache->attr); + if (ret < 0) + return ret; + i++; + } + + i = 0; + list_for_each_entry(iolink, &dev->io_link_props, list) { + iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (!iolink->kobj) + return -ENOMEM; + ret = kobject_init_and_add(iolink->kobj, &iolink_type, + dev->kobj_iolink, "%d", i); + if (ret < 0) + return ret; + + iolink->attr.name = "properties"; + iolink->attr.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&iolink->attr); + ret = sysfs_create_file(iolink->kobj, &iolink->attr); + if (ret < 0) + return ret; + i++; +} + + return 0; +} + +static int kfd_build_sysfs_node_tree(void) +{ + struct kfd_topology_device *dev; + int ret; + uint32_t i = 0; + + list_for_each_entry(dev, &topology_device_list, list) { + ret = kfd_build_sysfs_node_entry(dev, i); + if (ret < 0) + return ret; + i++; + } + + return 0; +} + +static void kfd_remove_sysfs_node_tree(void) +{ + struct kfd_topology_device *dev; + + list_for_each_entry(dev, &topology_device_list, list) + kfd_remove_sysfs_node_entry(dev); +} + +static int kfd_topology_update_sysfs(void) +{ + int ret; + + pr_info("Creating topology SYSFS entries\n"); + if (sys_props.kobj_topology == NULL) { + sys_props.kobj_topology = + kfd_alloc_struct(sys_props.kobj_topology); + if (!sys_props.kobj_topology) + return -ENOMEM; + + ret = kobject_init_and_add(sys_props.kobj_topology, + &sysprops_type, &kfd_device->kobj, + "topology"); + if (ret < 0) + return ret; + + sys_props.kobj_nodes = kobject_create_and_add("nodes", + sys_props.kobj_topology); + if (!sys_props.kobj_nodes) + return -ENOMEM; + + sys_props.attr_genid.name = "generation_id"; + sys_props.attr_genid.mode = 
KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&sys_props.attr_genid); + ret = sysfs_create_file(sys_props.kobj_topology, + &sys_props.attr_genid); + if (ret < 0) + return ret; + + sys_props.attr_props.name = "system_properties"; + sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE; + sysfs_attr_init(&sys_props.attr_props); + ret = sysfs_create_file(sys_props.kobj_topology, + &sys_props.attr_props); + if (ret < 0) + return ret; + } + + kfd_remove_sysfs_node_tree(); + + return kfd_build_sysfs_node_tree(); +} + +static void kfd_topology_release_sysfs(void) +{ + kfd_remove_sysfs_node_tree(); + if (sys_props.kobj_topology) { + sysfs_remove_file(sys_props.kobj_topology, + &sys_props.attr_genid); + sysfs_remove_file(sys_props.kobj_topology, + &sys_props.attr_props); + if (sys_props.kobj_nodes) { + kobject_del(sys_props.kobj_nodes); + kobject_put(sys_props.kobj_nodes); + sys_props.kobj_nodes = NULL; + } + kobject_del(sys_props.kobj_topology); + kobject_put(sys_props.kobj_topology); + sys_props.kobj_topology = NULL; + } +} + +int kfd_topology_init(void) +{ + void *crat_image = NULL; + size_t image_size = 0; + int ret; + + /* + * Initialize the head for the topology device list + */ + INIT_LIST_HEAD(&topology_device_list); + init_rwsem(&topology_lock); + topology_crat_parsed = 0; + + memset(&sys_props, 0, sizeof(sys_props)); + + /* + * Get the CRAT image from the ACPI + */ + ret = kfd_topology_get_crat_acpi(crat_image, &image_size); + if (ret == 0 && image_size > 0) { + pr_info("Found CRAT image with size=%zd\n", image_size); + crat_image = kmalloc(image_size, GFP_KERNEL); + if (!crat_image) { + ret = -ENOMEM; + pr_err("No memory for allocating CRAT image\n"); + goto err; + } + ret = kfd_topology_get_crat_acpi(crat_image, &image_size); + + if (ret == 0) { + down_write(&topology_lock); + ret = kfd_parse_crat_table(crat_image); + if (ret == 0) + ret = kfd_topology_update_sysfs(); + up_write(&topology_lock); + } else { + pr_err("Couldn't get CRAT table size from ACPI\n"); + } + 
kfree(crat_image); + } else if (ret == -ENODATA) { + ret = 0; + } else { + pr_err("Couldn't get CRAT table size from ACPI\n"); + } + +err: + pr_info("Finished initializing topology ret=%d\n", ret); + return ret; +} + +void kfd_topology_shutdown(void) +{ + kfd_topology_release_sysfs(); + kfd_release_live_view(); +} + +static void kfd_debug_print_topology(void) +{ + struct kfd_topology_device *dev; + uint32_t i = 0; + + pr_info("DEBUG PRINT OF TOPOLOGY:"); + list_for_each_entry(dev, &topology_device_list, list) { + pr_info("Node: %d\n", i); + pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no")); + pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count); + pr_info("\tSIMD count: %d", dev->node_props.simd_count); + i++; + } +} + +static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) +{ + uint32_t hashout; + uint32_t buf[7]; + int i; + + if (!gpu) + return 0; + + buf[0] = gpu->pdev->devfn; + buf[1] = gpu->pdev->subsystem_vendor; + buf[2] = gpu->pdev->subsystem_device; + buf[3] = gpu->pdev->device; + buf[4] = gpu->pdev->bus->number; + buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) + & 0xffffffff); + buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32); + + for (i = 0, hashout = 0; i < 7; i++) + hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); + + return hashout; +} + +static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) +{ + struct kfd_topology_device *dev; + struct kfd_topology_device *out_dev = NULL; + + BUG_ON(!gpu); + + list_for_each_entry(dev, &topology_device_list, list) + if (dev->gpu == NULL && dev->node_props.simd_count > 0) { + dev->gpu = gpu; + out_dev = dev; + break; + } + + return out_dev; +} + +static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival) +{ + /* + * TODO: Generate an event for thunk about the arrival/removal + * of the GPU + */ +} + +int kfd_topology_add_device(struct kfd_dev *gpu) +{ + uint32_t gpu_id; + struct kfd_topology_device *dev; + int res; + + BUG_ON(!gpu); + + gpu_id = 
kfd_generate_gpu_id(gpu); + + pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id); + + down_write(&topology_lock); + /* + * Try to assign the GPU to existing topology device (generated from + * CRAT table + */ + dev = kfd_assign_gpu(gpu); + if (!dev) { + pr_info("GPU was not found in the current topology. Extending.\n"); + kfd_debug_print_topology(); + dev = kfd_create_topology_device(); + if (!dev) { + res = -ENOMEM; + goto err; + } + dev->gpu = gpu; + + /* + * TODO: Make a call to retrieve topology information from the + * GPU vBIOS + */ + + /* + * Update the SYSFS tree, since we added another topology device + */ + if (kfd_topology_update_sysfs() < 0) + kfd_topology_release_sysfs(); + + } + + dev->gpu_id = gpu_id; + gpu->id = gpu_id; + dev->node_props.vendor_id = gpu->pdev->vendor; + dev->node_props.device_id = gpu->pdev->device; + dev->node_props.location_id = (gpu->pdev->bus->number << 24) + + (gpu->pdev->devfn & 0xffffff); + /* + * TODO: Retrieve max engine clock values from KGD + */ + + res = 0; + +err: + up_write(&topology_lock); + + if (res == 0) + kfd_notify_gpu_change(gpu_id, 1); + + return res; +} + +int kfd_topology_remove_device(struct kfd_dev *gpu) +{ + struct kfd_topology_device *dev; + uint32_t gpu_id; + int res = -ENODEV; + + BUG_ON(!gpu); + + down_write(&topology_lock); + + list_for_each_entry(dev, &topology_device_list, list) + if (dev->gpu == gpu) { + gpu_id = dev->gpu_id; + kfd_remove_sysfs_node_entry(dev); + kfd_release_topology_device(dev); + res = 0; + if (kfd_topology_update_sysfs() < 0) + kfd_topology_release_sysfs(); + break; + } + + up_write(&topology_lock); + + if (res == 0) + kfd_notify_gpu_change(gpu_id, 0); + + return res; +} + +/* + * When idx is out of bounds, the function will return NULL + */ +struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx) +{ + + struct kfd_topology_device *top_dev; + struct kfd_dev *device = NULL; + uint8_t device_idx = 0; + + down_read(&topology_lock); + + list_for_each_entry(top_dev, 
&topology_device_list, list) { + if (device_idx == idx) { + device = top_dev->gpu; + break; + } + + device_idx++; + } + + up_read(&topology_lock); + + return device; + +} diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.h new file mode 100644 index 000000000..989624b3c --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -0,0 +1,168 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __KFD_TOPOLOGY_H__ +#define __KFD_TOPOLOGY_H__ + +#include +#include +#include "kfd_priv.h" + +#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128 + +#define HSA_CAP_HOT_PLUGGABLE 0x00000001 +#define HSA_CAP_ATS_PRESENT 0x00000002 +#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004 +#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008 +#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010 +#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020 +#define HSA_CAP_VA_LIMIT 0x00000040 +#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080 +#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 +#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 +#define HSA_CAP_RESERVED 0xfffff000 + +struct kfd_node_properties { + uint32_t cpu_cores_count; + uint32_t simd_count; + uint32_t mem_banks_count; + uint32_t caches_count; + uint32_t io_links_count; + uint32_t cpu_core_id_base; + uint32_t simd_id_base; + uint32_t capability; + uint32_t max_waves_per_simd; + uint32_t lds_size_in_kb; + uint32_t gds_size_in_kb; + uint32_t wave_front_size; + uint32_t array_count; + uint32_t simd_arrays_per_engine; + uint32_t cu_per_simd_array; + uint32_t simd_per_cu; + uint32_t max_slots_scratch_cu; + uint32_t engine_id; + uint32_t vendor_id; + uint32_t device_id; + uint32_t location_id; + uint32_t max_engine_clk_fcompute; + uint32_t max_engine_clk_ccompute; + uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; +}; + +#define HSA_MEM_HEAP_TYPE_SYSTEM 0 +#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 +#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2 +#define HSA_MEM_HEAP_TYPE_GPU_GDS 3 +#define HSA_MEM_HEAP_TYPE_GPU_LDS 4 +#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5 + +#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 +#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 +#define HSA_MEM_FLAGS_RESERVED 0xfffffffc + +struct kfd_mem_properties { + struct list_head list; + uint32_t heap_type; + uint64_t size_in_bytes; + uint32_t flags; + uint32_t width; + uint32_t mem_clk_max; + struct kobject *kobj; + struct attribute attr; +}; + +#define KFD_TOPOLOGY_CPU_SIBLINGS 
256 + +#define HSA_CACHE_TYPE_DATA 0x00000001 +#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002 +#define HSA_CACHE_TYPE_CPU 0x00000004 +#define HSA_CACHE_TYPE_HSACU 0x00000008 +#define HSA_CACHE_TYPE_RESERVED 0xfffffff0 + +struct kfd_cache_properties { + struct list_head list; + uint32_t processor_id_low; + uint32_t cache_level; + uint32_t cache_size; + uint32_t cacheline_size; + uint32_t cachelines_per_tag; + uint32_t cache_assoc; + uint32_t cache_latency; + uint32_t cache_type; + uint8_t sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS]; + struct kobject *kobj; + struct attribute attr; +}; + +struct kfd_iolink_properties { + struct list_head list; + uint32_t iolink_type; + uint32_t ver_maj; + uint32_t ver_min; + uint32_t node_from; + uint32_t node_to; + uint32_t weight; + uint32_t min_latency; + uint32_t max_latency; + uint32_t min_bandwidth; + uint32_t max_bandwidth; + uint32_t rec_transfer_size; + uint32_t flags; + struct kobject *kobj; + struct attribute attr; +}; + +struct kfd_topology_device { + struct list_head list; + uint32_t gpu_id; + struct kfd_node_properties node_props; + uint32_t mem_bank_count; + struct list_head mem_props; + uint32_t cache_count; + struct list_head cache_props; + uint32_t io_link_count; + struct list_head io_link_props; + struct kfd_dev *gpu; + struct kobject *kobj_node; + struct kobject *kobj_mem; + struct kobject *kobj_cache; + struct kobject *kobj_iolink; + struct attribute attr_gpuid; + struct attribute attr_name; + struct attribute attr_props; +}; + +struct kfd_system_properties { + uint32_t num_devices; /* Number of H-NUMA nodes */ + uint32_t generation_count; + uint64_t platform_oem; + uint64_t platform_id; + uint64_t platform_rev; + struct kobject *kobj_topology; + struct kobject *kobj_nodes; + struct attribute attr_genid; + struct attribute attr_props; +}; + + + +#endif /* __KFD_TOPOLOGY_H__ */ diff --git a/kernel/drivers/gpu/drm/amd/include/cik_structs.h b/kernel/drivers/gpu/drm/amd/include/cik_structs.h new file mode 100644 index 
000000000..749eab94e --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/include/cik_structs.h @@ -0,0 +1,293 @@ +/* + * Copyright 2012 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef CIK_STRUCTS_H_ +#define CIK_STRUCTS_H_ + +struct cik_mqd { + uint32_t header; + uint32_t compute_dispatch_initiator; + uint32_t compute_dim_x; + uint32_t compute_dim_y; + uint32_t compute_dim_z; + uint32_t compute_start_x; + uint32_t compute_start_y; + uint32_t compute_start_z; + uint32_t compute_num_thread_x; + uint32_t compute_num_thread_y; + uint32_t compute_num_thread_z; + uint32_t compute_pipelinestat_enable; + uint32_t compute_perfcount_enable; + uint32_t compute_pgm_lo; + uint32_t compute_pgm_hi; + uint32_t compute_tba_lo; + uint32_t compute_tba_hi; + uint32_t compute_tma_lo; + uint32_t compute_tma_hi; + uint32_t compute_pgm_rsrc1; + uint32_t compute_pgm_rsrc2; + uint32_t compute_vmid; + uint32_t compute_resource_limits; + uint32_t compute_static_thread_mgmt_se0; + uint32_t compute_static_thread_mgmt_se1; + uint32_t compute_tmpring_size; + uint32_t compute_static_thread_mgmt_se2; + uint32_t compute_static_thread_mgmt_se3; + uint32_t compute_restart_x; + uint32_t compute_restart_y; + uint32_t compute_restart_z; + uint32_t compute_thread_trace_enable; + uint32_t compute_misc_reserved; + uint32_t compute_user_data_0; + uint32_t compute_user_data_1; + uint32_t compute_user_data_2; + uint32_t compute_user_data_3; + uint32_t compute_user_data_4; + uint32_t compute_user_data_5; + uint32_t compute_user_data_6; + uint32_t compute_user_data_7; + uint32_t compute_user_data_8; + uint32_t compute_user_data_9; + uint32_t compute_user_data_10; + uint32_t compute_user_data_11; + uint32_t compute_user_data_12; + uint32_t compute_user_data_13; + uint32_t compute_user_data_14; + uint32_t compute_user_data_15; + uint32_t cp_compute_csinvoc_count_lo; + uint32_t cp_compute_csinvoc_count_hi; + uint32_t cp_mqd_base_addr_lo; + uint32_t cp_mqd_base_addr_hi; + uint32_t cp_hqd_active; + uint32_t cp_hqd_vmid; + uint32_t cp_hqd_persistent_state; + uint32_t cp_hqd_pipe_priority; + uint32_t cp_hqd_queue_priority; + uint32_t cp_hqd_quantum; + uint32_t cp_hqd_pq_base_lo; 
+ uint32_t cp_hqd_pq_base_hi; + uint32_t cp_hqd_pq_rptr; + uint32_t cp_hqd_pq_rptr_report_addr_lo; + uint32_t cp_hqd_pq_rptr_report_addr_hi; + uint32_t cp_hqd_pq_wptr_poll_addr_lo; + uint32_t cp_hqd_pq_wptr_poll_addr_hi; + uint32_t cp_hqd_pq_doorbell_control; + uint32_t cp_hqd_pq_wptr; + uint32_t cp_hqd_pq_control; + uint32_t cp_hqd_ib_base_addr_lo; + uint32_t cp_hqd_ib_base_addr_hi; + uint32_t cp_hqd_ib_rptr; + uint32_t cp_hqd_ib_control; + uint32_t cp_hqd_iq_timer; + uint32_t cp_hqd_iq_rptr; + uint32_t cp_hqd_dequeue_request; + uint32_t cp_hqd_dma_offload; + uint32_t cp_hqd_sema_cmd; + uint32_t cp_hqd_msg_type; + uint32_t cp_hqd_atomic0_preop_lo; + uint32_t cp_hqd_atomic0_preop_hi; + uint32_t cp_hqd_atomic1_preop_lo; + uint32_t cp_hqd_atomic1_preop_hi; + uint32_t cp_hqd_hq_status0; + uint32_t cp_hqd_hq_control0; + uint32_t cp_mqd_control; + uint32_t cp_mqd_query_time_lo; + uint32_t cp_mqd_query_time_hi; + uint32_t cp_mqd_connect_start_time_lo; + uint32_t cp_mqd_connect_start_time_hi; + uint32_t cp_mqd_connect_end_time_lo; + uint32_t cp_mqd_connect_end_time_hi; + uint32_t cp_mqd_connect_end_wf_count; + uint32_t cp_mqd_connect_end_pq_rptr; + uint32_t cp_mqd_connect_end_pq_wptr; + uint32_t cp_mqd_connect_end_ib_rptr; + uint32_t reserved_96; + uint32_t reserved_97; + uint32_t reserved_98; + uint32_t reserved_99; + uint32_t iqtimer_pkt_header; + uint32_t iqtimer_pkt_dw0; + uint32_t iqtimer_pkt_dw1; + uint32_t iqtimer_pkt_dw2; + uint32_t iqtimer_pkt_dw3; + uint32_t iqtimer_pkt_dw4; + uint32_t iqtimer_pkt_dw5; + uint32_t iqtimer_pkt_dw6; + uint32_t reserved_108; + uint32_t reserved_109; + uint32_t reserved_110; + uint32_t reserved_111; + uint32_t queue_doorbell_id0; + uint32_t queue_doorbell_id1; + uint32_t queue_doorbell_id2; + uint32_t queue_doorbell_id3; + uint32_t queue_doorbell_id4; + uint32_t queue_doorbell_id5; + uint32_t queue_doorbell_id6; + uint32_t queue_doorbell_id7; + uint32_t queue_doorbell_id8; + uint32_t queue_doorbell_id9; + uint32_t 
queue_doorbell_id10; + uint32_t queue_doorbell_id11; + uint32_t queue_doorbell_id12; + uint32_t queue_doorbell_id13; + uint32_t queue_doorbell_id14; + uint32_t queue_doorbell_id15; +}; + +struct cik_sdma_rlc_registers { + uint32_t sdma_rlc_rb_cntl; + uint32_t sdma_rlc_rb_base; + uint32_t sdma_rlc_rb_base_hi; + uint32_t sdma_rlc_rb_rptr; + uint32_t sdma_rlc_rb_wptr; + uint32_t sdma_rlc_rb_wptr_poll_cntl; + uint32_t sdma_rlc_rb_wptr_poll_addr_hi; + uint32_t sdma_rlc_rb_wptr_poll_addr_lo; + uint32_t sdma_rlc_rb_rptr_addr_hi; + uint32_t sdma_rlc_rb_rptr_addr_lo; + uint32_t sdma_rlc_ib_cntl; + uint32_t sdma_rlc_ib_rptr; + uint32_t sdma_rlc_ib_offset; + uint32_t sdma_rlc_ib_base_lo; + uint32_t sdma_rlc_ib_base_hi; + uint32_t sdma_rlc_ib_size; + uint32_t sdma_rlc_skip_cntl; + uint32_t sdma_rlc_context_status; + uint32_t sdma_rlc_doorbell; + uint32_t sdma_rlc_virtual_addr; + uint32_t sdma_rlc_ape1_cntl; + uint32_t sdma_rlc_doorbell_log; + uint32_t reserved_22; + uint32_t reserved_23; + uint32_t reserved_24; + uint32_t reserved_25; + uint32_t reserved_26; + uint32_t reserved_27; + uint32_t reserved_28; + uint32_t reserved_29; + uint32_t reserved_30; + uint32_t reserved_31; + uint32_t reserved_32; + uint32_t reserved_33; + uint32_t reserved_34; + uint32_t reserved_35; + uint32_t reserved_36; + uint32_t reserved_37; + uint32_t reserved_38; + uint32_t reserved_39; + uint32_t reserved_40; + uint32_t reserved_41; + uint32_t reserved_42; + uint32_t reserved_43; + uint32_t reserved_44; + uint32_t reserved_45; + uint32_t reserved_46; + uint32_t reserved_47; + uint32_t reserved_48; + uint32_t reserved_49; + uint32_t reserved_50; + uint32_t reserved_51; + uint32_t reserved_52; + uint32_t reserved_53; + uint32_t reserved_54; + uint32_t reserved_55; + uint32_t reserved_56; + uint32_t reserved_57; + uint32_t reserved_58; + uint32_t reserved_59; + uint32_t reserved_60; + uint32_t reserved_61; + uint32_t reserved_62; + uint32_t reserved_63; + uint32_t reserved_64; + uint32_t reserved_65; 
+ uint32_t reserved_66; + uint32_t reserved_67; + uint32_t reserved_68; + uint32_t reserved_69; + uint32_t reserved_70; + uint32_t reserved_71; + uint32_t reserved_72; + uint32_t reserved_73; + uint32_t reserved_74; + uint32_t reserved_75; + uint32_t reserved_76; + uint32_t reserved_77; + uint32_t reserved_78; + uint32_t reserved_79; + uint32_t reserved_80; + uint32_t reserved_81; + uint32_t reserved_82; + uint32_t reserved_83; + uint32_t reserved_84; + uint32_t reserved_85; + uint32_t reserved_86; + uint32_t reserved_87; + uint32_t reserved_88; + uint32_t reserved_89; + uint32_t reserved_90; + uint32_t reserved_91; + uint32_t reserved_92; + uint32_t reserved_93; + uint32_t reserved_94; + uint32_t reserved_95; + uint32_t reserved_96; + uint32_t reserved_97; + uint32_t reserved_98; + uint32_t reserved_99; + uint32_t reserved_100; + uint32_t reserved_101; + uint32_t reserved_102; + uint32_t reserved_103; + uint32_t reserved_104; + uint32_t reserved_105; + uint32_t reserved_106; + uint32_t reserved_107; + uint32_t reserved_108; + uint32_t reserved_109; + uint32_t reserved_110; + uint32_t reserved_111; + uint32_t reserved_112; + uint32_t reserved_113; + uint32_t reserved_114; + uint32_t reserved_115; + uint32_t reserved_116; + uint32_t reserved_117; + uint32_t reserved_118; + uint32_t reserved_119; + uint32_t reserved_120; + uint32_t reserved_121; + uint32_t reserved_122; + uint32_t reserved_123; + uint32_t reserved_124; + uint32_t reserved_125; + uint32_t reserved_126; + uint32_t reserved_127; + uint32_t sdma_engine_id; + uint32_t sdma_queue_id; +}; + + + +#endif /* CIK_STRUCTS_H_ */ diff --git a/kernel/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/kernel/drivers/gpu/drm/amd/include/kgd_kfd_interface.h new file mode 100644 index 000000000..dabd94446 --- /dev/null +++ b/kernel/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -0,0 +1,203 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * This file defines the private interface between the + * AMD kernel graphics drivers and the AMD KFD. + */ + +#ifndef KGD_KFD_INTERFACE_H_INCLUDED +#define KGD_KFD_INTERFACE_H_INCLUDED + +#include + +struct pci_dev; + +#define KFD_INTERFACE_VERSION 1 + +struct kfd_dev; +struct kgd_dev; + +struct kgd_mem; + +enum kgd_memory_pool { + KGD_POOL_SYSTEM_CACHEABLE = 1, + KGD_POOL_SYSTEM_WRITECOMBINE = 2, + KGD_POOL_FRAMEBUFFER = 3, +}; + +enum kgd_engine_type { + KGD_ENGINE_PFP = 1, + KGD_ENGINE_ME, + KGD_ENGINE_CE, + KGD_ENGINE_MEC1, + KGD_ENGINE_MEC2, + KGD_ENGINE_RLC, + KGD_ENGINE_SDMA, + KGD_ENGINE_MAX +}; + +struct kgd2kfd_shared_resources { + /* Bit n == 1 means VMID n is available for KFD. */ + unsigned int compute_vmid_bitmap; + + /* Compute pipes are counted starting from MEC0/pipe0 as 0. */ + unsigned int first_compute_pipe; + + /* Number of MEC pipes available for KFD. 
*/ + unsigned int compute_pipe_count; + + /* Base address of doorbell aperture. */ + phys_addr_t doorbell_physical_address; + + /* Size in bytes of doorbell aperture. */ + size_t doorbell_aperture_size; + + /* Number of bytes at start of aperture reserved for KGD. */ + size_t doorbell_start_offset; +}; + +/** + * struct kfd2kgd_calls + * + * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture. + * The buffer can be used for mqds, hpds, kernel queue, fence and runlists + * + * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture + * + * @get_vmem_size: Retrieves (physical) size of VRAM + * + * @get_gpu_clock_counter: Retrieves GPU clock counter + * + * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz + * + * @program_sh_mem_settings: A function that should initiate the memory + * properties such as main aperture memory type (cache / non cached) and + * secondary aperture base address, size and memory type. + * This function is used only for no cp scheduling mode. + * + * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp + * scheduling mode. Only used for no cp scheduling mode. + * + * @init_pipeline: Initialized the compute pipelines. + * + * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp + * sceduling mode. + * + * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot. + * used only for no HWS mode. + * + * @hqd_is_occupies: Checks if a hqd slot is occupied. + * + * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot. + * + * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied. + * + * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that + * SDMA hqd slot. + * + * @get_fw_version: Returns FW versions from the header + * + * This structure contains function pointers to services that the kgd driver + * provides to amdkfd driver. 
+ * + */ +struct kfd2kgd_calls { + int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, + void **mem_obj, uint64_t *gpu_addr, + void **cpu_ptr); + + void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); + + uint64_t (*get_vmem_size)(struct kgd_dev *kgd); + uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd); + + uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd); + + /* Register access functions */ + void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, + uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); + + int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid); + + int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id, + uint32_t hpd_size, uint64_t hpd_gpu_addr); + + int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr); + + int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd); + + bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id); + + int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id); + + bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd); + + int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd, + unsigned int timeout); + + uint16_t (*get_fw_version)(struct kgd_dev *kgd, + enum kgd_engine_type type); +}; + +/** + * struct kgd2kfd_calls + * + * @exit: Notifies amdkfd that kgd module is unloaded + * + * @probe: Notifies amdkfd about a probe done on a device in the kgd driver. 
+ * + * @device_init: Initialize the newly probed device (if it is a device that + * amdkfd supports) + * + * @device_exit: Notifies amdkfd about a removal of a kgd device + * + * @suspend: Notifies amdkfd about a suspend action done to a kgd device + * + * @resume: Notifies amdkfd about a resume action done to a kgd device + * + * This structure contains function callback pointers so the kgd driver + * will notify to the amdkfd about certain status changes. + * + */ +struct kgd2kfd_calls { + void (*exit)(void); + struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev, + const struct kfd2kgd_calls *f2g); + bool (*device_init)(struct kfd_dev *kfd, + const struct kgd2kfd_shared_resources *gpu_resources); + void (*device_exit)(struct kfd_dev *kfd); + void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry); + void (*suspend)(struct kfd_dev *kfd); + int (*resume)(struct kfd_dev *kfd); +}; + +bool kgd2kfd_init(unsigned interface_version, + const struct kgd2kfd_calls **g2f); + +#endif /* KGD_KFD_INTERFACE_H_INCLUDED */ diff --git a/kernel/drivers/gpu/drm/armada/Kconfig b/kernel/drivers/gpu/drm/armada/Kconfig new file mode 100644 index 000000000..50ae88ad4 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/Kconfig @@ -0,0 +1,25 @@ +config DRM_ARMADA + tristate "DRM support for Marvell Armada SoCs" + depends on DRM && HAVE_CLK && ARM + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + help + Support the "LCD" controllers found on the Marvell Armada 510 + devices. There are two controllers on the device, each controller + supports graphics and video overlays. + + This driver provides no built-in acceleration; acceleration is + performed by other IP found on the SoC. This driver provides + kernel mode setting and buffer management to userspace. 
+ +config DRM_ARMADA_TDA1998X + bool "Support TDA1998X HDMI output" + depends on DRM_ARMADA != n + depends on I2C && DRM_I2C_NXP_TDA998X = y + default y + help + Support the TDA1998x HDMI output device found on the Solid-Run + CuBox. diff --git a/kernel/drivers/gpu/drm/armada/Makefile b/kernel/drivers/gpu/drm/armada/Makefile new file mode 100644 index 000000000..d6f43e061 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/Makefile @@ -0,0 +1,7 @@ +armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \ + armada_gem.o armada_output.o armada_overlay.o \ + armada_slave.o +armada-y += armada_510.o +armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o + +obj-$(CONFIG_DRM_ARMADA) := armada.o diff --git a/kernel/drivers/gpu/drm/armada/armada_510.c b/kernel/drivers/gpu/drm/armada/armada_510.c new file mode 100644 index 000000000..ad3d2ebf9 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_510.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Armada 510 (aka Dove) variant support + */ +#include +#include +#include +#include +#include "armada_crtc.h" +#include "armada_drm.h" +#include "armada_hw.h" + +static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev) +{ + struct clk *clk; + + clk = devm_clk_get(dev, "ext_ref_clk1"); + if (IS_ERR(clk)) + return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk); + + dcrtc->extclk[0] = clk; + + /* Lower the watermark so to eliminate jitter at higher bandwidths */ + armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F); + + return 0; +} + +/* + * Armada510 specific SCLK register selection. + * This gets called with sclk = NULL to test whether the mode is + * supportable, and again with sclk != NULL to set the clocks up for + * that. 
The former can return an error, but the latter is expected + * not to. + * + * We currently are pretty rudimentary here, always selecting + * EXT_REF_CLK_1 for LCD0 and erroring LCD1. This needs improvement! + */ +static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc, + const struct drm_display_mode *mode, uint32_t *sclk) +{ + struct clk *clk = dcrtc->extclk[0]; + int ret; + + if (dcrtc->num == 1) + return -EINVAL; + + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (dcrtc->clk != clk) { + ret = clk_prepare_enable(clk); + if (ret) + return ret; + dcrtc->clk = clk; + } + + if (sclk) { + uint32_t rate, ref, div; + + rate = mode->clock * 1000; + ref = clk_round_rate(clk, rate); + div = DIV_ROUND_UP(ref, rate); + if (div < 1) + div = 1; + + clk_set_rate(clk, ref); + *sclk = div | SCLK_510_EXTCLK1; + } + + return 0; +} + +const struct armada_variant armada510_ops = { + .has_spu_adv_reg = true, + .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND, + .init = armada510_crtc_init, + .compute_clock = armada510_crtc_compute_clock, +}; diff --git a/kernel/drivers/gpu/drm/armada/armada_crtc.c b/kernel/drivers/gpu/drm/armada/armada_crtc.c new file mode 100644 index 000000000..42d2ffa08 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_crtc.c @@ -0,0 +1,1234 @@ +/* + * Copyright (C) 2012 Russell King + * Rewritten from the dovefb driver, and Armada510 manuals. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "armada_crtc.h" +#include "armada_drm.h" +#include "armada_fb.h" +#include "armada_gem.h" +#include "armada_hw.h" + +struct armada_frame_work { + struct drm_pending_vblank_event *event; + struct armada_regs regs[4]; + struct drm_framebuffer *old_fb; +}; + +enum csc_mode { + CSC_AUTO = 0, + CSC_YUV_CCIR601 = 1, + CSC_YUV_CCIR709 = 2, + CSC_RGB_COMPUTER = 1, + CSC_RGB_STUDIO = 2, +}; + +/* + * A note about interlacing. Let's consider HDMI 1920x1080i. + * The timing parameters we have from X are: + * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot + * 1920 2448 2492 2640 1080 1084 1094 1125 + * Which get translated to: + * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot + * 1920 2448 2492 2640 540 542 547 562 + * + * This is how it is defined by CEA-861-D - line and pixel numbers are + * referenced to the rising edge of VSYNC and HSYNC. Total clocks per + * line: 2640. The odd frame, the first active line is at line 21, and + * the even frame, the first active line is 584. + * + * LN: 560 561 562 563 567 568 569 + * DE: ~~~|____________________________//__________________________ + * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____ + * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________ + * 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge). + * + * LN: 1123 1124 1125 1 5 6 7 + * DE: ~~~|____________________________//__________________________ + * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____ + * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________ + * 23 blanking lines + * + * The Armada LCD Controller line and pixel numbers are, like X timings, + * referenced to the top left of the active frame. + * + * So, translating these to our LCD controller: + * Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128. + * Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448. 
+ * Note: Vsync front porch remains constant! + * + * if (odd_frame) { + * vtotal = mode->crtc_vtotal + 1; + * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1; + * vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2 + * } else { + * vtotal = mode->crtc_vtotal; + * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay; + * vhorizpos = mode->crtc_hsync_start; + * } + * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end; + * + * So, we need to reprogram these registers on each vsync event: + * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL + * + * Note: we do not use the frame done interrupts because these appear + * to happen too early, and lead to jitter on the display (presumably + * they occur at the end of the last active line, before the vsync back + * porch, which we're reprogramming.) + */ + +void +armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs) +{ + while (regs->offset != ~0) { + void __iomem *reg = dcrtc->base + regs->offset; + uint32_t val; + + val = regs->mask; + if (val != 0) + val &= readl_relaxed(reg); + writel_relaxed(val | regs->val, reg); + ++regs; + } +} + +#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON) + +static void armada_drm_crtc_update(struct armada_crtc *dcrtc) +{ + uint32_t dumb_ctrl; + + dumb_ctrl = dcrtc->cfg_dumb_ctrl; + + if (!dpms_blanked(dcrtc->dpms)) + dumb_ctrl |= CFG_DUMB_ENA; + + /* + * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might + * be using SPI or GPIO. If we set this to DUMB_BLANK, we will + * force LCD_D[23:0] to output blank color, overriding the GPIO or + * SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode. + */ + if (dpms_blanked(dcrtc->dpms) && + (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) { + dumb_ctrl &= ~DUMB_MASK; + dumb_ctrl |= DUMB_BLANK; + } + + /* + * The documentation doesn't indicate what the normal state of + * the sync signals are. 
Sebastian Hesselbart kindly probed + * these signals on his board to determine their state. + * + * The non-inverted state of the sync signals is active high. + * Setting these bits makes the appropriate signal active low. + */ + if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC) + dumb_ctrl |= CFG_INV_CSYNC; + if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC) + dumb_ctrl |= CFG_INV_HSYNC; + if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC) + dumb_ctrl |= CFG_INV_VSYNC; + + if (dcrtc->dumb_ctrl != dumb_ctrl) { + dcrtc->dumb_ctrl = dumb_ctrl; + writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL); + } +} + +static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb, + int x, int y, struct armada_regs *regs, bool interlaced) +{ + struct armada_gem_object *obj = drm_fb_obj(fb); + unsigned pitch = fb->pitches[0]; + unsigned offset = y * pitch + x * fb->bits_per_pixel / 8; + uint32_t addr_odd, addr_even; + unsigned i = 0; + + DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n", + pitch, x, y, fb->bits_per_pixel); + + addr_odd = addr_even = obj->dev_addr + offset; + + if (interlaced) { + addr_even += pitch; + pitch *= 2; + } + + /* write offset, base, and pitch */ + armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0); + armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1); + armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH); + + return i; +} + +static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc, + struct armada_frame_work *work) +{ + struct drm_device *dev = dcrtc->crtc.dev; + unsigned long flags; + int ret; + + ret = drm_vblank_get(dev, dcrtc->num); + if (ret) { + DRM_ERROR("failed to acquire vblank counter\n"); + return ret; + } + + spin_lock_irqsave(&dev->event_lock, flags); + if (!dcrtc->frame_work) + dcrtc->frame_work = work; + else + ret = -EBUSY; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (ret) + drm_vblank_put(dev, dcrtc->num); + + return ret; +} + +static void 
armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc) +{ + struct drm_device *dev = dcrtc->crtc.dev; + struct armada_frame_work *work = dcrtc->frame_work; + + dcrtc->frame_work = NULL; + + armada_drm_crtc_update_regs(dcrtc, work->regs); + + if (work->event) + drm_send_vblank_event(dev, dcrtc->num, work->event); + + drm_vblank_put(dev, dcrtc->num); + + /* Finally, queue the process-half of the cleanup. */ + __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb); + kfree(work); +} + +static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc, + struct drm_framebuffer *fb, bool force) +{ + struct armada_frame_work *work; + + if (!fb) + return; + + if (force) { + /* Display is disabled, so just drop the old fb */ + drm_framebuffer_unreference(fb); + return; + } + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (work) { + int i = 0; + work->event = NULL; + work->old_fb = fb; + armada_reg_queue_end(work->regs, i); + + if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0) + return; + + kfree(work); + } + + /* + * Oops - just drop the reference immediately and hope for + * the best. The worst that will happen is the buffer gets + * reused before it has finished being displayed. + */ + drm_framebuffer_unreference(fb); +} + +static void armada_drm_vblank_off(struct armada_crtc *dcrtc) +{ + struct drm_device *dev = dcrtc->crtc.dev; + + /* + * Tell the DRM core that vblank IRQs aren't going to happen for + * a while. This cleans up any pending vblank events for us. + */ + drm_crtc_vblank_off(&dcrtc->crtc); + + /* Handle any pending flip event. 
*/ + spin_lock_irq(&dev->event_lock); + if (dcrtc->frame_work) + armada_drm_crtc_complete_frame_work(dcrtc); + spin_unlock_irq(&dev->event_lock); +} + +void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b, + int idx) +{ +} + +void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + int idx) +{ +} + +/* The mode_config.mutex will be held for this call */ +static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + + if (dcrtc->dpms != dpms) { + dcrtc->dpms = dpms; + armada_drm_crtc_update(dcrtc); + if (dpms_blanked(dpms)) + armada_drm_vblank_off(dcrtc); + else + drm_crtc_vblank_on(&dcrtc->crtc); + } +} + +/* + * Prepare for a mode set. Turn off overlay to ensure that we don't end + * up with the overlay size being bigger than the active screen size. + * We rely upon X refreshing this state after the mode set has completed. + * + * The mode_config.mutex will be held for this call + */ +static void armada_drm_crtc_prepare(struct drm_crtc *crtc) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct drm_plane *plane; + + /* + * If we have an overlay plane associated with this CRTC, disable + * it before the modeset to avoid its coordinates being outside + * the new mode parameters. DRM doesn't provide help with this. 
+ */ + plane = dcrtc->plane; + if (plane) { + struct drm_framebuffer *fb = plane->fb; + + plane->funcs->disable_plane(plane); + plane->fb = NULL; + plane->crtc = NULL; + drm_framebuffer_unreference(fb); + } +} + +/* The mode_config.mutex will be held for this call */ +static void armada_drm_crtc_commit(struct drm_crtc *crtc) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + + if (dcrtc->dpms != DRM_MODE_DPMS_ON) { + dcrtc->dpms = DRM_MODE_DPMS_ON; + armada_drm_crtc_update(dcrtc); + } +} + +/* The mode_config.mutex will be held for this call */ +static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, struct drm_display_mode *adj) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + int ret; + + /* We can't do interlaced modes if we don't have the SPU_ADV_REG */ + if (!dcrtc->variant->has_spu_adv_reg && + adj->flags & DRM_MODE_FLAG_INTERLACE) + return false; + + /* Check whether the display mode is possible */ + ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL); + if (ret) + return false; + + return true; +} + +static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) +{ + struct armada_vbl_event *e, *n; + void __iomem *base = dcrtc->base; + + if (stat & DMA_FF_UNDERFLOW) + DRM_ERROR("video underflow on crtc %u\n", dcrtc->num); + if (stat & GRA_FF_UNDERFLOW) + DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num); + + if (stat & VSYNC_IRQ) + drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num); + + spin_lock(&dcrtc->irq_lock); + + list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) { + list_del_init(&e->node); + drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + e->fn(dcrtc, e->data); + } + + if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) { + int i = stat & GRA_FRAME_IRQ0 ? 
0 : 1; + uint32_t val; + + writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH); + writel_relaxed(dcrtc->v[i].spu_v_h_total, + base + LCD_SPUT_V_H_TOTAL); + + val = readl_relaxed(base + LCD_SPU_ADV_REG); + val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN); + val |= dcrtc->v[i].spu_adv_reg; + writel_relaxed(val, base + LCD_SPU_ADV_REG); + } + + if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) { + writel_relaxed(dcrtc->cursor_hw_pos, + base + LCD_SPU_HWC_OVSA_HPXL_VLN); + writel_relaxed(dcrtc->cursor_hw_sz, + base + LCD_SPU_HWC_HPXL_VLN); + armada_updatel(CFG_HWC_ENA, + CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA, + base + LCD_SPU_DMA_CTRL0); + dcrtc->cursor_update = false; + armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA); + } + + spin_unlock(&dcrtc->irq_lock); + + if (stat & GRA_FRAME_IRQ) { + struct drm_device *dev = dcrtc->crtc.dev; + + spin_lock(&dev->event_lock); + if (dcrtc->frame_work) + armada_drm_crtc_complete_frame_work(dcrtc); + spin_unlock(&dev->event_lock); + + wake_up(&dcrtc->frame_wait); + } +} + +static irqreturn_t armada_drm_irq(int irq, void *arg) +{ + struct armada_crtc *dcrtc = arg; + u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); + + /* + * This is rediculous - rather than writing bits to clear, we + * have to set the actual status register value. This is racy. 
+ */ + writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); + + /* Mask out those interrupts we haven't enabled */ + v = stat & dcrtc->irq_ena; + + if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) { + armada_drm_crtc_irq(dcrtc, stat); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* These are locked by dev->vbl_lock */ +void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask) +{ + if (dcrtc->irq_ena & mask) { + dcrtc->irq_ena &= ~mask; + writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + } +} + +void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask) +{ + if ((dcrtc->irq_ena & mask) != mask) { + dcrtc->irq_ena |= mask; + writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask) + writel(0, dcrtc->base + LCD_SPU_IRQ_ISR); + } +} + +static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc) +{ + struct drm_display_mode *adj = &dcrtc->crtc.mode; + uint32_t val = 0; + + if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709) + val |= CFG_CSC_YUV_CCIR709; + if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO) + val |= CFG_CSC_RGB_STUDIO; + + /* + * In auto mode, set the colorimetry, based upon the HDMI spec. + * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use + * ITU601. It may be more appropriate to set this depending on + * the source - but what if the graphic frame is YUV and the + * video frame is RGB? + */ + if ((adj->hdisplay == 1280 && adj->vdisplay == 720 && + !(adj->flags & DRM_MODE_FLAG_INTERLACE)) || + (adj->hdisplay == 1920 && adj->vdisplay == 1080)) { + if (dcrtc->csc_yuv_mode == CSC_AUTO) + val |= CFG_CSC_YUV_CCIR709; + } + + /* + * We assume we're connected to a TV-like device, so the YUV->RGB + * conversion should produce a limited range. We should set this + * depending on the connectors attached to this CRTC, and what + * kind of device they report being connected. 
+ */ + if (dcrtc->csc_rgb_mode == CSC_AUTO) + val |= CFG_CSC_RGB_STUDIO; + + return val; +} + +/* The mode_config.mutex will be held for this call */ +static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, struct drm_display_mode *adj, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct armada_regs regs[17]; + uint32_t lm, rm, tm, bm, val, sclk; + unsigned long flags; + unsigned i; + bool interlaced; + + drm_framebuffer_reference(crtc->primary->fb); + + interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE); + + i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb, + x, y, regs, interlaced); + + rm = adj->crtc_hsync_start - adj->crtc_hdisplay; + lm = adj->crtc_htotal - adj->crtc_hsync_end; + bm = adj->crtc_vsync_start - adj->crtc_vdisplay; + tm = adj->crtc_vtotal - adj->crtc_vsync_end; + + DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n", + adj->crtc_hdisplay, + adj->crtc_hsync_start, + adj->crtc_hsync_end, + adj->crtc_htotal, lm, rm); + DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n", + adj->crtc_vdisplay, + adj->crtc_vsync_start, + adj->crtc_vsync_end, + adj->crtc_vtotal, tm, bm); + + /* Wait for pending flips to complete */ + wait_event(dcrtc->frame_wait, !dcrtc->frame_work); + + drm_crtc_vblank_off(crtc); + + crtc->mode = *adj; + + val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA; + if (val != dcrtc->dumb_ctrl) { + dcrtc->dumb_ctrl = val; + writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL); + } + + /* Now compute the divider for real */ + dcrtc->variant->compute_clock(dcrtc, adj, &sclk); + + /* Ensure graphic fifo is enabled */ + armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1); + armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV); + + if (interlaced ^ dcrtc->interlaced) { + if (adj->flags & DRM_MODE_FLAG_INTERLACE) + drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + else + drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + dcrtc->interlaced = interlaced; + 
} + + spin_lock_irqsave(&dcrtc->irq_lock, flags); + + /* Even interlaced/progressive frame */ + dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 | + adj->crtc_htotal; + dcrtc->v[1].spu_v_porch = tm << 16 | bm; + val = adj->crtc_hsync_start; + dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN | + dcrtc->variant->spu_adv_reg; + + if (interlaced) { + /* Odd interlaced frame */ + dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total + + (1 << 16); + dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1; + val = adj->crtc_hsync_start - adj->crtc_htotal / 2; + dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN | + dcrtc->variant->spu_adv_reg; + } else { + dcrtc->v[0] = dcrtc->v[1]; + } + + val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay; + + armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE); + armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN); + armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN); + armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH); + armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH); + armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total, + LCD_SPUT_V_H_TOTAL); + + if (dcrtc->variant->has_spu_adv_reg) { + armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg, + ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | + ADV_VSYNCOFFEN, LCD_SPU_ADV_REG); + } + + val = CFG_GRA_ENA | CFG_GRA_HSMOOTH; + val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt); + val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod); + + if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420) + val |= CFG_PALETTE_ENA; + + if (interlaced) + val |= CFG_GRA_FTOGGLE; + + armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT | + CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV | + CFG_SWAPYU | CFG_YUV2RGB) | + CFG_PALETTE_ENA | CFG_GRA_FTOGGLE, + LCD_SPU_DMA_CTRL0); + + val = adj->flags & DRM_MODE_FLAG_NVSYNC ? 
CFG_VSYNC_INV : 0; + armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1); + + val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc); + armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL); + armada_reg_queue_end(regs, i); + + armada_drm_crtc_update_regs(dcrtc, regs); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); + + armada_drm_crtc_update(dcrtc); + + drm_crtc_vblank_on(crtc); + armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms)); + + return 0; +} + +/* The mode_config.mutex will be held for this call */ +static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct armada_regs regs[4]; + unsigned i; + + i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs, + dcrtc->interlaced); + armada_reg_queue_end(regs, i); + + /* Wait for pending flips to complete */ + wait_event(dcrtc->frame_wait, !dcrtc->frame_work); + + /* Take a reference to the new fb as we're using it */ + drm_framebuffer_reference(crtc->primary->fb); + + /* Update the base in the CRTC */ + armada_drm_crtc_update_regs(dcrtc, regs); + + /* Drop our previously held reference */ + armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms)); + + return 0; +} + +/* The mode_config.mutex will be held for this call */ +static void armada_drm_crtc_disable(struct drm_crtc *crtc) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + + armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true); + + /* Power down most RAMs and FIFOs */ + writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 | + CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 | + CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); +} + +static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = { + .dpms = armada_drm_crtc_dpms, + .prepare = armada_drm_crtc_prepare, + .commit = 
armada_drm_crtc_commit, + .mode_fixup = armada_drm_crtc_mode_fixup, + .mode_set = armada_drm_crtc_mode_set, + .mode_set_base = armada_drm_crtc_mode_set_base, + .disable = armada_drm_crtc_disable, +}; + +static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix, + unsigned stride, unsigned width, unsigned height) +{ + uint32_t addr; + unsigned y; + + addr = SRAM_HWC32_RAM1; + for (y = 0; y < height; y++) { + uint32_t *p = &pix[y * stride]; + unsigned x; + + for (x = 0; x < width; x++, p++) { + uint32_t val = *p; + + val = (val & 0xff00ff00) | + (val & 0x000000ff) << 16 | + (val & 0x00ff0000) >> 16; + + writel_relaxed(val, + base + LCD_SPU_SRAM_WRDAT); + writel_relaxed(addr | SRAM_WRITE, + base + LCD_SPU_SRAM_CTRL); + readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN); + addr += 1; + if ((addr & 0x00ff) == 0) + addr += 0xf00; + if ((addr & 0x30ff) == 0) + addr = SRAM_HWC32_RAM2; + } + } +} + +static void armada_drm_crtc_cursor_tran(void __iomem *base) +{ + unsigned addr; + + for (addr = 0; addr < 256; addr++) { + /* write the default value */ + writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT); + writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN, + base + LCD_SPU_SRAM_CTRL); + } +} + +static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload) +{ + uint32_t xoff, xscr, w = dcrtc->cursor_w, s; + uint32_t yoff, yscr, h = dcrtc->cursor_h; + uint32_t para1; + + /* + * Calculate the visible width and height of the cursor, + * screen position, and the position in the cursor bitmap. 
+ */ + if (dcrtc->cursor_x < 0) { + xoff = -dcrtc->cursor_x; + xscr = 0; + w -= min(xoff, w); + } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) { + xoff = 0; + xscr = dcrtc->cursor_x; + w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0); + } else { + xoff = 0; + xscr = dcrtc->cursor_x; + } + + if (dcrtc->cursor_y < 0) { + yoff = -dcrtc->cursor_y; + yscr = 0; + h -= min(yoff, h); + } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) { + yoff = 0; + yscr = dcrtc->cursor_y; + h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0); + } else { + yoff = 0; + yscr = dcrtc->cursor_y; + } + + /* On interlaced modes, the vertical cursor size must be halved */ + s = dcrtc->cursor_w; + if (dcrtc->interlaced) { + s *= 2; + yscr /= 2; + h /= 2; + } + + if (!dcrtc->cursor_obj || !h || !w) { + spin_lock_irq(&dcrtc->irq_lock); + armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA); + dcrtc->cursor_update = false; + armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0); + spin_unlock_irq(&dcrtc->irq_lock); + return 0; + } + + para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1); + armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32, + dcrtc->base + LCD_SPU_SRAM_PARA1); + + /* + * Initialize the transparency if the SRAM was powered down. + * We must also reload the cursor data as well. 
+ */ + if (!(para1 & CFG_CSB_256x32)) { + armada_drm_crtc_cursor_tran(dcrtc->base); + reload = true; + } + + if (dcrtc->cursor_hw_sz != (h << 16 | w)) { + spin_lock_irq(&dcrtc->irq_lock); + armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA); + dcrtc->cursor_update = false; + armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0); + spin_unlock_irq(&dcrtc->irq_lock); + reload = true; + } + if (reload) { + struct armada_gem_object *obj = dcrtc->cursor_obj; + uint32_t *pix; + /* Set the top-left corner of the cursor image */ + pix = obj->addr; + pix += yoff * s + xoff; + armada_load_cursor_argb(dcrtc->base, pix, s, w, h); + } + + /* Reload the cursor position, size and enable in the IRQ handler */ + spin_lock_irq(&dcrtc->irq_lock); + dcrtc->cursor_hw_pos = yscr << 16 | xscr; + dcrtc->cursor_hw_sz = h << 16 | w; + dcrtc->cursor_update = true; + armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA); + spin_unlock_irq(&dcrtc->irq_lock); + + return 0; +} + +static void cursor_update(void *data) +{ + armada_drm_crtc_cursor_update(data, true); +} + +static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h) +{ + struct drm_device *dev = crtc->dev; + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct armada_gem_object *obj = NULL; + int ret; + + /* If no cursor support, replicate drm's return value */ + if (!dcrtc->variant->has_spu_adv_reg) + return -ENXIO; + + if (handle && w > 0 && h > 0) { + /* maximum size is 64x32 or 32x64 */ + if (w > 64 || h > 64 || (w > 32 && h > 32)) + return -ENOMEM; + + obj = armada_gem_object_lookup(dev, file, handle); + if (!obj) + return -ENOENT; + + /* Must be a kernel-mapped object */ + if (!obj->addr) { + drm_gem_object_unreference_unlocked(&obj->obj); + return -EINVAL; + } + + if (obj->obj.size < w * h * 4) { + DRM_ERROR("buffer is too small\n"); + drm_gem_object_unreference_unlocked(&obj->obj); + return -ENOMEM; + } + } + + 
mutex_lock(&dev->struct_mutex); + if (dcrtc->cursor_obj) { + dcrtc->cursor_obj->update = NULL; + dcrtc->cursor_obj->update_data = NULL; + drm_gem_object_unreference(&dcrtc->cursor_obj->obj); + } + dcrtc->cursor_obj = obj; + dcrtc->cursor_w = w; + dcrtc->cursor_h = h; + ret = armada_drm_crtc_cursor_update(dcrtc, true); + if (obj) { + obj->update_data = dcrtc; + obj->update = cursor_update; + } + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + int ret; + + /* If no cursor support, replicate drm's return value */ + if (!dcrtc->variant->has_spu_adv_reg) + return -EFAULT; + + mutex_lock(&dev->struct_mutex); + dcrtc->cursor_x = x; + dcrtc->cursor_y = y; + ret = armada_drm_crtc_cursor_update(dcrtc, false); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static void armada_drm_crtc_destroy(struct drm_crtc *crtc) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct armada_private *priv = crtc->dev->dev_private; + + if (dcrtc->cursor_obj) + drm_gem_object_unreference(&dcrtc->cursor_obj->obj); + + priv->dcrtc[dcrtc->num] = NULL; + drm_crtc_cleanup(&dcrtc->crtc); + + if (!IS_ERR(dcrtc->clk)) + clk_disable_unprepare(dcrtc->clk); + + writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA); + + of_node_put(dcrtc->crtc.port); + + kfree(dcrtc); +} + +/* + * The mode_config lock is held here, to prevent races between this + * and a mode_set. 
+ */ +static int armada_drm_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags) +{ + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct armada_frame_work *work; + struct drm_device *dev = crtc->dev; + unsigned long flags; + unsigned i; + int ret; + + /* We don't support changing the pixel format */ + if (fb->pixel_format != crtc->primary->fb->pixel_format) + return -EINVAL; + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + work->event = event; + work->old_fb = dcrtc->crtc.primary->fb; + + i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs, + dcrtc->interlaced); + armada_reg_queue_end(work->regs, i); + + /* + * Ensure that we hold a reference on the new framebuffer. + * This has to match the behaviour in mode_set. + */ + drm_framebuffer_reference(fb); + + ret = armada_drm_crtc_queue_frame_work(dcrtc, work); + if (ret) { + /* Undo our reference above */ + drm_framebuffer_unreference(fb); + kfree(work); + return ret; + } + + /* + * Don't take a reference on the new framebuffer; + * drm_mode_page_flip_ioctl() has already grabbed a reference and + * will _not_ drop that reference on successful return from this + * function. Simply mark this new framebuffer as the current one. + */ + dcrtc->crtc.primary->fb = fb; + + /* + * Finally, if the display is blanked, we won't receive an + * interrupt, so complete it now. 
+ */ + if (dpms_blanked(dcrtc->dpms)) { + spin_lock_irqsave(&dev->event_lock, flags); + if (dcrtc->frame_work) + armada_drm_crtc_complete_frame_work(dcrtc); + spin_unlock_irqrestore(&dev->event_lock, flags); + } + + return 0; +} + +static int +armada_drm_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val) +{ + struct armada_private *priv = crtc->dev->dev_private; + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + bool update_csc = false; + + if (property == priv->csc_yuv_prop) { + dcrtc->csc_yuv_mode = val; + update_csc = true; + } else if (property == priv->csc_rgb_prop) { + dcrtc->csc_rgb_mode = val; + update_csc = true; + } + + if (update_csc) { + uint32_t val; + + val = dcrtc->spu_iopad_ctrl | + armada_drm_crtc_calculate_csc(dcrtc); + writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL); + } + + return 0; +} + +static struct drm_crtc_funcs armada_crtc_funcs = { + .cursor_set = armada_drm_crtc_cursor_set, + .cursor_move = armada_drm_crtc_cursor_move, + .destroy = armada_drm_crtc_destroy, + .set_config = drm_crtc_helper_set_config, + .page_flip = armada_drm_crtc_page_flip, + .set_property = armada_drm_crtc_set_property, +}; + +static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = { + { CSC_AUTO, "Auto" }, + { CSC_YUV_CCIR601, "CCIR601" }, + { CSC_YUV_CCIR709, "CCIR709" }, +}; + +static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = { + { CSC_AUTO, "Auto" }, + { CSC_RGB_COMPUTER, "Computer system" }, + { CSC_RGB_STUDIO, "Studio" }, +}; + +static int armada_drm_crtc_create_properties(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + + if (priv->csc_yuv_prop) + return 0; + + priv->csc_yuv_prop = drm_property_create_enum(dev, 0, + "CSC_YUV", armada_drm_csc_yuv_enum_list, + ARRAY_SIZE(armada_drm_csc_yuv_enum_list)); + priv->csc_rgb_prop = drm_property_create_enum(dev, 0, + "CSC_RGB", armada_drm_csc_rgb_enum_list, + ARRAY_SIZE(armada_drm_csc_rgb_enum_list)); + + if 
(!priv->csc_yuv_prop || !priv->csc_rgb_prop) + return -ENOMEM; + + return 0; +} + +int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, + struct resource *res, int irq, const struct armada_variant *variant, + struct device_node *port) +{ + struct armada_private *priv = drm->dev_private; + struct armada_crtc *dcrtc; + void __iomem *base; + int ret; + + ret = armada_drm_crtc_create_properties(drm); + if (ret) + return ret; + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL); + if (!dcrtc) { + DRM_ERROR("failed to allocate Armada crtc\n"); + return -ENOMEM; + } + + if (dev != drm->dev) + dev_set_drvdata(dev, dcrtc); + + dcrtc->variant = variant; + dcrtc->base = base; + dcrtc->num = drm->mode_config.num_crtc; + dcrtc->clk = ERR_PTR(-EINVAL); + dcrtc->csc_yuv_mode = CSC_AUTO; + dcrtc->csc_rgb_mode = CSC_AUTO; + dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0; + dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24; + spin_lock_init(&dcrtc->irq_lock); + dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR; + INIT_LIST_HEAD(&dcrtc->vbl_list); + init_waitqueue_head(&dcrtc->frame_wait); + + /* Initialize some registers which we don't otherwise set */ + writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV); + writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR); + writel_relaxed(dcrtc->spu_iopad_ctrl, + dcrtc->base + LCD_SPU_IOPAD_CONTROL); + writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0); + writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 | + CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 | + CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); + writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); + writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN); + writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); + + ret = devm_request_irq(dev, irq, armada_drm_irq, 0, 
"armada_drm_crtc", + dcrtc); + if (ret < 0) { + kfree(dcrtc); + return ret; + } + + if (dcrtc->variant->init) { + ret = dcrtc->variant->init(dcrtc, dev); + if (ret) { + kfree(dcrtc); + return ret; + } + } + + /* Ensure AXI pipeline is enabled */ + armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0); + + priv->dcrtc[dcrtc->num] = dcrtc; + + dcrtc->crtc.port = port; + drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs); + drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs); + + drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop, + dcrtc->csc_yuv_mode); + drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop, + dcrtc->csc_rgb_mode); + + return armada_overlay_plane_create(drm, 1 << dcrtc->num); +} + +static int +armada_lcd_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = data; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + const struct armada_variant *variant; + struct device_node *port = NULL; + + if (irq < 0) + return irq; + + if (!dev->of_node) { + const struct platform_device_id *id; + + id = platform_get_device_id(pdev); + if (!id) + return -ENXIO; + + variant = (const struct armada_variant *)id->driver_data; + } else { + const struct of_device_id *match; + struct device_node *np, *parent = dev->of_node; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match) + return -ENXIO; + + np = of_get_child_by_name(parent, "ports"); + if (np) + parent = np; + port = of_get_child_by_name(parent, "port"); + of_node_put(np); + if (!port) { + dev_err(dev, "no port node found in %s\n", + parent->full_name); + return -ENXIO; + } + + variant = match->data; + } + + return armada_drm_crtc_create(drm, dev, res, irq, variant, port); +} + +static void +armada_lcd_unbind(struct device *dev, struct device *master, void *data) +{ + struct armada_crtc 
*dcrtc = dev_get_drvdata(dev); + + armada_drm_crtc_destroy(&dcrtc->crtc); +} + +static const struct component_ops armada_lcd_ops = { + .bind = armada_lcd_bind, + .unbind = armada_lcd_unbind, +}; + +static int armada_lcd_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &armada_lcd_ops); +} + +static int armada_lcd_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &armada_lcd_ops); + return 0; +} + +static struct of_device_id armada_lcd_of_match[] = { + { + .compatible = "marvell,dove-lcd", + .data = &armada510_ops, + }, + {} +}; +MODULE_DEVICE_TABLE(of, armada_lcd_of_match); + +static const struct platform_device_id armada_lcd_platform_ids[] = { + { + .name = "armada-lcd", + .driver_data = (unsigned long)&armada510_ops, + }, { + .name = "armada-510-lcd", + .driver_data = (unsigned long)&armada510_ops, + }, + { }, +}; +MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids); + +struct platform_driver armada_lcd_platform_driver = { + .probe = armada_lcd_probe, + .remove = armada_lcd_remove, + .driver = { + .name = "armada-lcd", + .owner = THIS_MODULE, + .of_match_table = armada_lcd_of_match, + }, + .id_table = armada_lcd_platform_ids, +}; diff --git a/kernel/drivers/gpu/drm/armada/armada_crtc.h b/kernel/drivers/gpu/drm/armada/armada_crtc.h new file mode 100644 index 000000000..98102a5a9 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_crtc.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ARMADA_CRTC_H +#define ARMADA_CRTC_H + +struct armada_gem_object; + +struct armada_regs { + uint32_t offset; + uint32_t mask; + uint32_t val; +}; + +#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \ + do { \ + struct armada_regs *__reg = _r; \ + __reg[_i].offset = _o; \ + __reg[_i].mask = ~(_m); \ + __reg[_i].val = _v; \ + _i++; \ + } while (0) + +#define armada_reg_queue_set(_r, _i, _v, _o) \ + armada_reg_queue_mod(_r, _i, _v, ~0, _o) + +#define armada_reg_queue_end(_r, _i) \ + armada_reg_queue_mod(_r, _i, 0, 0, ~0) + +struct armada_frame_work; +struct armada_variant; + +struct armada_crtc { + struct drm_crtc crtc; + const struct armada_variant *variant; + unsigned num; + void __iomem *base; + struct clk *clk; + struct clk *extclk[2]; + struct { + uint32_t spu_v_h_total; + uint32_t spu_v_porch; + uint32_t spu_adv_reg; + } v[2]; + bool interlaced; + bool cursor_update; + uint8_t csc_yuv_mode; + uint8_t csc_rgb_mode; + + struct drm_plane *plane; + + struct armada_gem_object *cursor_obj; + int cursor_x; + int cursor_y; + uint32_t cursor_hw_pos; + uint32_t cursor_hw_sz; + uint32_t cursor_w; + uint32_t cursor_h; + + int dpms; + uint32_t cfg_dumb_ctrl; + uint32_t dumb_ctrl; + uint32_t spu_iopad_ctrl; + + wait_queue_head_t frame_wait; + struct armada_frame_work *frame_work; + + spinlock_t irq_lock; + uint32_t irq_ena; + struct list_head vbl_list; +}; +#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc) + +struct device_node; +int armada_drm_crtc_create(struct drm_device *, struct device *, + struct resource *, int, const struct armada_variant *, + struct device_node *); +void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int); +void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int); +void armada_drm_crtc_disable_irq(struct armada_crtc *, u32); +void armada_drm_crtc_enable_irq(struct armada_crtc *, u32); +void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *); + +extern struct 
platform_driver armada_lcd_platform_driver; + +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_debugfs.c b/kernel/drivers/gpu/drm/armada/armada_debugfs.c new file mode 100644 index 000000000..471e45627 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_debugfs.c @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2012 Russell King + * Rewritten from the dovefb driver, and Armada510 manuals. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include "armada_crtc.h" +#include "armada_drm.h" + +static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct armada_private *priv = dev->dev_private; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_mm_dump_table(m, &priv->linear); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static int armada_debugfs_reg_show(struct seq_file *m, void *data) +{ + struct drm_device *dev = m->private; + struct armada_private *priv = dev->dev_private; + int n, i; + + if (priv) { + for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) { + struct armada_crtc *dcrtc = priv->dcrtc[n]; + if (!dcrtc) + continue; + + for (i = 0x84; i <= 0x1c4; i += 4) { + uint32_t v = readl_relaxed(dcrtc->base + i); + seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v); + } + } + } + + return 0; +} + +static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file) +{ + return single_open(file, armada_debugfs_reg_show, inode->i_private); +} + +static const struct file_operations fops_reg_r = { + .owner = THIS_MODULE, + .open = armada_debugfs_reg_r_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int armada_debugfs_write(struct file *file, const char __user *ptr, + size_t len, loff_t *off) +{ + 
struct drm_device *dev = file->private_data; + struct armada_private *priv = dev->dev_private; + struct armada_crtc *dcrtc = priv->dcrtc[0]; + char buf[32], *p; + uint32_t reg, val; + int ret; + + if (*off != 0) + return 0; + + if (len > sizeof(buf) - 1) + len = sizeof(buf) - 1; + + ret = strncpy_from_user(buf, ptr, len); + if (ret < 0) + return ret; + buf[len] = '\0'; + + reg = simple_strtoul(buf, &p, 16); + if (!isspace(*p)) + return -EINVAL; + val = simple_strtoul(p + 1, NULL, 16); + + if (reg >= 0x84 && reg <= 0x1c4) + writel(val, dcrtc->base + reg); + + return len; +} + +static const struct file_operations fops_reg_w = { + .owner = THIS_MODULE, + .open = simple_open, + .write = armada_debugfs_write, + .llseek = noop_llseek, +}; + +static struct drm_info_list armada_debugfs_list[] = { + { "gem_linear", armada_debugfs_gem_linear_show, 0 }, +}; +#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list) + +static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent, + const void *key) +{ + struct drm_info_node *node; + + node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); + if (node == NULL) { + debugfs_remove(ent); + return -ENOMEM; + } + + node->minor = minor; + node->dent = ent; + node->info_ent = (void *) key; + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); + + return 0; +} + +static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor, + const char *name, umode_t mode, const struct file_operations *fops) +{ + struct dentry *de; + + de = debugfs_create_file(name, mode, root, minor->dev, fops); + + return drm_add_fake_info_node(minor, de, fops); +} + +int armada_drm_debugfs_init(struct drm_minor *minor) +{ + int ret; + + ret = drm_debugfs_create_files(armada_debugfs_list, + ARMADA_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + if (ret) + return ret; + + ret = armada_debugfs_create(minor->debugfs_root, minor, + "reg", S_IFREG | S_IRUSR, 
&fops_reg_r); + if (ret) + goto err_1; + + ret = armada_debugfs_create(minor->debugfs_root, minor, + "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w); + if (ret) + goto err_2; + return ret; + + err_2: + drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor); + err_1: + drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES, + minor); + return ret; +} + +void armada_drm_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor); + drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor); + drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES, + minor); +} diff --git a/kernel/drivers/gpu/drm/armada/armada_drm.h b/kernel/drivers/gpu/drm/armada/armada_drm.h new file mode 100644 index 000000000..ea63c6c7c --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_drm.h @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ARMADA_DRM_H +#define ARMADA_DRM_H + +#include +#include +#include +#include + +struct armada_crtc; +struct armada_gem_object; +struct clk; +struct drm_fb_helper; + +static inline void +armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr) +{ + uint32_t ov, v; + + ov = v = readl_relaxed(ptr); + v = (v & ~mask) | val; + if (ov != v) + writel_relaxed(v, ptr); +} + +static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp) +{ + uint32_t pitch = bpp != 4 ? 
width * ((bpp + 7) / 8) : width / 2; + + /* 88AP510 spec recommends pitch be a multiple of 128 */ + return ALIGN(pitch, 128); +} + +struct armada_vbl_event { + struct list_head node; + void *data; + void (*fn)(struct armada_crtc *, void *); +}; +void armada_drm_vbl_event_add(struct armada_crtc *, + struct armada_vbl_event *); +void armada_drm_vbl_event_remove(struct armada_crtc *, + struct armada_vbl_event *); +void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *, + struct armada_vbl_event *); +#define armada_drm_vbl_event_init(_e, _f, _d) do { \ + struct armada_vbl_event *__e = _e; \ + INIT_LIST_HEAD(&__e->node); \ + __e->data = _d; \ + __e->fn = _f; \ +} while (0) + + +struct armada_private; + +struct armada_variant { + bool has_spu_adv_reg; + uint32_t spu_adv_reg; + int (*init)(struct armada_crtc *, struct device *); + int (*compute_clock)(struct armada_crtc *, + const struct drm_display_mode *, + uint32_t *); +}; + +/* Variant ops */ +extern const struct armada_variant armada510_ops; + +struct armada_private { + struct work_struct fb_unref_work; + DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8); + struct drm_fb_helper *fbdev; + struct armada_crtc *dcrtc[2]; + struct drm_mm linear; + struct drm_property *csc_yuv_prop; + struct drm_property *csc_rgb_prop; + struct drm_property *colorkey_prop; + struct drm_property *colorkey_min_prop; + struct drm_property *colorkey_max_prop; + struct drm_property *colorkey_val_prop; + struct drm_property *colorkey_alpha_prop; + struct drm_property *colorkey_mode_prop; + struct drm_property *brightness_prop; + struct drm_property *contrast_prop; + struct drm_property *saturation_prop; +#ifdef CONFIG_DEBUG_FS + struct dentry *de; +#endif +}; + +void __armada_drm_queue_unref_work(struct drm_device *, + struct drm_framebuffer *); +void armada_drm_queue_unref_work(struct drm_device *, + struct drm_framebuffer *); + +extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; + +int armada_fbdev_init(struct 
drm_device *); +void armada_fbdev_lastclose(struct drm_device *); +void armada_fbdev_fini(struct drm_device *); + +int armada_overlay_plane_create(struct drm_device *, unsigned long); + +int armada_drm_debugfs_init(struct drm_minor *); +void armada_drm_debugfs_cleanup(struct drm_minor *); + +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_drv.c b/kernel/drivers/gpu/drm/armada/armada_drv.c new file mode 100644 index 000000000..b01420c84 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_drv.c @@ -0,0 +1,519 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include "armada_crtc.h" +#include "armada_drm.h" +#include "armada_gem.h" +#include "armada_hw.h" +#include +#include "armada_ioctlP.h" + +#ifdef CONFIG_DRM_ARMADA_TDA1998X +#include +#include "armada_slave.h" + +static struct tda998x_encoder_params params = { + /* With 0x24, there is no translation between vp_out and int_vp + FB LCD out Pins VIP Int Vp + R:23:16 R:7:0 VPC7:0 7:0 7:0[R] + G:15:8 G:15:8 VPB7:0 23:16 23:16[G] + B:7:0 B:23:16 VPA7:0 15:8 15:8[B] + */ + .swap_a = 2, + .swap_b = 3, + .swap_c = 4, + .swap_d = 5, + .swap_e = 0, + .swap_f = 1, + .audio_cfg = BIT(2), + .audio_frame[1] = 1, + .audio_format = AFMT_SPDIF, + .audio_sample_rate = 44100, +}; + +static const struct armada_drm_slave_config tda19988_config = { + .i2c_adapter_id = 0, + .crtcs = 1 << 0, /* Only LCD0 at the moment */ + .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT, + .interlace_allowed = true, + .info = { + .type = "tda998x", + .addr = 0x70, + .platform_data = ¶ms, + }, +}; +#endif + +static bool is_componentized(struct device *dev) +{ + return dev->of_node || dev->platform_data; +} + +static void armada_drm_unref_work(struct work_struct *work) +{ 
+ struct armada_private *priv = + container_of(work, struct armada_private, fb_unref_work); + struct drm_framebuffer *fb; + + while (kfifo_get(&priv->fb_unref, &fb)) + drm_framebuffer_unreference(fb); +} + +/* Must be called with dev->event_lock held */ +void __armada_drm_queue_unref_work(struct drm_device *dev, + struct drm_framebuffer *fb) +{ + struct armada_private *priv = dev->dev_private; + + WARN_ON(!kfifo_put(&priv->fb_unref, fb)); + schedule_work(&priv->fb_unref_work); +} + +void armada_drm_queue_unref_work(struct drm_device *dev, + struct drm_framebuffer *fb) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + __armada_drm_queue_unref_work(dev, fb); + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static int armada_drm_load(struct drm_device *dev, unsigned long flags) +{ + const struct platform_device_id *id; + const struct armada_variant *variant; + struct armada_private *priv; + struct resource *res[ARRAY_SIZE(priv->dcrtc)]; + struct resource *mem = NULL; + int ret, n, i; + + memset(res, 0, sizeof(res)); + + for (n = i = 0; ; n++) { + struct resource *r = platform_get_resource(dev->platformdev, + IORESOURCE_MEM, n); + if (!r) + break; + + /* Resources above 64K are graphics memory */ + if (resource_size(r) > SZ_64K) + mem = r; + else if (i < ARRAY_SIZE(priv->dcrtc)) + res[i++] = r; + else + return -EINVAL; + } + + if (!mem) + return -ENXIO; + + if (!devm_request_mem_region(dev->dev, mem->start, + resource_size(mem), "armada-drm")) + return -EBUSY; + + priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + DRM_ERROR("failed to allocate private\n"); + return -ENOMEM; + } + + platform_set_drvdata(dev->platformdev, dev); + dev->dev_private = priv; + + /* Get the implementation specific driver data. 
*/ + id = platform_get_device_id(dev->platformdev); + if (!id) + return -ENXIO; + + variant = (const struct armada_variant *)id->driver_data; + + INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work); + INIT_KFIFO(priv->fb_unref); + + /* Mode setting support */ + drm_mode_config_init(dev); + dev->mode_config.min_width = 320; + dev->mode_config.min_height = 200; + + /* + * With vscale enabled, the maximum width is 1920 due to the + * 1920 by 3 lines RAM + */ + dev->mode_config.max_width = 1920; + dev->mode_config.max_height = 2048; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.funcs = &armada_drm_mode_config_funcs; + drm_mm_init(&priv->linear, mem->start, resource_size(mem)); + + /* Create all LCD controllers */ + for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) { + int irq; + + if (!res[n]) + break; + + irq = platform_get_irq(dev->platformdev, n); + if (irq < 0) + goto err_kms; + + ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq, + variant, NULL); + if (ret) + goto err_kms; + } + + if (is_componentized(dev->dev)) { + ret = component_bind_all(dev->dev, dev); + if (ret) + goto err_kms; + } else { +#ifdef CONFIG_DRM_ARMADA_TDA1998X + ret = armada_drm_connector_slave_create(dev, &tda19988_config); + if (ret) + goto err_kms; +#endif + } + + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); + if (ret) + goto err_comp; + + dev->irq_enabled = true; + dev->vblank_disable_allowed = 1; + + ret = armada_fbdev_init(dev); + if (ret) + goto err_comp; + + drm_kms_helper_poll_init(dev); + + return 0; + + err_comp: + if (is_componentized(dev->dev)) + component_unbind_all(dev->dev, dev); + err_kms: + drm_mode_config_cleanup(dev); + drm_mm_takedown(&priv->linear); + flush_work(&priv->fb_unref_work); + + return ret; +} + +static int armada_drm_unload(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + + drm_kms_helper_poll_fini(dev); + armada_fbdev_fini(dev); + + if (is_componentized(dev->dev)) + component_unbind_all(dev->dev, dev); 
+ + drm_mode_config_cleanup(dev); + drm_mm_takedown(&priv->linear); + flush_work(&priv->fb_unref_work); + dev->dev_private = NULL; + + return 0; +} + +void armada_drm_vbl_event_add(struct armada_crtc *dcrtc, + struct armada_vbl_event *evt) +{ + unsigned long flags; + + spin_lock_irqsave(&dcrtc->irq_lock, flags); + if (list_empty(&evt->node)) { + list_add_tail(&evt->node, &dcrtc->vbl_list); + + drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + } + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); +} + +void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc, + struct armada_vbl_event *evt) +{ + if (!list_empty(&evt->node)) { + list_del_init(&evt->node); + drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + } +} + +void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc, + struct armada_vbl_event *evt) +{ + unsigned long flags; + + spin_lock_irqsave(&dcrtc->irq_lock, flags); + armada_drm_vbl_event_remove(dcrtc, evt); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); +} + +/* These are called under the vbl_lock. 
*/ +static int armada_drm_enable_vblank(struct drm_device *dev, int crtc) +{ + struct armada_private *priv = dev->dev_private; + armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA); + return 0; +} + +static void armada_drm_disable_vblank(struct drm_device *dev, int crtc) +{ + struct armada_private *priv = dev->dev_private; + armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA); +} + +static struct drm_ioctl_desc armada_ioctls[] = { + DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, + DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, + DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, + DRM_UNLOCKED), +}; + +static void armada_drm_lastclose(struct drm_device *dev) +{ + armada_fbdev_lastclose(dev); +} + +static const struct file_operations armada_drm_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = drm_read, + .poll = drm_poll, + .unlocked_ioctl = drm_ioctl, + .mmap = drm_gem_mmap, + .open = drm_open, + .release = drm_release, +}; + +static struct drm_driver armada_drm_driver = { + .load = armada_drm_load, + .open = NULL, + .preclose = NULL, + .postclose = NULL, + .lastclose = armada_drm_lastclose, + .unload = armada_drm_unload, + .set_busid = drm_platform_set_busid, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = armada_drm_enable_vblank, + .disable_vblank = armada_drm_disable_vblank, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = armada_drm_debugfs_init, + .debugfs_cleanup = armada_drm_debugfs_cleanup, +#endif + .gem_free_object = armada_gem_free_object, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = armada_gem_prime_export, + .gem_prime_import = armada_gem_prime_import, + .dumb_create = armada_gem_dumb_create, + .dumb_map_offset = armada_gem_dumb_map_offset, + .dumb_destroy = armada_gem_dumb_destroy, + .gem_vm_ops = &armada_gem_vm_ops, + .major = 1, + .minor = 0, + .name = 
"armada-drm", + .desc = "Armada SoC DRM", + .date = "20120730", + .driver_features = DRIVER_GEM | DRIVER_MODESET | + DRIVER_HAVE_IRQ | DRIVER_PRIME, + .ioctls = armada_ioctls, + .fops = &armada_drm_fops, +}; + +static int armada_drm_bind(struct device *dev) +{ + return drm_platform_init(&armada_drm_driver, to_platform_device(dev)); +} + +static void armada_drm_unbind(struct device *dev) +{ + drm_put_dev(dev_get_drvdata(dev)); +} + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int compare_dev_name(struct device *dev, void *data) +{ + const char *name = data; + return !strcmp(dev_name(dev), name); +} + +static void armada_add_endpoints(struct device *dev, + struct component_match **match, struct device_node *port) +{ + struct device_node *ep, *remote; + + for_each_child_of_node(port, ep) { + remote = of_graph_get_remote_port_parent(ep); + if (!remote || !of_device_is_available(remote)) { + of_node_put(remote); + continue; + } else if (!of_device_is_available(remote->parent)) { + dev_warn(dev, "parent device of %s is not available\n", + remote->full_name); + of_node_put(remote); + continue; + } + + component_match_add(dev, match, compare_of, remote); + of_node_put(remote); + } +} + +static int armada_drm_find_components(struct device *dev, + struct component_match **match) +{ + struct device_node *port; + int i; + + if (dev->of_node) { + struct device_node *np = dev->of_node; + + for (i = 0; ; i++) { + port = of_parse_phandle(np, "ports", i); + if (!port) + break; + + component_match_add(dev, match, compare_of, port); + of_node_put(port); + } + + if (i == 0) { + dev_err(dev, "missing 'ports' property\n"); + return -ENODEV; + } + + for (i = 0; ; i++) { + port = of_parse_phandle(np, "ports", i); + if (!port) + break; + + armada_add_endpoints(dev, match, port); + of_node_put(port); + } + } else if (dev->platform_data) { + char **devices = dev->platform_data; + struct device *d; + + for (i = 0; devices[i]; i++) + 
component_match_add(dev, match, compare_dev_name, + devices[i]); + + if (i == 0) { + dev_err(dev, "missing 'ports' property\n"); + return -ENODEV; + } + + for (i = 0; devices[i]; i++) { + d = bus_find_device_by_name(&platform_bus_type, NULL, + devices[i]); + if (d && d->of_node) { + for_each_child_of_node(d->of_node, port) + armada_add_endpoints(dev, match, port); + } + put_device(d); + } + } + + return 0; +} + +static const struct component_master_ops armada_master_ops = { + .bind = armada_drm_bind, + .unbind = armada_drm_unbind, +}; + +static int armada_drm_probe(struct platform_device *pdev) +{ + if (is_componentized(&pdev->dev)) { + struct component_match *match = NULL; + int ret; + + ret = armada_drm_find_components(&pdev->dev, &match); + if (ret < 0) + return ret; + + return component_master_add_with_match(&pdev->dev, + &armada_master_ops, match); + } else { + return drm_platform_init(&armada_drm_driver, pdev); + } +} + +static int armada_drm_remove(struct platform_device *pdev) +{ + if (is_componentized(&pdev->dev)) + component_master_del(&pdev->dev, &armada_master_ops); + else + drm_put_dev(platform_get_drvdata(pdev)); + return 0; +} + +static const struct platform_device_id armada_drm_platform_ids[] = { + { + .name = "armada-drm", + .driver_data = (unsigned long)&armada510_ops, + }, { + .name = "armada-510-drm", + .driver_data = (unsigned long)&armada510_ops, + }, + { }, +}; +MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids); + +static struct platform_driver armada_drm_platform_driver = { + .probe = armada_drm_probe, + .remove = armada_drm_remove, + .driver = { + .name = "armada-drm", + }, + .id_table = armada_drm_platform_ids, +}; + +static int __init armada_drm_init(void) +{ + int ret; + + armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls); + + ret = platform_driver_register(&armada_lcd_platform_driver); + if (ret) + return ret; + ret = platform_driver_register(&armada_drm_platform_driver); + if (ret) + 
platform_driver_unregister(&armada_lcd_platform_driver); + return ret; +} +module_init(armada_drm_init); + +static void __exit armada_drm_exit(void) +{ + platform_driver_unregister(&armada_drm_platform_driver); + platform_driver_unregister(&armada_lcd_platform_driver); +} +module_exit(armada_drm_exit); + +MODULE_AUTHOR("Russell King "); +MODULE_DESCRIPTION("Armada DRM Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:armada-drm"); diff --git a/kernel/drivers/gpu/drm/armada/armada_fb.c b/kernel/drivers/gpu/drm/armada/armada_fb.c new file mode 100644 index 000000000..1c90969de --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_fb.c @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include "armada_drm.h" +#include "armada_fb.h" +#include "armada_gem.h" +#include "armada_hw.h" + +static void armada_fb_destroy(struct drm_framebuffer *fb) +{ + struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb); + + drm_framebuffer_cleanup(&dfb->fb); + drm_gem_object_unreference_unlocked(&dfb->obj->obj); + kfree(dfb); +} + +static int armada_fb_create_handle(struct drm_framebuffer *fb, + struct drm_file *dfile, unsigned int *handle) +{ + struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb); + return drm_gem_handle_create(dfile, &dfb->obj->obj, handle); +} + +static const struct drm_framebuffer_funcs armada_fb_funcs = { + .destroy = armada_fb_destroy, + .create_handle = armada_fb_create_handle, +}; + +struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) +{ + struct armada_framebuffer *dfb; + uint8_t format, config; + int ret; + + switch (mode->pixel_format) { +#define FMT(drm, fmt, mod) \ + case DRM_FORMAT_##drm: \ + format = CFG_##fmt; \ + 
config = mod; \ + break + FMT(RGB565, 565, CFG_SWAPRB); + FMT(BGR565, 565, 0); + FMT(ARGB1555, 1555, CFG_SWAPRB); + FMT(ABGR1555, 1555, 0); + FMT(RGB888, 888PACK, CFG_SWAPRB); + FMT(BGR888, 888PACK, 0); + FMT(XRGB8888, X888, CFG_SWAPRB); + FMT(XBGR8888, X888, 0); + FMT(ARGB8888, 8888, CFG_SWAPRB); + FMT(ABGR8888, 8888, 0); + FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV); + FMT(UYVY, 422PACK, CFG_YUV2RGB); + FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV); + FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU); + FMT(YUV422, 422, CFG_YUV2RGB); + FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV); + FMT(YUV420, 420, CFG_YUV2RGB); + FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV); + FMT(C8, PSEUDO8, 0); +#undef FMT + default: + return ERR_PTR(-EINVAL); + } + + dfb = kzalloc(sizeof(*dfb), GFP_KERNEL); + if (!dfb) { + DRM_ERROR("failed to allocate Armada fb object\n"); + return ERR_PTR(-ENOMEM); + } + + dfb->fmt = format; + dfb->mod = config; + dfb->obj = obj; + + drm_helper_mode_fill_fb_struct(&dfb->fb, mode); + + ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs); + if (ret) { + kfree(dfb); + return ERR_PTR(ret); + } + + /* + * Take a reference on our object as we're successful - the + * caller already holds a reference, which keeps us safe for + * the above call, but the caller will drop their reference + * to it. Hence we need to take our own reference. 
+ */ + drm_gem_object_reference(&obj->obj); + + return dfb; +} + +static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, + struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode) +{ + struct armada_gem_object *obj; + struct armada_framebuffer *dfb; + int ret; + + DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n", + mode->width, mode->height, mode->pixel_format, + mode->flags, mode->pitches[0], mode->pitches[1], + mode->pitches[2]); + + /* We can only handle a single plane at the moment */ + if (drm_format_num_planes(mode->pixel_format) > 1 && + (mode->handles[0] != mode->handles[1] || + mode->handles[0] != mode->handles[2])) { + ret = -EINVAL; + goto err; + } + + obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]); + if (!obj) { + ret = -ENOENT; + goto err; + } + + if (obj->obj.import_attach && !obj->sgt) { + ret = armada_gem_map_import(obj); + if (ret) + goto err_unref; + } + + /* Framebuffer objects must have a valid device address for scanout */ + if (obj->dev_addr == DMA_ERROR_CODE) { + ret = -EINVAL; + goto err_unref; + } + + dfb = armada_framebuffer_create(dev, mode, obj); + if (IS_ERR(dfb)) { + ret = PTR_ERR(dfb); + goto err; + } + + drm_gem_object_unreference_unlocked(&obj->obj); + + return &dfb->fb; + + err_unref: + drm_gem_object_unreference_unlocked(&obj->obj); + err: + DRM_ERROR("failed to initialize framebuffer: %d\n", ret); + return ERR_PTR(ret); +} + +static void armada_output_poll_changed(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + struct drm_fb_helper *fbh = priv->fbdev; + + if (fbh) + drm_fb_helper_hotplug_event(fbh); +} + +const struct drm_mode_config_funcs armada_drm_mode_config_funcs = { + .fb_create = armada_fb_create, + .output_poll_changed = armada_output_poll_changed, +}; diff --git a/kernel/drivers/gpu/drm/armada/armada_fb.h b/kernel/drivers/gpu/drm/armada/armada_fb.h new file mode 100644 index 000000000..ce3f12ebf --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_fb.h @@ 
-0,0 +1,24 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ARMADA_FB_H +#define ARMADA_FB_H + +struct armada_framebuffer { + struct drm_framebuffer fb; + struct armada_gem_object *obj; + uint8_t fmt; + uint8_t mod; +}; +#define drm_fb_to_armada_fb(dfb) \ + container_of(dfb, struct armada_framebuffer, fb) +#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj + +struct armada_framebuffer *armada_framebuffer_create(struct drm_device *, + struct drm_mode_fb_cmd2 *, struct armada_gem_object *); + +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_fbdev.c b/kernel/drivers/gpu/drm/armada/armada_fbdev.c new file mode 100644 index 000000000..7838e731b --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_fbdev.c @@ -0,0 +1,210 @@ +/* + * Copyright (C) 2012 Russell King + * Written from the i915 driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include + +#include +#include +#include "armada_crtc.h" +#include "armada_drm.h" +#include "armada_fb.h" +#include "armada_gem.h" + +static /*const*/ struct fb_ops armada_fb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static int armada_fb_create(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_device *dev = fbh->dev; + struct drm_mode_fb_cmd2 mode; + struct armada_framebuffer *dfb; + struct armada_gem_object *obj; + struct fb_info *info; + int size, ret; + void *ptr; + + memset(&mode, 0, sizeof(mode)); + mode.width = sizes->surface_width; + mode.height = sizes->surface_height; + mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp); + mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = mode.pitches[0] * mode.height; + obj = armada_gem_alloc_private_object(dev, size); + if (!obj) { + DRM_ERROR("failed to allocate fb memory\n"); + return -ENOMEM; + } + + ret = armada_gem_linear_back(dev, obj); + if (ret) { + drm_gem_object_unreference_unlocked(&obj->obj); + return ret; + } + + ptr = armada_gem_map_object(dev, obj); + if (!ptr) { + drm_gem_object_unreference_unlocked(&obj->obj); + return -ENOMEM; + } + + dfb = armada_framebuffer_create(dev, &mode, obj); + + /* + * A reference is now held by the framebuffer object if + * successful, otherwise this drops the ref for the error path. 
+ */ + drm_gem_object_unreference_unlocked(&obj->obj); + + if (IS_ERR(dfb)) + return PTR_ERR(dfb); + + info = framebuffer_alloc(0, dev->dev); + if (!info) { + ret = -ENOMEM; + goto err_fballoc; + } + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto err_fbcmap; + } + + strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id)); + info->par = fbh; + info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; + info->fbops = &armada_fb_ops; + info->fix.smem_start = obj->phys_addr; + info->fix.smem_len = obj->obj.size; + info->screen_size = obj->obj.size; + info->screen_base = ptr; + fbh->fb = &dfb->fb; + fbh->fbdev = info; + drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); + drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); + + DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n", + dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel, + (unsigned long long)obj->phys_addr); + + return 0; + + err_fbcmap: + framebuffer_release(info); + err_fballoc: + dfb->fb.funcs->destroy(&dfb->fb); + return ret; +} + +static int armada_fb_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes) +{ + int ret = 0; + + if (!fbh->fb) { + ret = armada_fb_create(fbh, sizes); + if (ret == 0) + ret = 1; + } + return ret; +} + +static const struct drm_fb_helper_funcs armada_fb_helper_funcs = { + .gamma_set = armada_drm_crtc_gamma_set, + .gamma_get = armada_drm_crtc_gamma_get, + .fb_probe = armada_fb_probe, +}; + +int armada_fbdev_init(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + struct drm_fb_helper *fbh; + int ret; + + fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL); + if (!fbh) + return -ENOMEM; + + priv->fbdev = fbh; + + drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, fbh, 1, 1); + if (ret) { + DRM_ERROR("failed to initialize drm fb helper\n"); + goto err_fb_helper; + } + + ret = 
drm_fb_helper_single_add_all_connectors(fbh); + if (ret) { + DRM_ERROR("failed to add fb connectors\n"); + goto err_fb_setup; + } + + ret = drm_fb_helper_initial_config(fbh, 32); + if (ret) { + DRM_ERROR("failed to set initial config\n"); + goto err_fb_setup; + } + + return 0; + err_fb_setup: + drm_fb_helper_fini(fbh); + err_fb_helper: + priv->fbdev = NULL; + return ret; +} + +void armada_fbdev_lastclose(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + + if (priv->fbdev) + drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); +} + +void armada_fbdev_fini(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + struct drm_fb_helper *fbh = priv->fbdev; + + if (fbh) { + struct fb_info *info = fbh->fbdev; + + if (info) { + unregister_framebuffer(info); + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + drm_fb_helper_fini(fbh); + + if (fbh->fb) + fbh->fb->funcs->destroy(fbh->fb); + + priv->fbdev = NULL; + } +} diff --git a/kernel/drivers/gpu/drm/armada/armada_gem.c b/kernel/drivers/gpu/drm/armada/armada_gem.c new file mode 100644 index 000000000..580e10aca --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_gem.c @@ -0,0 +1,616 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include "armada_drm.h" +#include "armada_gem.h" +#include +#include "armada_ioctlP.h" + +static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data); + unsigned long addr = (unsigned long)vmf->virtual_address; + unsigned long pfn = obj->phys_addr >> PAGE_SHIFT; + int ret; + + pfn += (addr - vma->vm_start) >> PAGE_SHIFT; + ret = vm_insert_pfn(vma, addr, pfn); + + switch (ret) { + case 0: + case -EBUSY: + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +const struct vm_operations_struct armada_gem_vm_ops = { + .fault = armada_gem_vm_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static size_t roundup_gem_size(size_t size) +{ + return roundup(size, PAGE_SIZE); +} + +/* dev->struct_mutex is held here */ +void armada_gem_free_object(struct drm_gem_object *obj) +{ + struct armada_gem_object *dobj = drm_to_armada_gem(obj); + + DRM_DEBUG_DRIVER("release obj %p\n", dobj); + + drm_gem_free_mmap_offset(&dobj->obj); + + if (dobj->page) { + /* page backed memory */ + unsigned int order = get_order(dobj->obj.size); + __free_pages(dobj->page, order); + } else if (dobj->linear) { + /* linear backed memory */ + drm_mm_remove_node(dobj->linear); + kfree(dobj->linear); + if (dobj->addr) + iounmap(dobj->addr); + } + + if (dobj->obj.import_attach) { + /* We only ever display imported data */ + dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt, + DMA_TO_DEVICE); + drm_prime_gem_destroy(&dobj->obj, NULL); + } + + drm_gem_object_release(&dobj->obj); + + kfree(dobj); +} + +int +armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) +{ + struct armada_private *priv = dev->dev_private; + size_t size = obj->obj.size; + + if (obj->page || obj->linear) + return 0; + + /* + * If it is a small allocation (typically cursor, which will + * 
be 32x64 or 64x32 ARGB pixels) try to get it from the system. + * Framebuffers will never be this small (our minimum size for + * framebuffers is larger than this anyway.) Such objects are + * only accessed by the CPU so we don't need any special handing + * here. + */ + if (size <= 8192) { + unsigned int order = get_order(size); + struct page *p = alloc_pages(GFP_KERNEL, order); + + if (p) { + obj->addr = page_address(p); + obj->phys_addr = page_to_phys(p); + obj->page = p; + + memset(obj->addr, 0, PAGE_ALIGN(size)); + } + } + + /* + * We could grab something from CMA if it's enabled, but that + * involves building in a problem: + * + * CMA's interface uses dma_alloc_coherent(), which provides us + * with an CPU virtual address and a device address. + * + * The CPU virtual address may be either an address in the kernel + * direct mapped region (for example, as it would be on x86) or + * it may be remapped into another part of kernel memory space + * (eg, as it would be on ARM.) This means virt_to_phys() on the + * returned virtual address is invalid depending on the architecture + * implementation. + * + * The device address may also not be a physical address; it may + * be that there is some kind of remapping between the device and + * system RAM, which makes the use of the device address also + * unsafe to re-use as a physical address. + * + * This makes DRM usage of dma_alloc_coherent() in a generic way + * at best very questionable and unsafe. 
+ */ + + /* Otherwise, grab it from our linear allocation */ + if (!obj->page) { + struct drm_mm_node *node; + unsigned align = min_t(unsigned, size, SZ_2M); + void __iomem *ptr; + int ret; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOSPC; + + mutex_lock(&dev->struct_mutex); + ret = drm_mm_insert_node(&priv->linear, node, size, align, + DRM_MM_SEARCH_DEFAULT); + mutex_unlock(&dev->struct_mutex); + if (ret) { + kfree(node); + return ret; + } + + obj->linear = node; + + /* Ensure that the memory we're returning is cleared. */ + ptr = ioremap_wc(obj->linear->start, size); + if (!ptr) { + mutex_lock(&dev->struct_mutex); + drm_mm_remove_node(obj->linear); + mutex_unlock(&dev->struct_mutex); + kfree(obj->linear); + obj->linear = NULL; + return -ENOMEM; + } + + memset_io(ptr, 0, size); + iounmap(ptr); + + obj->phys_addr = obj->linear->start; + obj->dev_addr = obj->linear->start; + } + + DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, + (unsigned long long)obj->phys_addr, + (unsigned long long)obj->dev_addr); + + return 0; +} + +void * +armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj) +{ + /* only linear objects need to be ioremap'd */ + if (!dobj->addr && dobj->linear) + dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size); + return dobj->addr; +} + +struct armada_gem_object * +armada_gem_alloc_private_object(struct drm_device *dev, size_t size) +{ + struct armada_gem_object *obj; + + size = roundup_gem_size(size); + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; + + drm_gem_private_object_init(dev, &obj->obj, size); + obj->dev_addr = DMA_ERROR_CODE; + + DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size); + + return obj; +} + +struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, + size_t size) +{ + struct armada_gem_object *obj; + struct address_space *mapping; + + size = roundup_gem_size(size); + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if 
(!obj) + return NULL; + + if (drm_gem_object_init(dev, &obj->obj, size)) { + kfree(obj); + return NULL; + } + + obj->dev_addr = DMA_ERROR_CODE; + + mapping = file_inode(obj->obj.filp)->i_mapping; + mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); + + DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size); + + return obj; +} + +/* Dumb alloc support */ +int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct armada_gem_object *dobj; + u32 handle; + size_t size; + int ret; + + args->pitch = armada_pitch(args->width, args->bpp); + args->size = size = args->pitch * args->height; + + dobj = armada_gem_alloc_private_object(dev, size); + if (dobj == NULL) + return -ENOMEM; + + ret = armada_gem_linear_back(dev, dobj); + if (ret) + goto err; + + ret = drm_gem_handle_create(file, &dobj->obj, &handle); + if (ret) + goto err; + + args->handle = handle; + + /* drop reference from allocate - handle holds it now */ + DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle); + err: + drm_gem_object_unreference_unlocked(&dobj->obj); + return ret; +} + +int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset) +{ + struct armada_gem_object *obj; + int ret = 0; + + mutex_lock(&dev->struct_mutex); + obj = armada_gem_object_lookup(dev, file, handle); + if (!obj) { + DRM_ERROR("failed to lookup gem object\n"); + ret = -EINVAL; + goto err_unlock; + } + + /* Don't allow imported objects to be mapped */ + if (obj->obj.import_attach) { + ret = -EINVAL; + goto err_unlock; + } + + ret = drm_gem_create_mmap_offset(&obj->obj); + if (ret == 0) { + *offset = drm_vma_node_offset_addr(&obj->obj.vma_node); + DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset); + } + + drm_gem_object_unreference(&obj->obj); + err_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int armada_gem_dumb_destroy(struct drm_file *file, struct 
drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file, handle); +} + +/* Private driver gem ioctls */ +int armada_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_armada_gem_create *args = data; + struct armada_gem_object *dobj; + size_t size; + u32 handle; + int ret; + + if (args->size == 0) + return -ENOMEM; + + size = args->size; + + dobj = armada_gem_alloc_object(dev, size); + if (dobj == NULL) + return -ENOMEM; + + ret = drm_gem_handle_create(file, &dobj->obj, &handle); + if (ret) + goto err; + + args->handle = handle; + + /* drop reference from allocate - handle holds it now */ + DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle); + err: + drm_gem_object_unreference_unlocked(&dobj->obj); + return ret; +} + +/* Map a shmem-backed object into process memory space */ +int armada_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_armada_gem_mmap *args = data; + struct armada_gem_object *dobj; + unsigned long addr; + + dobj = armada_gem_object_lookup(dev, file, args->handle); + if (dobj == NULL) + return -ENOENT; + + if (!dobj->obj.filp) { + drm_gem_object_unreference(&dobj->obj); + return -EINVAL; + } + + addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE, + MAP_SHARED, args->offset); + drm_gem_object_unreference(&dobj->obj); + if (IS_ERR_VALUE(addr)) + return addr; + + args->addr = addr; + + return 0; +} + +int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_armada_gem_pwrite *args = data; + struct armada_gem_object *dobj; + char __user *ptr; + int ret; + + DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n", + args->handle, args->offset, args->size, args->ptr); + + if (args->size == 0) + return 0; + + ptr = (char __user *)(uintptr_t)args->ptr; + + if (!access_ok(VERIFY_READ, ptr, args->size)) + return -EFAULT; + + ret = fault_in_multipages_readable(ptr, 
args->size); + if (ret) + return ret; + + dobj = armada_gem_object_lookup(dev, file, args->handle); + if (dobj == NULL) + return -ENOENT; + + /* Must be a kernel-mapped object */ + if (!dobj->addr) + return -EINVAL; + + if (args->offset > dobj->obj.size || + args->size > dobj->obj.size - args->offset) { + DRM_ERROR("invalid size: object size %u\n", dobj->obj.size); + ret = -EINVAL; + goto unref; + } + + if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) { + ret = -EFAULT; + } else if (dobj->update) { + dobj->update(dobj->update_data); + ret = 0; + } + + unref: + drm_gem_object_unreference_unlocked(&dobj->obj); + return ret; +} + +/* Prime support */ +struct sg_table * +armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct armada_gem_object *dobj = drm_to_armada_gem(obj); + struct scatterlist *sg; + struct sg_table *sgt; + int i, num; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + if (dobj->obj.filp) { + struct address_space *mapping; + int count; + + count = dobj->obj.size / PAGE_SIZE; + if (sg_alloc_table(sgt, count, GFP_KERNEL)) + goto free_sgt; + + mapping = file_inode(dobj->obj.filp)->i_mapping; + + for_each_sg(sgt->sgl, sg, count, i) { + struct page *page; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) { + num = i; + goto release; + } + + sg_set_page(sg, page, PAGE_SIZE, 0); + } + + if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) { + num = sgt->nents; + goto release; + } + } else if (dobj->page) { + /* Single contiguous page */ + if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + goto free_sgt; + + sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0); + + if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + goto free_table; + } else if (dobj->linear) { + /* Single contiguous physical region - no struct page */ + if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + goto free_sgt; + 
sg_dma_address(sgt->sgl) = dobj->dev_addr; + sg_dma_len(sgt->sgl) = dobj->obj.size; + } else { + goto free_sgt; + } + return sgt; + + release: + for_each_sg(sgt->sgl, sg, num, i) + page_cache_release(sg_page(sg)); + free_table: + sg_free_table(sgt); + free_sgt: + kfree(sgt); + return NULL; +} + +static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct armada_gem_object *dobj = drm_to_armada_gem(obj); + int i; + + if (!dobj->linear) + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + + if (dobj->obj.filp) { + struct scatterlist *sg; + for_each_sg(sgt->sgl, sg, sgt->nents, i) + page_cache_release(sg_page(sg)); + } + + sg_free_table(sgt); + kfree(sgt); +} + +static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n) +{ + return NULL; +} + +static void +armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr) +{ +} + +static int +armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma) +{ + return -EINVAL; +} + +static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = { + .map_dma_buf = armada_gem_prime_map_dma_buf, + .unmap_dma_buf = armada_gem_prime_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .kmap_atomic = armada_gem_dmabuf_no_kmap, + .kunmap_atomic = armada_gem_dmabuf_no_kunmap, + .kmap = armada_gem_dmabuf_no_kmap, + .kunmap = armada_gem_dmabuf_no_kunmap, + .mmap = armada_gem_dmabuf_mmap, +}; + +struct dma_buf * +armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, + int flags) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &armada_gem_prime_dmabuf_ops; + exp_info.size = obj->size; + exp_info.flags = O_RDWR; + exp_info.priv = obj; + + return dma_buf_export(&exp_info); +} + +struct drm_gem_object * +armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) +{ + struct dma_buf_attachment *attach; + struct 
armada_gem_object *dobj; + + if (buf->ops == &armada_gem_prime_dmabuf_ops) { + struct drm_gem_object *obj = buf->priv; + if (obj->dev == dev) { + /* + * Importing our own dmabuf(s) increases the + * refcount on the gem object itself. + */ + drm_gem_object_reference(obj); + return obj; + } + } + + attach = dma_buf_attach(buf, dev->dev); + if (IS_ERR(attach)) + return ERR_CAST(attach); + + dobj = armada_gem_alloc_private_object(dev, buf->size); + if (!dobj) { + dma_buf_detach(buf, attach); + return ERR_PTR(-ENOMEM); + } + + dobj->obj.import_attach = attach; + get_dma_buf(buf); + + /* + * Don't call dma_buf_map_attachment() here - it maps the + * scatterlist immediately for DMA, and this is not always + * an appropriate thing to do. + */ + return &dobj->obj; +} + +int armada_gem_map_import(struct armada_gem_object *dobj) +{ + int ret; + + dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach, + DMA_TO_DEVICE); + if (!dobj->sgt) { + DRM_ERROR("dma_buf_map_attachment() returned NULL\n"); + return -EINVAL; + } + if (IS_ERR(dobj->sgt)) { + ret = PTR_ERR(dobj->sgt); + dobj->sgt = NULL; + DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret); + return ret; + } + if (dobj->sgt->nents > 1) { + DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n"); + return -EINVAL; + } + if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) { + DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n"); + return -EINVAL; + } + dobj->dev_addr = sg_dma_address(dobj->sgt->sgl); + return 0; +} diff --git a/kernel/drivers/gpu/drm/armada/armada_gem.h b/kernel/drivers/gpu/drm/armada/armada_gem.h new file mode 100644 index 000000000..b000ea3a8 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_gem.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ARMADA_GEM_H +#define ARMADA_GEM_H + +#include + +/* GEM */ +struct armada_gem_object { + struct drm_gem_object obj; + void *addr; + phys_addr_t phys_addr; + resource_size_t dev_addr; + struct drm_mm_node *linear; /* for linear backed */ + struct page *page; /* for page backed */ + struct sg_table *sgt; /* for imported */ + void (*update)(void *); + void *update_data; +}; + +extern const struct vm_operations_struct armada_gem_vm_ops; + +#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj) + +void armada_gem_free_object(struct drm_gem_object *); +int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *); +void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *); +struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *, + size_t); +int armada_gem_dumb_create(struct drm_file *, struct drm_device *, + struct drm_mode_create_dumb *); +int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *, + uint32_t, uint64_t *); +int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *, + uint32_t); +struct dma_buf *armada_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, int flags); +struct drm_gem_object *armada_gem_prime_import(struct drm_device *, + struct dma_buf *); +int armada_gem_map_import(struct armada_gem_object *); + +static inline struct armada_gem_object *armada_gem_object_lookup( + struct drm_device *dev, struct drm_file *dfile, unsigned handle) +{ + struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle); + + return obj ? drm_to_armada_gem(obj) : NULL; +} +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_hw.h b/kernel/drivers/gpu/drm/armada/armada_hw.h new file mode 100644 index 000000000..27319a833 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_hw.h @@ -0,0 +1,318 @@ +/* + * Copyright (C) 2012 Russell King + * Rewritten from the dovefb driver, and Armada510 manuals. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ARMADA_HW_H +#define ARMADA_HW_H + +/* + * Note: the following registers are written from IRQ context: + * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL + * LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC, + * LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN, + * LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0 + */ +enum { + LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */ + LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0, + LCD_SPU_DMA_START_ADDR_U0 = 0x00c4, + LCD_SPU_DMA_START_ADDR_V0 = 0x00c8, + LCD_CFG_DMA_START_ADDR_0 = 0x00cc, + LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0, + LCD_SPU_DMA_START_ADDR_U1 = 0x00d4, + LCD_SPU_DMA_START_ADDR_V1 = 0x00d8, + LCD_CFG_DMA_START_ADDR_1 = 0x00dc, + LCD_SPU_DMA_PITCH_YC = 0x00e0, + LCD_SPU_DMA_PITCH_UV = 0x00e4, + LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8, + LCD_SPU_DMA_HPXL_VLN = 0x00ec, + LCD_SPU_DZM_HPXL_VLN = 0x00f0, + LCD_CFG_GRA_START_ADDR0 = 0x00f4, + LCD_CFG_GRA_START_ADDR1 = 0x00f8, + LCD_CFG_GRA_PITCH = 0x00fc, + LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100, + LCD_SPU_GRA_HPXL_VLN = 0x0104, + LCD_SPU_GZM_HPXL_VLN = 0x0108, + LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c, + LCD_SPU_HWC_HPXL_VLN = 0x0110, + LCD_SPUT_V_H_TOTAL = 0x0114, + LCD_SPU_V_H_ACTIVE = 0x0118, + LCD_SPU_H_PORCH = 0x011c, + LCD_SPU_V_PORCH = 0x0120, + LCD_SPU_BLANKCOLOR = 0x0124, + LCD_SPU_ALPHA_COLOR1 = 0x0128, + LCD_SPU_ALPHA_COLOR2 = 0x012c, + LCD_SPU_COLORKEY_Y = 0x0130, + LCD_SPU_COLORKEY_U = 0x0134, + LCD_SPU_COLORKEY_V = 0x0138, + LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */ + LCD_SPU_SPI_RXDATA = 0x0140, + LCD_SPU_ISA_RXDATA = 0x0144, + LCD_SPU_HWC_RDDAT = 0x0158, + LCD_SPU_GAMMA_RDDAT = 0x015c, + LCD_SPU_PALETTE_RDDAT = 0x0160, + LCD_SPU_IOPAD_IN = 0x0178, + LCD_CFG_RDREG5F = 0x017c, + LCD_SPU_SPI_CTRL = 0x0180, + LCD_SPU_SPI_TXDATA = 0x0184, + 
LCD_SPU_SMPN_CTRL = 0x0188, + LCD_SPU_DMA_CTRL0 = 0x0190, + LCD_SPU_DMA_CTRL1 = 0x0194, + LCD_SPU_SRAM_CTRL = 0x0198, + LCD_SPU_SRAM_WRDAT = 0x019c, + LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */ + LCD_SPU_SRAM_PARA1 = 0x01a4, + LCD_CFG_SCLK_DIV = 0x01a8, + LCD_SPU_CONTRAST = 0x01ac, + LCD_SPU_SATURATION = 0x01b0, + LCD_SPU_CBSH_HUE = 0x01b4, + LCD_SPU_DUMB_CTRL = 0x01b8, + LCD_SPU_IOPAD_CONTROL = 0x01bc, + LCD_SPU_IRQ_ENA = 0x01c0, + LCD_SPU_IRQ_ISR = 0x01c4, +}; + +/* For LCD_SPU_ADV_REG */ +enum { + ADV_VSYNC_L_OFF = 0xfff << 20, + ADV_GRACOLORKEY = 1 << 19, + ADV_VIDCOLORKEY = 1 << 18, + ADV_HWC32BLEND = 1 << 15, + ADV_HWC32ARGB = 1 << 14, + ADV_HWC32ENABLE = 1 << 13, + ADV_VSYNCOFFEN = 1 << 12, + ADV_VSYNC_H_OFF = 0xfff << 0, +}; + +enum { + CFG_565 = 0, + CFG_1555 = 1, + CFG_888PACK = 2, + CFG_X888 = 3, + CFG_8888 = 4, + CFG_422PACK = 5, + CFG_422 = 6, + CFG_420 = 7, + CFG_PSEUDO4 = 9, + CFG_PSEUDO8 = 10, + CFG_SWAPRB = 1 << 4, + CFG_SWAPUV = 1 << 3, + CFG_SWAPYU = 1 << 2, + CFG_YUV2RGB = 1 << 1, +}; + +/* For LCD_SPU_DMA_CTRL0 */ +enum { + CFG_NOBLENDING = 1 << 31, + CFG_GAMMA_ENA = 1 << 30, + CFG_CBSH_ENA = 1 << 29, + CFG_PALETTE_ENA = 1 << 28, + CFG_ARBFAST_ENA = 1 << 27, + CFG_HWC_1BITMOD = 1 << 26, + CFG_HWC_1BITENA = 1 << 25, + CFG_HWC_ENA = 1 << 24, + CFG_DMAFORMAT = 0xf << 20, +#define CFG_DMA_FMT(x) ((x) << 20) + CFG_GRAFORMAT = 0xf << 16, +#define CFG_GRA_FMT(x) ((x) << 16) +#define CFG_GRA_MOD(x) ((x) << 8) + CFG_GRA_FTOGGLE = 1 << 15, + CFG_GRA_HSMOOTH = 1 << 14, + CFG_GRA_TSTMODE = 1 << 13, + CFG_GRA_ENA = 1 << 8, +#define CFG_DMA_MOD(x) ((x) << 0) + CFG_DMA_FTOGGLE = 1 << 7, + CFG_DMA_HSMOOTH = 1 << 6, + CFG_DMA_TSTMODE = 1 << 5, + CFG_DMA_ENA = 1 << 0, +}; + +enum { + CKMODE_DISABLE = 0, + CKMODE_Y = 1, + CKMODE_U = 2, + CKMODE_RGB = 3, + CKMODE_V = 4, + CKMODE_R = 5, + CKMODE_G = 6, + CKMODE_B = 7, +}; + +/* For LCD_SPU_DMA_CTRL1 */ +enum { + CFG_FRAME_TRIG = 1 << 31, + CFG_VSYNC_INV = 1 << 27, + CFG_CKMODE_MASK = 0x7 << 24, +#define 
CFG_CKMODE(x) ((x) << 24) + CFG_CARRY = 1 << 23, + CFG_GATED_CLK = 1 << 21, + CFG_PWRDN_ENA = 1 << 20, + CFG_DSCALE_MASK = 0x3 << 18, + CFG_DSCALE_NONE = 0x0 << 18, + CFG_DSCALE_HALF = 0x1 << 18, + CFG_DSCALE_QUAR = 0x2 << 18, + CFG_ALPHAM_MASK = 0x3 << 16, + CFG_ALPHAM_VIDEO = 0x0 << 16, + CFG_ALPHAM_GRA = 0x1 << 16, + CFG_ALPHAM_CFG = 0x2 << 16, + CFG_ALPHA_MASK = 0xff << 8, + CFG_PIXCMD_MASK = 0xff, +}; + +/* For LCD_SPU_SRAM_CTRL */ +enum { + SRAM_READ = 0 << 14, + SRAM_WRITE = 2 << 14, + SRAM_INIT = 3 << 14, + SRAM_HWC32_RAM1 = 0xc << 8, + SRAM_HWC32_RAM2 = 0xd << 8, + SRAM_HWC32_RAMR = SRAM_HWC32_RAM1, + SRAM_HWC32_RAMG = SRAM_HWC32_RAM2, + SRAM_HWC32_RAMB = 0xe << 8, + SRAM_HWC32_TRAN = 0xf << 8, + SRAM_HWC = 0xf << 8, +}; + +/* For LCD_SPU_SRAM_PARA1 */ +enum { + CFG_CSB_256x32 = 1 << 15, /* cursor */ + CFG_CSB_256x24 = 1 << 14, /* palette */ + CFG_CSB_256x8 = 1 << 13, /* gamma */ + CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */ + CFG_PDWN256x32 = 1 << 7, /* power down cursor */ + CFG_PDWN256x24 = 1 << 6, /* power down palette */ + CFG_PDWN256x8 = 1 << 5, /* power down gamma */ + CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */ + CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */ + CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */ + CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */ + CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */ +}; + +/* For LCD_CFG_SCLK_DIV */ +enum { + /* Armada 510 */ + SCLK_510_AXI = 0x0 << 30, + SCLK_510_EXTCLK0 = 0x1 << 30, + SCLK_510_PLL = 0x2 << 30, + SCLK_510_EXTCLK1 = 0x3 << 30, + SCLK_510_DIV_CHANGE = 1 << 29, + SCLK_510_FRAC_DIV_MASK = 0xfff << 16, + SCLK_510_INT_DIV_MASK = 0xffff << 0, + + /* Armada 16x */ + SCLK_16X_AHB = 0x0 << 28, + SCLK_16X_PCLK = 0x1 << 28, + SCLK_16X_AXI = 0x4 << 28, + SCLK_16X_PLL = 0x8 << 28, + SCLK_16X_FRAC_DIV_MASK = 0xfff << 16, + SCLK_16X_INT_DIV_MASK = 0xffff << 0, +}; + +/* For LCD_SPU_DUMB_CTRL */ +enum { + DUMB16_RGB565_0 = 0x0 << 28, + 
DUMB16_RGB565_1 = 0x1 << 28, + DUMB18_RGB666_0 = 0x2 << 28, + DUMB18_RGB666_1 = 0x3 << 28, + DUMB12_RGB444_0 = 0x4 << 28, + DUMB12_RGB444_1 = 0x5 << 28, + DUMB24_RGB888_0 = 0x6 << 28, + DUMB_BLANK = 0x7 << 28, + DUMB_MASK = 0xf << 28, + CFG_BIAS_OUT = 1 << 8, + CFG_REV_RGB = 1 << 7, + CFG_INV_CBLANK = 1 << 6, + CFG_INV_CSYNC = 1 << 5, /* Normally active high */ + CFG_INV_HENA = 1 << 4, + CFG_INV_VSYNC = 1 << 3, /* Normally active high */ + CFG_INV_HSYNC = 1 << 2, /* Normally active high */ + CFG_INV_PCLK = 1 << 1, + CFG_DUMB_ENA = 1 << 0, +}; + +/* For LCD_SPU_IOPAD_CONTROL */ +enum { + CFG_VSCALE_LN_EN = 3 << 18, + CFG_GRA_VM_ENA = 1 << 15, + CFG_DMA_VM_ENA = 1 << 13, + CFG_CMD_VM_ENA = 1 << 11, + CFG_CSC_MASK = 3 << 8, + CFG_CSC_YUV_CCIR709 = 1 << 9, + CFG_CSC_YUV_CCIR601 = 0 << 9, + CFG_CSC_RGB_STUDIO = 1 << 8, + CFG_CSC_RGB_COMPUTER = 0 << 8, + CFG_IOPAD_MASK = 0xf << 0, + CFG_IOPAD_DUMB24 = 0x0 << 0, + CFG_IOPAD_DUMB18SPI = 0x1 << 0, + CFG_IOPAD_DUMB18GPIO = 0x2 << 0, + CFG_IOPAD_DUMB16SPI = 0x3 << 0, + CFG_IOPAD_DUMB16GPIO = 0x4 << 0, + CFG_IOPAD_DUMB12GPIO = 0x5 << 0, + CFG_IOPAD_SMART18 = 0x6 << 0, + CFG_IOPAD_SMART16 = 0x7 << 0, + CFG_IOPAD_SMART8 = 0x8 << 0, +}; + +#define IOPAD_DUMB24 0x0 + +/* For LCD_SPU_IRQ_ENA */ +enum { + DMA_FRAME_IRQ0_ENA = 1 << 31, + DMA_FRAME_IRQ1_ENA = 1 << 30, + DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA, + DMA_FF_UNDERFLOW_ENA = 1 << 29, + GRA_FRAME_IRQ0_ENA = 1 << 27, + GRA_FRAME_IRQ1_ENA = 1 << 26, + GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA, + GRA_FF_UNDERFLOW_ENA = 1 << 25, + VSYNC_IRQ_ENA = 1 << 23, + DUMB_FRAMEDONE_ENA = 1 << 22, + TWC_FRAMEDONE_ENA = 1 << 21, + HWC_FRAMEDONE_ENA = 1 << 20, + SLV_IRQ_ENA = 1 << 19, + SPI_IRQ_ENA = 1 << 18, + PWRDN_IRQ_ENA = 1 << 17, + ERR_IRQ_ENA = 1 << 16, + CLEAN_SPU_IRQ_ISR = 0xffff, +}; + +/* For LCD_SPU_IRQ_ISR */ +enum { + DMA_FRAME_IRQ0 = 1 << 31, + DMA_FRAME_IRQ1 = 1 << 30, + DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1, + DMA_FF_UNDERFLOW = 
1 << 29, + GRA_FRAME_IRQ0 = 1 << 27, + GRA_FRAME_IRQ1 = 1 << 26, + GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1, + GRA_FF_UNDERFLOW = 1 << 25, + VSYNC_IRQ = 1 << 23, + DUMB_FRAMEDONE = 1 << 22, + TWC_FRAMEDONE = 1 << 21, + HWC_FRAMEDONE = 1 << 20, + SLV_IRQ = 1 << 19, + SPI_IRQ = 1 << 18, + PWRDN_IRQ = 1 << 17, + ERR_IRQ = 1 << 16, + DMA_FRAME_IRQ0_LEVEL = 1 << 15, + DMA_FRAME_IRQ1_LEVEL = 1 << 14, + DMA_FRAME_CNT_ISR = 3 << 12, + GRA_FRAME_IRQ0_LEVEL = 1 << 11, + GRA_FRAME_IRQ1_LEVEL = 1 << 10, + GRA_FRAME_CNT_ISR = 3 << 8, + VSYNC_IRQ_LEVEL = 1 << 7, + DUMB_FRAMEDONE_LEVEL = 1 << 6, + TWC_FRAMEDONE_LEVEL = 1 << 5, + HWC_FRAMEDONE_LEVEL = 1 << 4, + SLV_FF_EMPTY = 1 << 3, + DMA_FF_ALLEMPTY = 1 << 2, + GRA_FF_ALLEMPTY = 1 << 1, + PWRDN_IRQ_LEVEL = 1 << 0, +}; + +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_ioctlP.h b/kernel/drivers/gpu/drm/armada/armada_ioctlP.h new file mode 100644 index 000000000..bd8c45620 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_ioctlP.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ARMADA_IOCTLP_H +#define ARMADA_IOCTLP_H + +#define ARMADA_IOCTL_PROTO(name)\ +extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *) + +ARMADA_IOCTL_PROTO(gem_create); +ARMADA_IOCTL_PROTO(gem_mmap); +ARMADA_IOCTL_PROTO(gem_pwrite); + +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_output.c b/kernel/drivers/gpu/drm/armada/armada_output.c new file mode 100644 index 000000000..abbc309fe --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_output.c @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include "armada_output.h" +#include "armada_drm.h" + +struct armada_connector { + struct drm_connector conn; + const struct armada_output_type *type; +}; + +#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn) + +struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn) +{ + struct drm_encoder *enc = conn->encoder; + + return enc ? 
enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]); +} + +static enum drm_connector_status armada_drm_connector_detect( + struct drm_connector *conn, bool force) +{ + struct armada_connector *dconn = drm_to_armada_conn(conn); + enum drm_connector_status status = connector_status_disconnected; + + if (dconn->type->detect) { + status = dconn->type->detect(conn, force); + } else { + struct drm_encoder *enc = armada_drm_connector_encoder(conn); + + if (enc) + status = encoder_helper_funcs(enc)->detect(enc, conn); + } + + return status; +} + +static void armada_drm_connector_destroy(struct drm_connector *conn) +{ + struct armada_connector *dconn = drm_to_armada_conn(conn); + + drm_connector_unregister(conn); + drm_connector_cleanup(conn); + kfree(dconn); +} + +static int armada_drm_connector_set_property(struct drm_connector *conn, + struct drm_property *property, uint64_t value) +{ + struct armada_connector *dconn = drm_to_armada_conn(conn); + + if (!dconn->type->set_property) + return -EINVAL; + + return dconn->type->set_property(conn, property, value); +} + +static const struct drm_connector_funcs armada_drm_conn_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = armada_drm_connector_detect, + .destroy = armada_drm_connector_destroy, + .set_property = armada_drm_connector_set_property, +}; + +void armada_drm_encoder_prepare(struct drm_encoder *encoder) +{ + encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF); +} + +void armada_drm_encoder_commit(struct drm_encoder *encoder) +{ + encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON); +} + +bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted) +{ + return true; +} + +/* Shouldn't this be a generic helper function? 
*/ +int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn, + struct drm_display_mode *mode) +{ + struct drm_encoder *encoder = armada_drm_connector_encoder(conn); + int valid = MODE_BAD; + + if (encoder) { + struct drm_encoder_slave *slave = to_encoder_slave(encoder); + + valid = slave->slave_funcs->mode_valid(encoder, mode); + } + return valid; +} + +int armada_drm_slave_encoder_set_property(struct drm_connector *conn, + struct drm_property *property, uint64_t value) +{ + struct drm_encoder *encoder = armada_drm_connector_encoder(conn); + int rc = -EINVAL; + + if (encoder) { + struct drm_encoder_slave *slave = to_encoder_slave(encoder); + + rc = slave->slave_funcs->set_property(encoder, conn, property, + value); + } + return rc; +} + +int armada_output_create(struct drm_device *dev, + const struct armada_output_type *type, const void *data) +{ + struct armada_connector *dconn; + int ret; + + dconn = kzalloc(sizeof(*dconn), GFP_KERNEL); + if (!dconn) + return -ENOMEM; + + dconn->type = type; + + ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs, + type->connector_type); + if (ret) { + DRM_ERROR("unable to init connector\n"); + goto err_destroy_dconn; + } + + ret = type->create(&dconn->conn, data); + if (ret) + goto err_conn; + + ret = drm_connector_register(&dconn->conn); + if (ret) + goto err_sysfs; + + return 0; + + err_sysfs: + if (dconn->conn.encoder) + dconn->conn.encoder->funcs->destroy(dconn->conn.encoder); + err_conn: + drm_connector_cleanup(&dconn->conn); + err_destroy_dconn: + kfree(dconn); + return ret; +} diff --git a/kernel/drivers/gpu/drm/armada/armada_output.h b/kernel/drivers/gpu/drm/armada/armada_output.h new file mode 100644 index 000000000..3c4023e14 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_output.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + 
* published by the Free Software Foundation. + */ +#ifndef ARMADA_CONNETOR_H +#define ARMADA_CONNETOR_H + +#define encoder_helper_funcs(encoder) \ + ((const struct drm_encoder_helper_funcs *)encoder->helper_private) + +struct armada_output_type { + int connector_type; + enum drm_connector_status (*detect)(struct drm_connector *, bool); + int (*create)(struct drm_connector *, const void *); + int (*set_property)(struct drm_connector *, struct drm_property *, + uint64_t); +}; + +struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn); + +void armada_drm_encoder_prepare(struct drm_encoder *encoder); +void armada_drm_encoder_commit(struct drm_encoder *encoder); + +bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adj); + +int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn, + struct drm_display_mode *mode); + +int armada_drm_slave_encoder_set_property(struct drm_connector *conn, + struct drm_property *property, uint64_t value); + +int armada_output_create(struct drm_device *dev, + const struct armada_output_type *type, const void *data); + +#endif diff --git a/kernel/drivers/gpu/drm/armada/armada_overlay.c b/kernel/drivers/gpu/drm/armada/armada_overlay.c new file mode 100644 index 000000000..c5b06fdb4 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_overlay.c @@ -0,0 +1,477 @@ +/* + * Copyright (C) 2012 Russell King + * Rewritten from the dovefb driver, and Armada510 manuals. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include "armada_crtc.h" +#include "armada_drm.h" +#include "armada_fb.h" +#include "armada_gem.h" +#include "armada_hw.h" +#include +#include "armada_ioctlP.h" + +struct armada_plane_properties { + uint32_t colorkey_yr; + uint32_t colorkey_ug; + uint32_t colorkey_vb; +#define K2R(val) (((val) >> 0) & 0xff) +#define K2G(val) (((val) >> 8) & 0xff) +#define K2B(val) (((val) >> 16) & 0xff) + int16_t brightness; + uint16_t contrast; + uint16_t saturation; + uint32_t colorkey_mode; +}; + +struct armada_plane { + struct drm_plane base; + spinlock_t lock; + struct drm_framebuffer *old_fb; + uint32_t src_hw; + uint32_t dst_hw; + uint32_t dst_yx; + uint32_t ctrl0; + struct { + struct armada_vbl_event update; + struct armada_regs regs[13]; + wait_queue_head_t wait; + } vbl; + struct armada_plane_properties prop; +}; +#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base) + + +static void +armada_ovl_update_attr(struct armada_plane_properties *prop, + struct armada_crtc *dcrtc) +{ + writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y); + writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U); + writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V); + + writel_relaxed(prop->brightness << 16 | prop->contrast, + dcrtc->base + LCD_SPU_CONTRAST); + /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */ + writel_relaxed(prop->saturation << 16, + dcrtc->base + LCD_SPU_SATURATION); + writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE); + + spin_lock_irq(&dcrtc->irq_lock); + armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA, + CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, + dcrtc->base + LCD_SPU_DMA_CTRL1); + + armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG); + spin_unlock_irq(&dcrtc->irq_lock); +} + +/* === Plane support === */ +static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data) +{ + struct armada_plane *dplane = data; + struct drm_framebuffer 
*fb; + + armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs); + + spin_lock(&dplane->lock); + fb = dplane->old_fb; + dplane->old_fb = NULL; + spin_unlock(&dplane->lock); + + if (fb) + armada_drm_queue_unref_work(dcrtc->crtc.dev, fb); +} + +static unsigned armada_limit(int start, unsigned size, unsigned max) +{ + int end = start + size; + if (end < 0) + return 0; + if (start < 0) + start = 0; + return (unsigned)end > max ? max - start : end - start; +} + +static int +armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h, + uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) +{ + struct armada_plane *dplane = drm_to_armada_plane(plane); + struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + uint32_t val, ctrl0; + unsigned idx = 0; + int ret; + + crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay); + crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay); + ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) | + CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) | + CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA; + + /* Does the position/size result in nothing to display? */ + if (crtc_w == 0 || crtc_h == 0) { + ctrl0 &= ~CFG_DMA_ENA; + } + + /* + * FIXME: if the starting point is off screen, we need to + * adjust src_x, src_y, src_w, src_h appropriately, and + * according to the scale. + */ + + if (!dcrtc->plane) { + dcrtc->plane = plane; + armada_ovl_update_attr(&dplane->prop, dcrtc); + } + + /* FIXME: overlay on an interlaced display */ + /* Just updating the position/size? 
*/ + if (plane->fb == fb && dplane->ctrl0 == ctrl0) { + val = (src_h & 0xffff0000) | src_w >> 16; + dplane->src_hw = val; + writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN); + val = crtc_h << 16 | crtc_w; + dplane->dst_hw = val; + writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN); + val = crtc_y << 16 | crtc_x; + dplane->dst_yx = val; + writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN); + return 0; + } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) { + /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */ + armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66, + dcrtc->base + LCD_SPU_SRAM_PARA1); + } + + ret = wait_event_timeout(dplane->vbl.wait, + list_empty(&dplane->vbl.update.node), + HZ/25); + if (ret < 0) + return ret; + + if (plane->fb != fb) { + struct armada_gem_object *obj = drm_fb_obj(fb); + uint32_t sy, su, sv; + + /* + * Take a reference on the new framebuffer - we want to + * hold on to it while the hardware is displaying it. + */ + drm_framebuffer_reference(fb); + + if (plane->fb) { + struct drm_framebuffer *older_fb; + + spin_lock_irq(&dplane->lock); + older_fb = dplane->old_fb; + dplane->old_fb = plane->fb; + spin_unlock_irq(&dplane->lock); + if (older_fb) + armada_drm_queue_unref_work(dcrtc->crtc.dev, + older_fb); + } + + src_y >>= 16; + src_x >>= 16; + sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] + + src_x * fb->bits_per_pixel / 8; + su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] + + src_x; + sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] + + src_x; + + armada_reg_queue_set(dplane->vbl.regs, idx, sy, + LCD_SPU_DMA_START_ADDR_Y0); + armada_reg_queue_set(dplane->vbl.regs, idx, su, + LCD_SPU_DMA_START_ADDR_U0); + armada_reg_queue_set(dplane->vbl.regs, idx, sv, + LCD_SPU_DMA_START_ADDR_V0); + armada_reg_queue_set(dplane->vbl.regs, idx, sy, + LCD_SPU_DMA_START_ADDR_Y1); + armada_reg_queue_set(dplane->vbl.regs, idx, su, + LCD_SPU_DMA_START_ADDR_U1); + armada_reg_queue_set(dplane->vbl.regs, 
idx, sv, + LCD_SPU_DMA_START_ADDR_V1); + + val = fb->pitches[0] << 16 | fb->pitches[0]; + armada_reg_queue_set(dplane->vbl.regs, idx, val, + LCD_SPU_DMA_PITCH_YC); + val = fb->pitches[1] << 16 | fb->pitches[2]; + armada_reg_queue_set(dplane->vbl.regs, idx, val, + LCD_SPU_DMA_PITCH_UV); + } + + val = (src_h & 0xffff0000) | src_w >> 16; + if (dplane->src_hw != val) { + dplane->src_hw = val; + armada_reg_queue_set(dplane->vbl.regs, idx, val, + LCD_SPU_DMA_HPXL_VLN); + } + val = crtc_h << 16 | crtc_w; + if (dplane->dst_hw != val) { + dplane->dst_hw = val; + armada_reg_queue_set(dplane->vbl.regs, idx, val, + LCD_SPU_DZM_HPXL_VLN); + } + val = crtc_y << 16 | crtc_x; + if (dplane->dst_yx != val) { + dplane->dst_yx = val; + armada_reg_queue_set(dplane->vbl.regs, idx, val, + LCD_SPU_DMA_OVSA_HPXL_VLN); + } + if (dplane->ctrl0 != ctrl0) { + dplane->ctrl0 = ctrl0; + armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0, + CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE | + CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE | + CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU | + CFG_YUV2RGB) | CFG_DMA_ENA, + LCD_SPU_DMA_CTRL0); + } + if (idx) { + armada_reg_queue_end(dplane->vbl.regs, idx); + armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update); + } + return 0; +} + +static int armada_plane_disable(struct drm_plane *plane) +{ + struct armada_plane *dplane = drm_to_armada_plane(plane); + struct drm_framebuffer *fb; + struct armada_crtc *dcrtc; + + if (!dplane->base.crtc) + return 0; + + dcrtc = drm_to_armada_crtc(dplane->base.crtc); + dcrtc->plane = NULL; + + spin_lock_irq(&dcrtc->irq_lock); + armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update); + armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0); + dplane->ctrl0 = 0; + spin_unlock_irq(&dcrtc->irq_lock); + + /* Power down the Y/U/V FIFOs */ + armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0, + dcrtc->base + LCD_SPU_SRAM_PARA1); + + if (plane->fb) + drm_framebuffer_unreference(plane->fb); + + spin_lock_irq(&dplane->lock); + fb = 
dplane->old_fb; + dplane->old_fb = NULL; + spin_unlock_irq(&dplane->lock); + if (fb) + drm_framebuffer_unreference(fb); + + return 0; +} + +static void armada_plane_destroy(struct drm_plane *plane) +{ + kfree(plane); +} + +static int armada_plane_set_property(struct drm_plane *plane, + struct drm_property *property, uint64_t val) +{ + struct armada_private *priv = plane->dev->dev_private; + struct armada_plane *dplane = drm_to_armada_plane(plane); + bool update_attr = false; + + if (property == priv->colorkey_prop) { +#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8) + dplane->prop.colorkey_yr = CCC(K2R(val)); + dplane->prop.colorkey_ug = CCC(K2G(val)); + dplane->prop.colorkey_vb = CCC(K2B(val)); +#undef CCC + update_attr = true; + } else if (property == priv->colorkey_min_prop) { + dplane->prop.colorkey_yr &= ~0x00ff0000; + dplane->prop.colorkey_yr |= K2R(val) << 16; + dplane->prop.colorkey_ug &= ~0x00ff0000; + dplane->prop.colorkey_ug |= K2G(val) << 16; + dplane->prop.colorkey_vb &= ~0x00ff0000; + dplane->prop.colorkey_vb |= K2B(val) << 16; + update_attr = true; + } else if (property == priv->colorkey_max_prop) { + dplane->prop.colorkey_yr &= ~0xff000000; + dplane->prop.colorkey_yr |= K2R(val) << 24; + dplane->prop.colorkey_ug &= ~0xff000000; + dplane->prop.colorkey_ug |= K2G(val) << 24; + dplane->prop.colorkey_vb &= ~0xff000000; + dplane->prop.colorkey_vb |= K2B(val) << 24; + update_attr = true; + } else if (property == priv->colorkey_val_prop) { + dplane->prop.colorkey_yr &= ~0x0000ff00; + dplane->prop.colorkey_yr |= K2R(val) << 8; + dplane->prop.colorkey_ug &= ~0x0000ff00; + dplane->prop.colorkey_ug |= K2G(val) << 8; + dplane->prop.colorkey_vb &= ~0x0000ff00; + dplane->prop.colorkey_vb |= K2B(val) << 8; + update_attr = true; + } else if (property == priv->colorkey_alpha_prop) { + dplane->prop.colorkey_yr &= ~0x000000ff; + dplane->prop.colorkey_yr |= K2R(val); + dplane->prop.colorkey_ug &= ~0x000000ff; + dplane->prop.colorkey_ug |= K2G(val); + 
dplane->prop.colorkey_vb &= ~0x000000ff; + dplane->prop.colorkey_vb |= K2B(val); + update_attr = true; + } else if (property == priv->colorkey_mode_prop) { + dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK; + dplane->prop.colorkey_mode |= CFG_CKMODE(val); + update_attr = true; + } else if (property == priv->brightness_prop) { + dplane->prop.brightness = val - 256; + update_attr = true; + } else if (property == priv->contrast_prop) { + dplane->prop.contrast = val; + update_attr = true; + } else if (property == priv->saturation_prop) { + dplane->prop.saturation = val; + update_attr = true; + } + + if (update_attr && dplane->base.crtc) + armada_ovl_update_attr(&dplane->prop, + drm_to_armada_crtc(dplane->base.crtc)); + + return 0; +} + +static const struct drm_plane_funcs armada_plane_funcs = { + .update_plane = armada_plane_update, + .disable_plane = armada_plane_disable, + .destroy = armada_plane_destroy, + .set_property = armada_plane_set_property, +}; + +static const uint32_t armada_formats[] = { + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, + DRM_FORMAT_YUV422, + DRM_FORMAT_YVU422, + DRM_FORMAT_VYUY, + DRM_FORMAT_YVYU, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, +}; + +static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = { + { CKMODE_DISABLE, "disabled" }, + { CKMODE_Y, "Y component" }, + { CKMODE_U, "U component" }, + { CKMODE_V, "V component" }, + { CKMODE_RGB, "RGB" }, + { CKMODE_R, "R component" }, + { CKMODE_G, "G component" }, + { CKMODE_B, "B component" }, +}; + +static int armada_overlay_create_properties(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + + if (priv->colorkey_prop) + return 0; + + priv->colorkey_prop = drm_property_create_range(dev, 0, + "colorkey", 0, 0xffffff); + priv->colorkey_min_prop = 
drm_property_create_range(dev, 0, + "colorkey_min", 0, 0xffffff); + priv->colorkey_max_prop = drm_property_create_range(dev, 0, + "colorkey_max", 0, 0xffffff); + priv->colorkey_val_prop = drm_property_create_range(dev, 0, + "colorkey_val", 0, 0xffffff); + priv->colorkey_alpha_prop = drm_property_create_range(dev, 0, + "colorkey_alpha", 0, 0xffffff); + priv->colorkey_mode_prop = drm_property_create_enum(dev, 0, + "colorkey_mode", + armada_drm_colorkey_enum_list, + ARRAY_SIZE(armada_drm_colorkey_enum_list)); + priv->brightness_prop = drm_property_create_range(dev, 0, + "brightness", 0, 256 + 255); + priv->contrast_prop = drm_property_create_range(dev, 0, + "contrast", 0, 0x7fff); + priv->saturation_prop = drm_property_create_range(dev, 0, + "saturation", 0, 0x7fff); + + if (!priv->colorkey_prop) + return -ENOMEM; + + return 0; +} + +int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) +{ + struct armada_private *priv = dev->dev_private; + struct drm_mode_object *mobj; + struct armada_plane *dplane; + int ret; + + ret = armada_overlay_create_properties(dev); + if (ret) + return ret; + + dplane = kzalloc(sizeof(*dplane), GFP_KERNEL); + if (!dplane) + return -ENOMEM; + + spin_lock_init(&dplane->lock); + init_waitqueue_head(&dplane->vbl.wait); + armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl, + dplane); + + drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs, + armada_formats, ARRAY_SIZE(armada_formats), false); + + dplane->prop.colorkey_yr = 0xfefefe00; + dplane->prop.colorkey_ug = 0x01010100; + dplane->prop.colorkey_vb = 0x01010100; + dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB); + dplane->prop.brightness = 0; + dplane->prop.contrast = 0x4000; + dplane->prop.saturation = 0x4000; + + mobj = &dplane->base.base; + drm_object_attach_property(mobj, priv->colorkey_prop, + 0x0101fe); + drm_object_attach_property(mobj, priv->colorkey_min_prop, + 0x0101fe); + drm_object_attach_property(mobj, priv->colorkey_max_prop, + 
0x0101fe); + drm_object_attach_property(mobj, priv->colorkey_val_prop, + 0x0101fe); + drm_object_attach_property(mobj, priv->colorkey_alpha_prop, + 0x000000); + drm_object_attach_property(mobj, priv->colorkey_mode_prop, + CKMODE_RGB); + drm_object_attach_property(mobj, priv->brightness_prop, 256); + drm_object_attach_property(mobj, priv->contrast_prop, + dplane->prop.contrast); + drm_object_attach_property(mobj, priv->saturation_prop, + dplane->prop.saturation); + + return 0; +} diff --git a/kernel/drivers/gpu/drm/armada/armada_slave.c b/kernel/drivers/gpu/drm/armada/armada_slave.c new file mode 100644 index 000000000..00d0facb4 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_slave.c @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2012 Russell King + * Rewritten from the dovefb driver, and Armada510 manuals. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include "armada_drm.h" +#include "armada_output.h" +#include "armada_slave.h" + +static int armada_drm_slave_get_modes(struct drm_connector *conn) +{ + struct drm_encoder *enc = armada_drm_connector_encoder(conn); + int count = 0; + + if (enc) { + struct drm_encoder_slave *slave = to_encoder_slave(enc); + + count = slave->slave_funcs->get_modes(enc, conn); + } + + return count; +} + +static void armada_drm_slave_destroy(struct drm_encoder *enc) +{ + struct drm_encoder_slave *slave = to_encoder_slave(enc); + struct i2c_client *client = drm_i2c_encoder_get_client(enc); + + if (slave->slave_funcs) + slave->slave_funcs->destroy(enc); + if (client) + i2c_put_adapter(client->adapter); + + drm_encoder_cleanup(&slave->base); + kfree(slave); +} + +static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = { + .destroy = armada_drm_slave_destroy, +}; + +static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = { + .get_modes = armada_drm_slave_get_modes, + .mode_valid = armada_drm_slave_encoder_mode_valid, + .best_encoder = armada_drm_connector_encoder, +}; + +static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = { + .dpms = drm_i2c_encoder_dpms, + .save = drm_i2c_encoder_save, + .restore = drm_i2c_encoder_restore, + .mode_fixup = drm_i2c_encoder_mode_fixup, + .prepare = drm_i2c_encoder_prepare, + .commit = drm_i2c_encoder_commit, + .mode_set = drm_i2c_encoder_mode_set, + .detect = drm_i2c_encoder_detect, +}; + +static int +armada_drm_conn_slave_create(struct drm_connector *conn, const void *data) +{ + const struct armada_drm_slave_config *config = data; + struct drm_encoder_slave *slave; + struct i2c_adapter *adap; + int ret; + + conn->interlace_allowed = config->interlace_allowed; + conn->doublescan_allowed = config->doublescan_allowed; + conn->polled = config->polled; + + drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs); + + slave = kzalloc(sizeof(*slave), 
GFP_KERNEL); + if (!slave) + return -ENOMEM; + + slave->base.possible_crtcs = config->crtcs; + + adap = i2c_get_adapter(config->i2c_adapter_id); + if (!adap) { + kfree(slave); + return -EPROBE_DEFER; + } + + ret = drm_encoder_init(conn->dev, &slave->base, + &armada_drm_slave_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + if (ret) { + DRM_ERROR("unable to init encoder\n"); + i2c_put_adapter(adap); + kfree(slave); + return ret; + } + + ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info); + i2c_put_adapter(adap); + if (ret) { + DRM_ERROR("unable to init encoder slave\n"); + armada_drm_slave_destroy(&slave->base); + return ret; + } + + drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers); + + ret = slave->slave_funcs->create_resources(&slave->base, conn); + if (ret) { + armada_drm_slave_destroy(&slave->base); + return ret; + } + + ret = drm_mode_connector_attach_encoder(conn, &slave->base); + if (ret) { + armada_drm_slave_destroy(&slave->base); + return ret; + } + + conn->encoder = &slave->base; + + return ret; +} + +static const struct armada_output_type armada_drm_conn_slave = { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + .create = armada_drm_conn_slave_create, + .set_property = armada_drm_slave_encoder_set_property, +}; + +int armada_drm_connector_slave_create(struct drm_device *dev, + const struct armada_drm_slave_config *config) +{ + return armada_output_create(dev, &armada_drm_conn_slave, config); +} diff --git a/kernel/drivers/gpu/drm/armada/armada_slave.h b/kernel/drivers/gpu/drm/armada/armada_slave.h new file mode 100644 index 000000000..bf2374c96 --- /dev/null +++ b/kernel/drivers/gpu/drm/armada/armada_slave.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ARMADA_SLAVE_H +#define ARMADA_SLAVE_H + +#include +#include + +struct armada_drm_slave_config { + int i2c_adapter_id; + uint32_t crtcs; + uint8_t polled; + bool interlace_allowed; + bool doublescan_allowed; + struct i2c_board_info info; +}; + +int armada_drm_connector_slave_create(struct drm_device *dev, + const struct armada_drm_slave_config *); + +#endif diff --git a/kernel/drivers/gpu/drm/ast/Kconfig b/kernel/drivers/gpu/drm/ast/Kconfig new file mode 100644 index 000000000..8a784c460 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/Kconfig @@ -0,0 +1,17 @@ +config DRM_AST + tristate "AST server chips" + depends on DRM && PCI + select DRM_TTM + select FB_SYS_COPYAREA + select FB_SYS_FILLRECT + select FB_SYS_IMAGEBLIT + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + select DRM_TTM + help + Say yes for experimental AST GPU driver. Do not enable + this driver without having a working -modesetting, + and a version of AST that knows to fail if KMS + is bound to the driver. These GPUs are commonly found + in server chipsets. + diff --git a/kernel/drivers/gpu/drm/ast/Makefile b/kernel/drivers/gpu/drm/ast/Makefile new file mode 100644 index 000000000..171aa0622 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
+ +ccflags-y := -Iinclude/drm + +ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o + +obj-$(CONFIG_DRM_AST) := ast.o diff --git a/kernel/drivers/gpu/drm/ast/ast_dp501.c b/kernel/drivers/gpu/drm/ast/ast_dp501.c new file mode 100644 index 000000000..76f07f38b --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_dp501.c @@ -0,0 +1,434 @@ + +#include +#include +#include "ast_drv.h" +MODULE_FIRMWARE("ast_dp501_fw.bin"); + +int ast_load_dp501_microcode(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + static char *fw_name = "ast_dp501_fw.bin"; + int err; + err = request_firmware(&ast->dp501_fw, fw_name, dev->dev); + if (err) + return err; + + return 0; +} + +static void send_ack(struct ast_private *ast) +{ + u8 sendack; + sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); + sendack |= 0x80; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); +} + +static void send_nack(struct ast_private *ast) +{ + u8 sendack; + sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); + sendack &= ~0x80; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); +} + +static bool wait_ack(struct ast_private *ast) +{ + u8 waitack; + u32 retry = 0; + do { + waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff); + waitack &= 0x80; + udelay(100); + } while ((!waitack) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} + +static bool wait_nack(struct ast_private *ast) +{ + u8 waitack; + u32 retry = 0; + do { + waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff); + waitack &= 0x80; + udelay(100); + } while ((waitack) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} + +static void set_cmd_trigger(struct ast_private *ast) +{ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40); +} + +static void clear_cmd_trigger(struct ast_private *ast) +{ + ast_set_index_reg_mask(ast, 
AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00); +} + +#if 0 +static bool wait_fw_ready(struct ast_private *ast) +{ + u8 waitready; + u32 retry = 0; + do { + waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff); + waitready &= 0x40; + udelay(100); + } while ((!waitready) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} +#endif + +static bool ast_write_cmd(struct drm_device *dev, u8 data) +{ + struct ast_private *ast = dev->dev_private; + int retry = 0; + if (wait_nack(ast)) { + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data); + send_ack(ast); + set_cmd_trigger(ast); + do { + if (wait_ack(ast)) { + clear_cmd_trigger(ast); + send_nack(ast); + return true; + } + } while (retry++ < 100); + } + clear_cmd_trigger(ast); + send_nack(ast); + return false; +} + +static bool ast_write_data(struct drm_device *dev, u8 data) +{ + struct ast_private *ast = dev->dev_private; + + if (wait_nack(ast)) { + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data); + send_ack(ast); + if (wait_ack(ast)) { + send_nack(ast); + return true; + } + } + send_nack(ast); + return false; +} + +#if 0 +static bool ast_read_data(struct drm_device *dev, u8 *data) +{ + struct ast_private *ast = dev->dev_private; + u8 tmp; + + *data = 0; + + if (wait_ack(ast) == false) + return false; + tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff); + *data = tmp; + if (wait_nack(ast) == false) { + send_nack(ast); + return false; + } + send_nack(ast); + return true; +} + +static void clear_cmd(struct ast_private *ast) +{ + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00); +} +#endif + +void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) +{ + ast_write_cmd(dev, 0x40); + ast_write_data(dev, mode); + + msleep(10); +} + +static u32 get_fw_base(struct ast_private *ast) +{ + return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff; +} + +bool ast_backup_fw(struct 
drm_device *dev, u8 *addr, u32 size) +{ + struct ast_private *ast = dev->dev_private; + u32 i, data; + u32 boot_address; + + data = ast_mindwm(ast, 0x1e6e2100) & 0x01; + if (data) { + boot_address = get_fw_base(ast); + for (i = 0; i < size; i += 4) + *(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i); + return true; + } + return false; +} + +bool ast_launch_m68k(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u32 i, data, len = 0; + u32 boot_address; + u8 *fw_addr = NULL; + u8 jreg; + + data = ast_mindwm(ast, 0x1e6e2100) & 0x01; + if (!data) { + + if (ast->dp501_fw_addr) { + fw_addr = ast->dp501_fw_addr; + len = 32*1024; + } else if (ast->dp501_fw) { + fw_addr = (u8 *)ast->dp501_fw->data; + len = ast->dp501_fw->size; + } + /* Get BootAddress */ + ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8); + data = ast_mindwm(ast, 0x1e6e0004); + switch (data & 0x03) { + case 0: + boot_address = 0x44000000; + break; + default: + case 1: + boot_address = 0x48000000; + break; + case 2: + boot_address = 0x50000000; + break; + case 3: + boot_address = 0x60000000; + break; + } + boot_address -= 0x200000; /* -2MB */ + + /* copy image to buffer */ + for (i = 0; i < len; i += 4) { + data = *(u32 *)(fw_addr + i); + ast_moutdwm(ast, boot_address + i, data); + } + + /* Init SCU */ + ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8); + + /* Launch FW */ + ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address); + ast_moutdwm(ast, 0x1e6e2100, 1); + + /* Update Scratch */ + data = ast_mindwm(ast, 0x1e6e2040) & 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */ + data |= 0x800; + ast_moutdwm(ast, 0x1e6e2040, data); + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */ + jreg |= 0x02; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg); + } + return true; +} + +u8 ast_get_dp501_max_clk(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u32 boot_address, offset, data; + u8 linkcap[4], linkrate, linklanes, 
maxclk = 0xff; + + boot_address = get_fw_base(ast); + + /* validate FW version */ + offset = 0xf000; + data = ast_mindwm(ast, boot_address + offset); + if ((data & 0xf0) != 0x10) /* version: 1x */ + return maxclk; + + /* Read Link Capability */ + offset = 0xf014; + *(u32 *)linkcap = ast_mindwm(ast, boot_address + offset); + if (linkcap[2] == 0) { + linkrate = linkcap[0]; + linklanes = linkcap[1]; + data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes); + if (data > 0xff) + data = 0xff; + maxclk = (u8)data; + } + return maxclk; +} + +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) +{ + struct ast_private *ast = dev->dev_private; + u32 i, boot_address, offset, data; + + boot_address = get_fw_base(ast); + + /* validate FW version */ + offset = 0xf000; + data = ast_mindwm(ast, boot_address + offset); + if ((data & 0xf0) != 0x10) + return false; + + /* validate PnP Monitor */ + offset = 0xf010; + data = ast_mindwm(ast, boot_address + offset); + if (!(data & 0x01)) + return false; + + /* Read EDID */ + offset = 0xf020; + for (i = 0; i < 128; i += 4) { + data = ast_mindwm(ast, boot_address + offset + i); + *(u32 *)(ediddata + i) = data; + } + + return true; +} + +static bool ast_init_dvo(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u8 jreg; + u32 data; + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if (!(jreg & 0x80)) { + /* Init SCU DVO Settings */ + data = ast_read32(ast, 0x12008); + /* delay phase */ + data &= 0xfffff8ff; + data |= 0x00000500; + ast_write32(ast, 0x12008, data); + + if (ast->chip == AST2300) { + data = ast_read32(ast, 0x12084); + /* multi-pins for DVO single-edge */ + data |= 0xfffe0000; + ast_write32(ast, 0x12084, data); + + data = ast_read32(ast, 0x12088); + /* multi-pins for DVO single-edge */ + data |= 0x000fffff; + ast_write32(ast, 0x12088, data); + + data = 
ast_read32(ast, 0x12090); + /* multi-pins for DVO single-edge */ + data &= 0xffffffcf; + data |= 0x00000020; + ast_write32(ast, 0x12090, data); + } else { /* AST2400 */ + data = ast_read32(ast, 0x12088); + /* multi-pins for DVO single-edge */ + data |= 0x30000000; + ast_write32(ast, 0x12088, data); + + data = ast_read32(ast, 0x1208c); + /* multi-pins for DVO single-edge */ + data |= 0x000000cf; + ast_write32(ast, 0x1208c, data); + + data = ast_read32(ast, 0x120a4); + /* multi-pins for DVO single-edge */ + data |= 0xffff0000; + ast_write32(ast, 0x120a4, data); + + data = ast_read32(ast, 0x120a8); + /* multi-pins for DVO single-edge */ + data |= 0x0000000f; + ast_write32(ast, 0x120a8, data); + + data = ast_read32(ast, 0x12094); + /* multi-pins for DVO single-edge */ + data |= 0x00000002; + ast_write32(ast, 0x12094, data); + } + } + + /* Force to DVO */ + data = ast_read32(ast, 0x1202c); + data &= 0xfffbffff; + ast_write32(ast, 0x1202c, data); + + /* Init VGA DVO Settings */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); + return true; +} + + +static void ast_init_analog(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u32 data; + + /* + * Set DAC source to VGA mode in SCU2C via the P2A + * bridge. First configure the P2U to target the SCU + * in case it isn't at this stage. 
+ */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + + /* Then unlock the SCU with the magic password */ + ast_write32(ast, 0x12000, 0x1688a8a8); + ast_write32(ast, 0x12000, 0x1688a8a8); + ast_write32(ast, 0x12000, 0x1688a8a8); + + /* Finally, clear bits [17:16] of SCU2c */ + data = ast_read32(ast, 0x1202c); + data &= 0xfffcffff; + ast_write32(ast, 0, data); + + /* Disable DVO */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00); +} + +void ast_init_3rdtx(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u8 jreg; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + switch (jreg & 0x0e) { + case 0x04: + ast_init_dvo(dev); + break; + case 0x08: + ast_launch_m68k(dev); + break; + case 0x0c: + ast_init_dvo(dev); + break; + default: + if (ast->tx_chip_type == AST_TX_SIL164) + ast_init_dvo(dev); + else + ast_init_analog(dev); + } + } +} diff --git a/kernel/drivers/gpu/drm/ast/ast_dram_tables.h b/kernel/drivers/gpu/drm/ast/ast_dram_tables.h new file mode 100644 index 000000000..cc04539c0 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_dram_tables.h @@ -0,0 +1,144 @@ +#ifndef AST_DRAM_TABLES_H +#define AST_DRAM_TABLES_H + +/* DRAM timing tables */ +struct ast_dramstruct { + u16 index; + u32 data; +}; + +static const struct ast_dramstruct ast2000_dram_table_data[] = { + { 0x0108, 0x00000000 }, + { 0x0120, 0x00004a21 }, + { 0xFF00, 0x00000043 }, + { 0x0000, 0xFFFFFFFF }, + { 0x0004, 0x00000089 }, + { 0x0008, 0x22331353 }, + { 0x000C, 0x0d07000b }, + { 0x0010, 0x11113333 }, + { 0x0020, 0x00110350 }, + { 0x0028, 0x1e0828f0 }, + { 0x0024, 0x00000001 }, + { 0x001C, 0x00000000 }, + { 0x0014, 0x00000003 }, + { 0xFF00, 0x00000043 }, + { 0x0018, 0x00000131 }, + { 0x0014, 0x00000001 }, + { 0xFF00, 0x00000043 }, + { 0x0018, 0x00000031 }, + { 0x0014, 0x00000001 }, + { 0xFF00, 0x00000043 }, + { 0x0028, 0x1e0828f1 }, + { 0x0024, 0x00000003 }, + { 
0x002C, 0x1f0f28fb }, + { 0x0030, 0xFFFFFE01 }, + { 0xFFFF, 0xFFFFFFFF } +}; + +static const struct ast_dramstruct ast1100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, + { 0x2020, 0x000041f0 }, + { 0xFF00, 0x00000043 }, + { 0x0000, 0xfc600309 }, + { 0x006C, 0x00909090 }, + { 0x0064, 0x00050000 }, + { 0x0004, 0x00000585 }, + { 0x0008, 0x0011030f }, + { 0x0010, 0x22201724 }, + { 0x0018, 0x1e29011a }, + { 0x0020, 0x00c82222 }, + { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, + { 0x0024, 0x00cb2522 }, + { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, + { 0x0040, 0x00000000 }, + { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, + { 0x004C, 0x00000000 }, + { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, + { 0x0058, 0x00000000 }, + { 0x005C, 0x00000000 }, + { 0x0060, 0x032aa02a }, + { 0x0064, 0x002d3000 }, + { 0x0068, 0x00000000 }, + { 0x0070, 0x00000000 }, + { 0x0074, 0x00000000 }, + { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, + { 0x0034, 0x00000001 }, + { 0xFF00, 0x00000043 }, + { 0x002C, 0x00000732 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, + { 0x0028, 0x00000003 }, + { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, + { 0x002C, 0x00000632 }, + { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, + { 0x0028, 0x00000003 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, + { 0x000C, 0x00005a21 }, + { 0x0034, 0x00007c03 }, + { 0x0120, 0x00004c41 }, + { 0xffff, 0xffffffff }, +}; + +static const struct ast_dramstruct ast2100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, + { 0x2020, 0x00004120 }, + { 0xFF00, 0x00000043 }, + { 0x0000, 0xfc600309 }, + { 0x006C, 0x00909090 }, + { 0x0064, 0x00070000 }, + { 0x0004, 0x00000489 }, + { 0x0008, 0x0011030f }, + { 0x0010, 0x32302926 }, + { 0x0018, 0x274c0122 }, + { 0x0020, 0x00ce2222 }, + { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, + { 0x0024, 0x00cb2522 }, + { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, + { 0x0040, 0x00000000 }, + { 0x0044, 0x00000000 }, + { 0x0048, 
0x00000000 }, + { 0x004C, 0x00000000 }, + { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, + { 0x0058, 0x00000000 }, + { 0x005C, 0x00000000 }, + { 0x0060, 0x0f2aa02a }, + { 0x0064, 0x003f3005 }, + { 0x0068, 0x02020202 }, + { 0x0070, 0x00000000 }, + { 0x0074, 0x00000000 }, + { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, + { 0x0034, 0x00000001 }, + { 0xFF00, 0x00000043 }, + { 0x002C, 0x00000942 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, + { 0x0028, 0x00000003 }, + { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, + { 0x002C, 0x00000842 }, + { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, + { 0x0028, 0x00000003 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, + { 0x000C, 0x00005a21 }, + { 0x0034, 0x00007c03 }, + { 0x0120, 0x00005061 }, + { 0xffff, 0xffffffff }, +}; + +#endif diff --git a/kernel/drivers/gpu/drm/ast/ast_drv.c b/kernel/drivers/gpu/drm/ast/ast_drv.c new file mode 100644 index 000000000..9a32d9dfd --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_drv.c @@ -0,0 +1,241 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie + */ +#include +#include + +#include +#include + +#include "ast_drv.h" + +int ast_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, ast_modeset, int, 0400); + +#define PCI_VENDOR_ASPEED 0x1a03 + +static struct drm_driver driver; + +#define AST_VGA_DEVICE(id, info) { \ + .class = PCI_BASE_CLASS_DISPLAY << 16, \ + .class_mask = 0xff0000, \ + .vendor = PCI_VENDOR_ASPEED, \ + .device = id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .driver_data = (unsigned long) info } + +static const struct pci_device_id pciidlist[] = { + AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL), + AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL), + /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */ + {0, 0, 0}, +}; + +MODULE_DEVICE_TABLE(pci, pciidlist); + +static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_pci_dev(pdev, ent, &driver); +} + +static void +ast_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + + + +static int ast_drm_freeze(struct drm_device *dev) +{ + drm_kms_helper_poll_disable(dev); + + pci_save_state(dev->pdev); + + console_lock(); + ast_fbdev_set_suspend(dev, 1); + console_unlock(); + return 0; +} + +static int ast_drm_thaw(struct drm_device *dev) +{ + int error = 0; + + ast_post_gpu(dev); + + drm_mode_config_reset(dev); + drm_helper_resume_force_mode(dev); + + console_lock(); + 
ast_fbdev_set_suspend(dev, 0); + console_unlock(); + return error; +} + +static int ast_drm_resume(struct drm_device *dev) +{ + int ret; + + if (pci_enable_device(dev->pdev)) + return -EIO; + + ret = ast_drm_thaw(dev); + if (ret) + return ret; + + drm_kms_helper_poll_enable(dev); + return 0; +} + +static int ast_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + int error; + + error = ast_drm_freeze(ddev); + if (error) + return error; + + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} +static int ast_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + return ast_drm_resume(ddev); +} + +static int ast_pm_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + if (!ddev || !ddev->dev_private) + return -ENODEV; + return ast_drm_freeze(ddev); + +} + +static int ast_pm_thaw(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + return ast_drm_thaw(ddev); +} + +static int ast_pm_poweroff(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_freeze(ddev); +} + +static const struct dev_pm_ops ast_pm_ops = { + .suspend = ast_pm_suspend, + .resume = ast_pm_resume, + .freeze = ast_pm_freeze, + .thaw = ast_pm_thaw, + .poweroff = ast_pm_poweroff, + .restore = ast_pm_resume, +}; + +static struct pci_driver ast_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = ast_pci_probe, + .remove = ast_pci_remove, + .driver.pm = &ast_pm_ops, +}; + +static const struct file_operations ast_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = ast_mmap, + .poll = drm_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl 
= drm_compat_ioctl, +#endif + .read = drm_read, +}; + +static struct drm_driver driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM, + + .load = ast_driver_load, + .unload = ast_driver_unload, + .set_busid = drm_pci_set_busid, + + .fops = &ast_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + .gem_free_object = ast_gem_free_object, + .dumb_create = ast_dumb_create, + .dumb_map_offset = ast_dumb_mmap_offset, + .dumb_destroy = drm_gem_dumb_destroy, + +}; + +static int __init ast_init(void) +{ +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force() && ast_modeset == -1) + return -EINVAL; +#endif + + if (ast_modeset == 0) + return -EINVAL; + return drm_pci_init(&driver, &ast_pci_driver); +} +static void __exit ast_exit(void) +{ + drm_pci_exit(&driver, &ast_pci_driver); +} + +module_init(ast_init); +module_exit(ast_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); + diff --git a/kernel/drivers/gpu/drm/ast/ast_drv.h b/kernel/drivers/gpu/drm/ast/ast_drv.h new file mode 100644 index 000000000..86205a28e --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_drv.h @@ -0,0 +1,403 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie + */ +#ifndef __AST_DRV_H__ +#define __AST_DRV_H__ + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#define DRIVER_AUTHOR "Dave Airlie" + +#define DRIVER_NAME "ast" +#define DRIVER_DESC "AST" +#define DRIVER_DATE "20120228" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#define PCI_CHIP_AST2000 0x2000 +#define PCI_CHIP_AST2100 0x2010 +#define PCI_CHIP_AST1180 0x1180 + + +enum ast_chip { + AST2000, + AST2100, + AST1100, + AST2200, + AST2150, + AST2300, + AST2400, + AST1180, +}; + +enum ast_tx_chip { + AST_TX_NONE, + AST_TX_SIL164, + AST_TX_ITE66121, + AST_TX_DP501, +}; + +#define AST_DRAM_512Mx16 0 +#define AST_DRAM_1Gx16 1 +#define AST_DRAM_512Mx32 2 +#define AST_DRAM_1Gx32 3 +#define AST_DRAM_2Gx16 6 +#define AST_DRAM_4Gx16 7 + +struct ast_fbdev; + +struct ast_private { + struct drm_device *dev; + + void __iomem *regs; + void __iomem *ioregs; + + enum ast_chip chip; + bool vga2_clone; + uint32_t dram_bus_width; + uint32_t dram_type; + uint32_t mclk; + uint32_t vram_size; + + struct ast_fbdev *fbdev; + + int fb_mtrr; + + struct { + struct drm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; + } ttm; + + struct drm_gem_object *cursor_cache; + uint64_t cursor_cache_gpu_addr; + /* Acces to this cache is protected by the crtc->mutex of the only crtc + * we have. 
*/ + struct ttm_bo_kmap_obj cache_kmap; + int next_cursor; + bool support_wide_screen; + + enum ast_tx_chip tx_chip_type; + u8 dp501_maxclk; + u8 *dp501_fw_addr; + const struct firmware *dp501_fw; /* dp501 fw */ +}; + +int ast_driver_load(struct drm_device *dev, unsigned long flags); +int ast_driver_unload(struct drm_device *dev); + +struct ast_gem_object; + +#define AST_IO_AR_PORT_WRITE (0x40) +#define AST_IO_MISC_PORT_WRITE (0x42) +#define AST_IO_VGA_ENABLE_PORT (0x43) +#define AST_IO_SEQ_PORT (0x44) +#define AST_IO_DAC_INDEX_READ (0x47) +#define AST_IO_DAC_INDEX_WRITE (0x48) +#define AST_IO_DAC_DATA (0x49) +#define AST_IO_GR_PORT (0x4E) +#define AST_IO_CRTC_PORT (0x54) +#define AST_IO_INPUT_STATUS1_READ (0x5A) +#define AST_IO_MISC_PORT_READ (0x4C) + +#define AST_IO_MM_OFFSET (0x380) + +#define __ast_read(x) \ +static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ +u##x val = 0;\ +val = ioread##x(ast->regs + reg); \ +return val;\ +} + +__ast_read(8); +__ast_read(16); +__ast_read(32) + +#define __ast_io_read(x) \ +static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \ +u##x val = 0;\ +val = ioread##x(ast->ioregs + reg); \ +return val;\ +} + +__ast_io_read(8); +__ast_io_read(16); +__ast_io_read(32); + +#define __ast_write(x) \ +static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\ + iowrite##x(val, ast->regs + reg);\ + } + +__ast_write(8); +__ast_write(16); +__ast_write(32); + +#define __ast_io_write(x) \ +static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\ + iowrite##x(val, ast->ioregs + reg);\ + } + +__ast_io_write(8); +__ast_io_write(16); +#undef __ast_io_write + +static inline void ast_set_index_reg(struct ast_private *ast, + uint32_t base, uint8_t index, + uint8_t val) +{ + ast_io_write16(ast, base, ((u16)val << 8) | index); +} + +void ast_set_index_reg_mask(struct ast_private *ast, + uint32_t base, uint8_t index, + uint8_t mask, uint8_t val); +uint8_t 
ast_get_index_reg(struct ast_private *ast, + uint32_t base, uint8_t index); +uint8_t ast_get_index_reg_mask(struct ast_private *ast, + uint32_t base, uint8_t index, uint8_t mask); + +static inline void ast_open_key(struct ast_private *ast) +{ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); +} + +#define AST_VIDMEM_SIZE_8M 0x00800000 +#define AST_VIDMEM_SIZE_16M 0x01000000 +#define AST_VIDMEM_SIZE_32M 0x02000000 +#define AST_VIDMEM_SIZE_64M 0x04000000 +#define AST_VIDMEM_SIZE_128M 0x08000000 + +#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M + +#define AST_MAX_HWC_WIDTH 64 +#define AST_MAX_HWC_HEIGHT 64 + +#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2) +#define AST_HWC_SIGNATURE_SIZE 32 + +#define AST_DEFAULT_HWC_NUM 2 +/* define for signature structure */ +#define AST_HWC_SIGNATURE_CHECKSUM 0x00 +#define AST_HWC_SIGNATURE_SizeX 0x04 +#define AST_HWC_SIGNATURE_SizeY 0x08 +#define AST_HWC_SIGNATURE_X 0x0C +#define AST_HWC_SIGNATURE_Y 0x10 +#define AST_HWC_SIGNATURE_HOTSPOTX 0x14 +#define AST_HWC_SIGNATURE_HOTSPOTY 0x18 + + +struct ast_i2c_chan { + struct i2c_adapter adapter; + struct drm_device *dev; + struct i2c_algo_bit_data bit; +}; + +struct ast_connector { + struct drm_connector base; + struct ast_i2c_chan *i2c; +}; + +struct ast_crtc { + struct drm_crtc base; + u8 lut_r[256], lut_g[256], lut_b[256]; + struct drm_gem_object *cursor_bo; + uint64_t cursor_addr; + int cursor_width, cursor_height; + u8 offset_x, offset_y; +}; + +struct ast_encoder { + struct drm_encoder base; +}; + +struct ast_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj; +}; + +struct ast_fbdev { + struct drm_fb_helper helper; + struct ast_framebuffer afb; + struct list_head fbdev_list; + void *sysram; + int size; + struct ttm_bo_kmap_obj mapping; + int x1, y1, x2, y2; /* dirty rect */ + spinlock_t dirty_lock; +}; + +#define to_ast_crtc(x) container_of(x, struct ast_crtc, base) +#define to_ast_connector(x) container_of(x, struct ast_connector, 
base) +#define to_ast_encoder(x) container_of(x, struct ast_encoder, base) +#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base) + +struct ast_vbios_stdtable { + u8 misc; + u8 seq[4]; + u8 crtc[25]; + u8 ar[20]; + u8 gr[9]; +}; + +struct ast_vbios_enhtable { + u32 ht; + u32 hde; + u32 hfp; + u32 hsync; + u32 vt; + u32 vde; + u32 vfp; + u32 vsync; + u32 dclk_index; + u32 flags; + u32 refresh_rate; + u32 refresh_rate_index; + u32 mode_id; +}; + +struct ast_vbios_dclk_info { + u8 param1; + u8 param2; + u8 param3; +}; + +struct ast_vbios_mode_info { + struct ast_vbios_stdtable *std_table; + struct ast_vbios_enhtable *enh_table; +}; + +extern int ast_mode_init(struct drm_device *dev); +extern void ast_mode_fini(struct drm_device *dev); + +int ast_framebuffer_init(struct drm_device *dev, + struct ast_framebuffer *ast_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); + +int ast_fbdev_init(struct drm_device *dev); +void ast_fbdev_fini(struct drm_device *dev); +void ast_fbdev_set_suspend(struct drm_device *dev, int state); + +struct ast_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + struct ttm_bo_kmap_obj kmap; + struct drm_gem_object gem; + struct ttm_place placements[3]; + int pin_count; +}; +#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem) + +static inline struct ast_bo * +ast_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct ast_bo, bo); +} + + +#define to_ast_obj(x) container_of(x, struct ast_gem_object, base) + +#define AST_MM_ALIGN_SHIFT 4 +#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1) + +extern int ast_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +extern void ast_gem_free_object(struct drm_gem_object *obj); +extern int ast_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, + uint64_t *offset); + +#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) + 
+int ast_mm_init(struct ast_private *ast); +void ast_mm_fini(struct ast_private *ast); + +int ast_bo_create(struct drm_device *dev, int size, int align, + uint32_t flags, struct ast_bo **pastbo); + +int ast_gem_create(struct drm_device *dev, + u32 size, bool iskernel, + struct drm_gem_object **obj); + +int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr); +int ast_bo_unpin(struct ast_bo *bo); + +static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait) +{ + int ret; + + ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); + if (ret) { + if (ret != -ERESTARTSYS && ret != -EBUSY) + DRM_ERROR("reserve failed %p\n", bo); + return ret; + } + return 0; +} + +static inline void ast_bo_unreserve(struct ast_bo *bo) +{ + ttm_bo_unreserve(&bo->bo); +} + +void ast_ttm_placement(struct ast_bo *bo, int domain); +int ast_bo_push_sysram(struct ast_bo *bo); +int ast_mmap(struct file *filp, struct vm_area_struct *vma); + +/* ast post */ +void ast_enable_vga(struct drm_device *dev); +void ast_enable_mmio(struct drm_device *dev); +bool ast_is_vga_enabled(struct drm_device *dev); +void ast_post_gpu(struct drm_device *dev); +u32 ast_mindwm(struct ast_private *ast, u32 r); +void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); +/* ast dp501 */ +int ast_load_dp501_microcode(struct drm_device *dev); +void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); +bool ast_launch_m68k(struct drm_device *dev); +bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); +u8 ast_get_dp501_max_clk(struct drm_device *dev); +void ast_init_3rdtx(struct drm_device *dev); +#endif diff --git a/kernel/drivers/gpu/drm/ast/ast_fb.c b/kernel/drivers/gpu/drm/ast/ast_fb.c new file mode 100644 index 000000000..ff68eefae --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_fb.c @@ -0,0 +1,381 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include +#include "ast_drv.h" + +static void ast_dirty_update(struct ast_fbdev *afbdev, + int x, int y, int width, int height) +{ + int i; + struct drm_gem_object *obj; + struct ast_bo *bo; + int src_offset, dst_offset; + int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8; + int ret = -EBUSY; + bool unmap = false; + bool store_for_later = false; + int x2, y2; + unsigned long flags; + + obj = afbdev->afb.obj; + bo = gem_to_ast_bo(obj); + + /* + * try and reserve the BO, if we fail with busy + * then the BO is being moved and we should + * store up the damage until later. 
+ */ + if (drm_can_sleep()) + ret = ast_bo_reserve(bo, true); + if (ret) { + if (ret != -EBUSY) + return; + + store_for_later = true; + } + + x2 = x + width - 1; + y2 = y + height - 1; + spin_lock_irqsave(&afbdev->dirty_lock, flags); + + if (afbdev->y1 < y) + y = afbdev->y1; + if (afbdev->y2 > y2) + y2 = afbdev->y2; + if (afbdev->x1 < x) + x = afbdev->x1; + if (afbdev->x2 > x2) + x2 = afbdev->x2; + + if (store_for_later) { + afbdev->x1 = x; + afbdev->x2 = x2; + afbdev->y1 = y; + afbdev->y2 = y2; + spin_unlock_irqrestore(&afbdev->dirty_lock, flags); + return; + } + + afbdev->x1 = afbdev->y1 = INT_MAX; + afbdev->x2 = afbdev->y2 = 0; + spin_unlock_irqrestore(&afbdev->dirty_lock, flags); + + if (!bo->kmap.virtual) { + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) { + DRM_ERROR("failed to kmap fb updates\n"); + ast_bo_unreserve(bo); + return; + } + unmap = true; + } + for (i = y; i <= y2; i++) { + /* assume equal stride for now */ + src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp); + memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp); + + } + if (unmap) + ttm_bo_kunmap(&bo->kmap); + + ast_bo_unreserve(bo); +} + +static void ast_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + struct ast_fbdev *afbdev = info->par; + sys_fillrect(info, rect); + ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width, + rect->height); +} + +static void ast_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + struct ast_fbdev *afbdev = info->par; + sys_copyarea(info, area); + ast_dirty_update(afbdev, area->dx, area->dy, area->width, + area->height); +} + +static void ast_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + struct ast_fbdev *afbdev = info->par; + sys_imageblit(info, image); + ast_dirty_update(afbdev, image->dx, image->dy, image->width, + image->height); +} + +static struct fb_ops astfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = 
drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = ast_fillrect, + .fb_copyarea = ast_copyarea, + .fb_imageblit = ast_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static int astfb_create_object(struct ast_fbdev *afbdev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object **gobj_p) +{ + struct drm_device *dev = afbdev->helper.dev; + u32 bpp, depth; + u32 size; + struct drm_gem_object *gobj; + + int ret = 0; + drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); + + size = mode_cmd->pitches[0] * mode_cmd->height; + ret = ast_gem_create(dev, size, true, &gobj); + if (ret) + return ret; + + *gobj_p = gobj; + return ret; +} + +static int astfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct ast_fbdev *afbdev = + container_of(helper, struct ast_fbdev, helper); + struct drm_device *dev = afbdev->helper.dev; + struct drm_mode_fb_cmd2 mode_cmd; + struct drm_framebuffer *fb; + struct fb_info *info; + int size, ret; + struct device *device = &dev->pdev->dev; + void *sysram; + struct drm_gem_object *gobj = NULL; + struct ast_bo *bo = NULL; + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8); + + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = mode_cmd.pitches[0] * mode_cmd.height; + + ret = astfb_create_object(afbdev, &mode_cmd, &gobj); + if (ret) { + DRM_ERROR("failed to create fbcon backing object %d\n", ret); + return ret; + } + bo = gem_to_ast_bo(gobj); + + sysram = vmalloc(size); + if (!sysram) + return -ENOMEM; + + info = framebuffer_alloc(0, device); + if (!info) { + ret = -ENOMEM; + goto out; + } + info->par = afbdev; + + ret = 
ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj); + if (ret) + goto out; + + afbdev->sysram = sysram; + afbdev->size = size; + + fb = &afbdev->afb.base; + afbdev->helper.fb = fb; + afbdev->helper.fbdev = info; + + strcpy(info->fix.id, "astdrmfb"); + + info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; + info->fbops = &astfb_ops; + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto out; + } + + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto out; + } + info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height); + + info->screen_base = sysram; + info->screen_size = size; + + info->pixmap.flags = FB_PIXMAP_SYSTEM; + + DRM_DEBUG_KMS("allocated %dx%d\n", + fb->width, fb->height); + + return 0; +out: + return ret; +} + +static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno) +{ + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + ast_crtc->lut_r[regno] = red >> 8; + ast_crtc->lut_g[regno] = green >> 8; + ast_crtc->lut_b[regno] = blue >> 8; +} + +static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + *red = ast_crtc->lut_r[regno] << 8; + *green = ast_crtc->lut_g[regno] << 8; + *blue = ast_crtc->lut_b[regno] << 8; +} + +static const struct drm_fb_helper_funcs ast_fb_helper_funcs = { + .gamma_set = ast_fb_gamma_set, + .gamma_get = ast_fb_gamma_get, + .fb_probe = astfb_create, +}; + +static void ast_fbdev_destroy(struct drm_device *dev, + struct ast_fbdev *afbdev) +{ + struct fb_info *info; + struct ast_framebuffer *afb = &afbdev->afb; + if (afbdev->helper.fbdev) { + info = afbdev->helper.fbdev; + unregister_framebuffer(info); + if 
(info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + if (afb->obj) { + drm_gem_object_unreference_unlocked(afb->obj); + afb->obj = NULL; + } + drm_fb_helper_fini(&afbdev->helper); + + vfree(afbdev->sysram); + drm_framebuffer_unregister_private(&afb->base); + drm_framebuffer_cleanup(&afb->base); +} + +int ast_fbdev_init(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + struct ast_fbdev *afbdev; + int ret; + + afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL); + if (!afbdev) + return -ENOMEM; + + ast->fbdev = afbdev; + spin_lock_init(&afbdev->dirty_lock); + + drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, &afbdev->helper, + 1, 1); + if (ret) + goto free; + + ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper); + if (ret) + goto fini; + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(&afbdev->helper, 32); + if (ret) + goto fini; + + return 0; + +fini: + drm_fb_helper_fini(&afbdev->helper); +free: + kfree(afbdev); + return ret; +} + +void ast_fbdev_fini(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + + if (!ast->fbdev) + return; + + ast_fbdev_destroy(dev, ast->fbdev); + kfree(ast->fbdev); + ast->fbdev = NULL; +} + +void ast_fbdev_set_suspend(struct drm_device *dev, int state) +{ + struct ast_private *ast = dev->dev_private; + + if (!ast->fbdev) + return; + + fb_set_suspend(ast->fbdev->helper.fbdev, state); +} diff --git a/kernel/drivers/gpu/drm/ast/ast_main.c b/kernel/drivers/gpu/drm/ast/ast_main.c new file mode 100644 index 000000000..035dacc93 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_main.c @@ -0,0 +1,594 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ +#include +#include "ast_drv.h" + + +#include +#include + +#include "ast_dram_tables.h" + +void ast_set_index_reg_mask(struct ast_private *ast, + uint32_t base, uint8_t index, + uint8_t mask, uint8_t val) +{ + u8 tmp; + ast_io_write8(ast, base, index); + tmp = (ast_io_read8(ast, base + 1) & mask) | val; + ast_set_index_reg(ast, base, index, tmp); +} + +uint8_t ast_get_index_reg(struct ast_private *ast, + uint32_t base, uint8_t index) +{ + uint8_t ret; + ast_io_write8(ast, base, index); + ret = ast_io_read8(ast, base + 1); + return ret; +} + +uint8_t ast_get_index_reg_mask(struct ast_private *ast, + uint32_t base, uint8_t index, uint8_t mask) +{ + uint8_t ret; + ast_io_write8(ast, base, index); + ret = ast_io_read8(ast, base + 1) & mask; + return ret; +} + + +static int ast_detect_chip(struct drm_device *dev, bool *need_post) +{ + struct ast_private *ast = dev->dev_private; + uint32_t data, jreg; + ast_open_key(ast); + + if (dev->pdev->device == PCI_CHIP_AST1180) { + ast->chip = AST1100; + DRM_INFO("AST 1180 detected\n"); + } else { + if (dev->pdev->revision >= 0x30) { + ast->chip = AST2400; + DRM_INFO("AST 2400 detected\n"); + } else if (dev->pdev->revision >= 0x20) { + ast->chip = AST2300; + DRM_INFO("AST 2300 detected\n"); + } else if (dev->pdev->revision >= 0x10) { + uint32_t data; + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + + data = ast_read32(ast, 0x1207c); + switch (data & 0x0300) { + case 0x0200: + ast->chip = AST1100; + DRM_INFO("AST 1100 detected\n"); + break; + case 0x0100: + ast->chip = AST2200; + DRM_INFO("AST 2200 detected\n"); + break; + case 0x0000: + ast->chip = AST2150; + DRM_INFO("AST 2150 detected\n"); + break; + default: + ast->chip = AST2100; + DRM_INFO("AST 2100 detected\n"); + break; + } + ast->vga2_clone = false; + } else { + ast->chip = AST2000; + DRM_INFO("AST 2000 detected\n"); + } + } + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access 
to the scratch registers will fail. We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + ast_enable_mmio(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + /* Check if we support wide screen */ + switch (ast->chip) { + case AST1180: + ast->support_wide_screen = true; + break; + case AST2000: + ast->support_wide_screen = false; + break; + default: + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if (!(jreg & 0x80)) + ast->support_wide_screen = true; + else if (jreg & 0x01) + ast->support_wide_screen = true; + else { + ast->support_wide_screen = false; + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x1207c); + data &= 0x300; + if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + } + break; + } + + /* Check 3rd Tx option (digital output afaik) */ + ast->tx_chip_type = AST_TX_NONE; + + /* + * VGACRA3 Enhanced Color Mode Register, check if DVO is already + * enabled, in that case, assume we have a SIL164 TMDS transmitter + * + * Don't make that assumption if we the chip wasn't enabled and + * is at power-on reset, otherwise we'll incorrectly "detect" a + * SIL164 when there is none. 
+ */ + if (!*need_post) { + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); + if (jreg & 0x80) + ast->tx_chip_type = AST_TX_SIL164; + } + + if ((ast->chip == AST2300) || (ast->chip == AST2400)) { + /* + * On AST2300 and 2400, look the configuration set by the SoC in + * the SOC scratch register #1 bits 11:8 (interestingly marked + * as "reserved" in the spec) + */ + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + switch (jreg) { + case 0x04: + ast->tx_chip_type = AST_TX_SIL164; + break; + case 0x08: + ast->dp501_fw_addr = kzalloc(32*1024, GFP_KERNEL); + if (ast->dp501_fw_addr) { + /* backup firmware */ + if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) { + kfree(ast->dp501_fw_addr); + ast->dp501_fw_addr = NULL; + } + } + /* fallthrough */ + case 0x0c: + ast->tx_chip_type = AST_TX_DP501; + } + } + + /* Print stuff for diagnostic purposes */ + switch(ast->tx_chip_type) { + case AST_TX_SIL164: + DRM_INFO("Using Sil164 TMDS transmitter\n"); + break; + case AST_TX_DP501: + DRM_INFO("Using DP501 DisplayPort transmitter\n"); + break; + default: + DRM_INFO("Analog VGA only\n"); + } + return 0; +} + +static int ast_get_dram_info(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + uint32_t data, data2; + uint32_t denum, num, div, ref_pll; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + + + ast_write32(ast, 0x10000, 0xfc600309); + + do { + ; + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + + if (data & 0x400) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (data & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (data & 0x0c) { + case 0: + case 4: + ast->dram_type 
= AST_DRAM_512Mx16; + break; + case 8: + if (data & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; + break; + } + } + + data = ast_read32(ast, 0x10120); + data2 = ast_read32(ast, 0x10170); + if (data2 & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = data & 0x1f; + num = (data & 0x3fe0) >> 5; + data = (data & 0xc000) >> 14; + switch (data) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + return 0; +} + +static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); + if (ast_fb->obj) + drm_gem_object_unreference_unlocked(ast_fb->obj); + + drm_framebuffer_cleanup(fb); + kfree(fb); +} + +static const struct drm_framebuffer_funcs ast_fb_funcs = { + .destroy = ast_user_framebuffer_destroy, +}; + + +int ast_framebuffer_init(struct drm_device *dev, + struct ast_framebuffer *ast_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + + drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd); + ast_fb->obj = obj; + ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs); + if (ret) { + DRM_ERROR("framebuffer init failed %d\n", ret); + return ret; + } + return 0; +} + +static struct drm_framebuffer * +ast_user_framebuffer_create(struct drm_device *dev, + struct drm_file *filp, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_gem_object *obj; + struct ast_framebuffer *ast_fb; + int ret; + + obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]); + if (obj == NULL) + return ERR_PTR(-ENOENT); + + ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL); + if (!ast_fb) { + drm_gem_object_unreference_unlocked(obj); + return ERR_PTR(-ENOMEM); + } + + ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj); + if (ret) { + 
drm_gem_object_unreference_unlocked(obj); + kfree(ast_fb); + return ERR_PTR(ret); + } + return &ast_fb->base; +} + +static const struct drm_mode_config_funcs ast_mode_funcs = { + .fb_create = ast_user_framebuffer_create, +}; + +static u32 ast_get_vram_info(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u8 jreg; + u32 vram_size; + ast_open_key(ast); + + vram_size = AST_VIDMEM_DEFAULT_SIZE; + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff); + switch (jreg & 3) { + case 0: vram_size = AST_VIDMEM_SIZE_8M; break; + case 1: vram_size = AST_VIDMEM_SIZE_16M; break; + case 2: vram_size = AST_VIDMEM_SIZE_32M; break; + case 3: vram_size = AST_VIDMEM_SIZE_64M; break; + } + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff); + switch (jreg & 0x03) { + case 1: + vram_size -= 0x100000; + break; + case 2: + vram_size -= 0x200000; + break; + case 3: + vram_size -= 0x400000; + break; + } + + return vram_size; +} + +int ast_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct ast_private *ast; + bool need_post; + int ret = 0; + + ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL); + if (!ast) + return -ENOMEM; + + dev->dev_private = ast; + ast->dev = dev; + + ast->regs = pci_iomap(dev->pdev, 1, 0); + if (!ast->regs) { + ret = -EIO; + goto out_free; + } + + /* + * If we don't have IO space at all, use MMIO now and + * assume the chip has MMIO enabled by default (rev 0x20 + * and higher). 
+ */ + if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) { + DRM_INFO("platform has no IO space, trying MMIO\n"); + ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + } + + /* "map" IO regs if the above hasn't done so already */ + if (!ast->ioregs) { + ast->ioregs = pci_iomap(dev->pdev, 2, 0); + if (!ast->ioregs) { + ret = -EIO; + goto out_free; + } + } + + ast_detect_chip(dev, &need_post); + + if (ast->chip != AST1180) { + ast_get_dram_info(dev); + ast->vram_size = ast_get_vram_info(dev); + DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); + } + + if (need_post) + ast_post_gpu(dev); + + ret = ast_mm_init(ast); + if (ret) + goto out_free; + + drm_mode_config_init(dev); + + dev->mode_config.funcs = (void *)&ast_mode_funcs; + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + + if (ast->chip == AST2100 || + ast->chip == AST2200 || + ast->chip == AST2300 || + ast->chip == AST2400 || + ast->chip == AST1180) { + dev->mode_config.max_width = 1920; + dev->mode_config.max_height = 2048; + } else { + dev->mode_config.max_width = 1600; + dev->mode_config.max_height = 1200; + } + + ret = ast_mode_init(dev); + if (ret) + goto out_free; + + ret = ast_fbdev_init(dev); + if (ret) + goto out_free; + + return 0; +out_free: + kfree(ast); + dev->dev_private = NULL; + return ret; +} + +int ast_driver_unload(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + + kfree(ast->dp501_fw_addr); + ast_mode_fini(dev); + ast_fbdev_fini(dev); + drm_mode_config_cleanup(dev); + + ast_mm_fini(ast); + pci_iounmap(dev->pdev, ast->ioregs); + pci_iounmap(dev->pdev, ast->regs); + kfree(ast); + return 0; +} + +int ast_gem_create(struct drm_device *dev, + u32 size, bool iskernel, + struct drm_gem_object **obj) +{ + struct ast_bo *astbo; + int ret; + + *obj = NULL; + + size = roundup(size, PAGE_SIZE); + if (size == 0) + return -EINVAL; + + 
ret = ast_bo_create(dev, size, 0, 0, &astbo); + if (ret) { + if (ret != -ERESTARTSYS) + DRM_ERROR("failed to allocate GEM object\n"); + return ret; + } + *obj = &astbo->gem; + return 0; +} + +int ast_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int ret; + struct drm_gem_object *gobj; + u32 handle; + + args->pitch = args->width * ((args->bpp + 7) / 8); + args->size = args->pitch * args->height; + + ret = ast_gem_create(dev, args->size, false, + &gobj); + if (ret) + return ret; + + ret = drm_gem_handle_create(file, gobj, &handle); + drm_gem_object_unreference_unlocked(gobj); + if (ret) + return ret; + + args->handle = handle; + return 0; +} + +static void ast_bo_unref(struct ast_bo **bo) +{ + struct ttm_buffer_object *tbo; + + if ((*bo) == NULL) + return; + + tbo = &((*bo)->bo); + ttm_bo_unref(&tbo); + *bo = NULL; +} + +void ast_gem_free_object(struct drm_gem_object *obj) +{ + struct ast_bo *ast_bo = gem_to_ast_bo(obj); + + ast_bo_unref(&ast_bo); +} + + +static inline u64 ast_bo_mmap_offset(struct ast_bo *bo) +{ + return drm_vma_node_offset_addr(&bo->bo.vma_node); +} +int +ast_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, + uint64_t *offset) +{ + struct drm_gem_object *obj; + int ret; + struct ast_bo *bo; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file, handle); + if (obj == NULL) { + ret = -ENOENT; + goto out_unlock; + } + + bo = gem_to_ast_bo(obj); + *offset = ast_bo_mmap_offset(bo); + + drm_gem_object_unreference(obj); + ret = 0; +out_unlock: + mutex_unlock(&dev->struct_mutex); + return ret; + +} + diff --git a/kernel/drivers/gpu/drm/ast/ast_mode.c b/kernel/drivers/gpu/drm/ast/ast_mode.c new file mode 100644 index 000000000..b7ee2634e --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_mode.c @@ -0,0 +1,1253 @@ +/* + * Copyright 2012 Red Hat Inc. + * Parts based on xf86-video-ast + * Copyright (c) 2005 ASPEED Technology Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ +#include +#include +#include +#include +#include +#include "ast_drv.h" + +#include "ast_tables.h" + +static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); +static void ast_i2c_destroy(struct ast_i2c_chan *i2c); +static int ast_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height); +static int ast_cursor_move(struct drm_crtc *crtc, + int x, int y); + +static inline void ast_load_palette_index(struct ast_private *ast, + u8 index, u8 red, u8 green, + u8 blue) +{ + ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, red); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, green); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, blue); + ast_io_read8(ast, AST_IO_SEQ_PORT); +} + +static void ast_crtc_load_lut(struct drm_crtc *crtc) +{ + struct ast_private *ast = crtc->dev->dev_private; + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + int i; + + if (!crtc->enabled) + return; + + for (i = 0; i < 256; i++) + ast_load_palette_index(ast, i, ast_crtc->lut_r[i], + ast_crtc->lut_g[i], ast_crtc->lut_b[i]); +} + +static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = crtc->dev->dev_private; + u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate; + u32 hborder, vborder; + bool check_sync; + struct ast_vbios_enhtable *best = NULL; + + switch (crtc->primary->fb->bits_per_pixel) { + case 8: + vbios_mode->std_table = &vbios_stdtable[VGAModeIndex]; + color_index = VGAModeIndex - 1; + break; + case 16: + vbios_mode->std_table = &vbios_stdtable[HiCModeIndex]; + color_index = HiCModeIndex; + break; + case 24: + case 32: + vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex]; + 
color_index = TrueCModeIndex; + break; + default: + return false; + } + + switch (crtc->mode.crtc_hdisplay) { + case 640: + vbios_mode->enh_table = &res_640x480[refresh_rate_index]; + break; + case 800: + vbios_mode->enh_table = &res_800x600[refresh_rate_index]; + break; + case 1024: + vbios_mode->enh_table = &res_1024x768[refresh_rate_index]; + break; + case 1280: + if (crtc->mode.crtc_vdisplay == 800) + vbios_mode->enh_table = &res_1280x800[refresh_rate_index]; + else + vbios_mode->enh_table = &res_1280x1024[refresh_rate_index]; + break; + case 1360: + vbios_mode->enh_table = &res_1360x768[refresh_rate_index]; + break; + case 1440: + vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; + break; + case 1600: + if (crtc->mode.crtc_vdisplay == 900) + vbios_mode->enh_table = &res_1600x900[refresh_rate_index]; + else + vbios_mode->enh_table = &res_1600x1200[refresh_rate_index]; + break; + case 1680: + vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; + break; + case 1920: + if (crtc->mode.crtc_vdisplay == 1080) + vbios_mode->enh_table = &res_1920x1080[refresh_rate_index]; + else + vbios_mode->enh_table = &res_1920x1200[refresh_rate_index]; + break; + default: + return false; + } + + refresh_rate = drm_mode_vrefresh(mode); + check_sync = vbios_mode->enh_table->flags & WideScreenMode; + do { + struct ast_vbios_enhtable *loop = vbios_mode->enh_table; + + while (loop->refresh_rate != 0xff) { + if ((check_sync) && + (((mode->flags & DRM_MODE_FLAG_NVSYNC) && + (loop->flags & PVSync)) || + ((mode->flags & DRM_MODE_FLAG_PVSYNC) && + (loop->flags & NVSync)) || + ((mode->flags & DRM_MODE_FLAG_NHSYNC) && + (loop->flags & PHSync)) || + ((mode->flags & DRM_MODE_FLAG_PHSYNC) && + (loop->flags & NHSync)))) { + loop++; + continue; + } + if (loop->refresh_rate <= refresh_rate + && (!best || loop->refresh_rate > best->refresh_rate)) + best = loop; + loop++; + } + if (best || !check_sync) + break; + check_sync = 0; + } while (1); + if (best) + vbios_mode->enh_table = 
best; + + hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; + vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0; + + adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht; + adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder; + adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder; + adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder + + vbios_mode->enh_table->hfp; + adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder + + vbios_mode->enh_table->hfp + + vbios_mode->enh_table->hsync); + + adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt; + adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder; + adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder; + adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder + + vbios_mode->enh_table->vfp; + adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder + + vbios_mode->enh_table->vfp + + vbios_mode->enh_table->vsync); + + refresh_rate_index = vbios_mode->enh_table->refresh_rate_index; + mode_id = vbios_mode->enh_table->mode_id; + + if (ast->chip == AST1180) { + /* TODO 1180 */ + } else { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff); + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00); + if (vbios_mode->enh_table->flags & NewModeInfo) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8); + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, 
adjusted_mode->crtc_vdisplay); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8); + } + } + + return true; + + +} +static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = crtc->dev->dev_private; + struct ast_vbios_stdtable *stdtable; + u32 i; + u8 jreg; + + stdtable = vbios_mode->std_table; + + jreg = stdtable->misc; + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); + + /* Set SEQ */ + ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03); + for (i = 0; i < 4; i++) { + jreg = stdtable->seq[i]; + if (!i) + jreg |= 0x20; + ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg); + } + + /* Set CRTC */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); + for (i = 0; i < 25; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + + /* set AR */ + jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + for (i = 0; i < 20; i++) { + jreg = stdtable->ar[i]; + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg); + } + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00); + + jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20); + + /* Set GR */ + for (i = 0; i < 9; i++) + ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]); +} + +static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = crtc->dev->dev_private; + u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; + u16 temp; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); + + temp = (mode->crtc_htotal >> 3) - 5; + if (temp & 0x100) + jregAC |= 0x01; /* HT D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp); + + temp = (mode->crtc_hdisplay >> 3) - 1; + if 
(temp & 0x100) + jregAC |= 0x04; /* HDE D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp); + + temp = (mode->crtc_hblank_start >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x10; /* HBS D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp); + + temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f; + if (temp & 0x20) + jreg05 |= 0x80; /* HBE D[5] */ + if (temp & 0x40) + jregAD |= 0x01; /* HBE D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f)); + + temp = (mode->crtc_hsync_start >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x40; /* HRS D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp); + + temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f; + if (temp & 0x20) + jregAD |= 0x04; /* HRE D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD); + + /* vert timings */ + temp = (mode->crtc_vtotal) - 2; + if (temp & 0x100) + jreg07 |= 0x01; + if (temp & 0x200) + jreg07 |= 0x20; + if (temp & 0x400) + jregAE |= 0x01; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp); + + temp = (mode->crtc_vsync_start) - 1; + if (temp & 0x100) + jreg07 |= 0x04; + if (temp & 0x200) + jreg07 |= 0x80; + if (temp & 0x400) + jregAE |= 0x08; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp); + + temp = (mode->crtc_vsync_end - 1) & 0x3f; + if (temp & 0x10) + jregAE |= 0x20; + if (temp & 0x20) + jregAE |= 0x40; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf); + + temp = mode->crtc_vdisplay - 1; + if (temp & 0x100) + jreg07 |= 0x02; + if (temp & 0x200) + jreg07 |= 0x40; + if (temp & 0x400) + jregAE |= 0x02; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp); + + temp = mode->crtc_vblank_start - 1; + if (temp & 0x100) + jreg07 |= 0x08; + if (temp & 0x200) + jreg09 |= 
0x20; + if (temp & 0x400) + jregAE |= 0x04; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp); + + temp = mode->crtc_vblank_end - 1; + if (temp & 0x100) + jregAE |= 0x10; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80)); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80); +} + +static void ast_set_offset_reg(struct drm_crtc *crtc) +{ + struct ast_private *ast = crtc->dev->dev_private; + + u16 offset; + + offset = crtc->primary->fb->pitches[0] >> 3; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f); +} + +static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = dev->dev_private; + struct ast_vbios_dclk_info *clk_info; + + clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f, + (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4)); +} + +static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = crtc->dev->dev_private; + u8 jregA0 = 0, jregA3 = 0, jregA8 = 0; + + switch (crtc->primary->fb->bits_per_pixel) { + case 8: + jregA0 = 0x70; + jregA3 = 0x01; + jregA8 = 0x00; + break; + case 15: + case 16: + jregA0 = 0x70; + jregA3 = 0x04; + jregA8 = 0x02; + break; + case 32: + jregA0 = 0x70; + jregA3 = 0x08; + jregA8 = 0x02; + break; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, 
jregA0); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8); + + /* Set Threshold */ + if (ast->chip == AST2300 || ast->chip == AST2400) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); + } else if (ast->chip == AST2100 || + ast->chip == AST1100 || + ast->chip == AST2200 || + ast->chip == AST2150) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f); + } else { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f); + } +} + +static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = dev->dev_private; + u8 jreg; + + jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ); + jreg &= ~0xC0; + if (vbios_mode->enh_table->flags & NVSync) jreg |= 0x80; + if (vbios_mode->enh_table->flags & NHSync) jreg |= 0x40; + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); +} + +static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + switch (crtc->primary->fb->bits_per_pixel) { + case 8: + break; + default: + return false; + } + return true; +} + +static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset) +{ + struct ast_private *ast = crtc->dev->dev_private; + u32 addr; + + addr = offset >> 2; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff)); + +} + +static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct ast_private *ast = crtc->dev->dev_private; + + if (ast->chip == AST1180) + return; + + switch (mode) { + case DRM_MODE_DPMS_ON: + case 
DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0); + if (ast->tx_chip_type == AST_TX_DP501) + ast_set_dp501_video_output(crtc->dev, 1); + ast_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_OFF: + if (ast->tx_chip_type == AST_TX_DP501) + ast_set_dp501_video_output(crtc->dev, 0); + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20); + break; + } +} + +static bool ast_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +/* ast is different - we will force move buffers out of VRAM */ +static int ast_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) +{ + struct ast_private *ast = crtc->dev->dev_private; + struct drm_gem_object *obj; + struct ast_framebuffer *ast_fb; + struct ast_bo *bo; + int ret; + u64 gpu_addr; + + /* push the previous fb to system ram */ + if (!atomic && fb) { + ast_fb = to_ast_framebuffer(fb); + obj = ast_fb->obj; + bo = gem_to_ast_bo(obj); + ret = ast_bo_reserve(bo, false); + if (ret) + return ret; + ast_bo_push_sysram(bo); + ast_bo_unreserve(bo); + } + + ast_fb = to_ast_framebuffer(crtc->primary->fb); + obj = ast_fb->obj; + bo = gem_to_ast_bo(obj); + + ret = ast_bo_reserve(bo, false); + if (ret) + return ret; + + ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); + if (ret) { + ast_bo_unreserve(bo); + return ret; + } + + if (&ast->fbdev->afb == ast_fb) { + /* if pushing console in kmap it */ + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) + DRM_ERROR("failed to kmap fbcon\n"); + } + ast_bo_unreserve(bo); + + ast_set_start_address_crt1(crtc, (u32)gpu_addr); + + return 0; +} + +static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return ast_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +static int ast_crtc_mode_set(struct drm_crtc *crtc, + struct 
drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct ast_private *ast = crtc->dev->dev_private; + struct ast_vbios_mode_info vbios_mode; + bool ret; + if (ast->chip == AST1180) { + DRM_ERROR("AST 1180 modesetting not supported\n"); + return -EINVAL; + } + + ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode); + if (ret == false) + return -EINVAL; + ast_open_key(ast); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + + ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); + ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode); + ast_set_offset_reg(crtc); + ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode); + ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode); + ast_set_sync_reg(dev, adjusted_mode, &vbios_mode); + ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode); + + ast_crtc_mode_set_base(crtc, x, y, old_fb); + + return 0; +} + +static void ast_crtc_disable(struct drm_crtc *crtc) +{ + +} + +static void ast_crtc_prepare(struct drm_crtc *crtc) +{ + +} + +static void ast_crtc_commit(struct drm_crtc *crtc) +{ + struct ast_private *ast = crtc->dev->dev_private; + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0); +} + + +static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { + .dpms = ast_crtc_dpms, + .mode_fixup = ast_crtc_mode_fixup, + .mode_set = ast_crtc_mode_set, + .mode_set_base = ast_crtc_mode_set_base, + .disable = ast_crtc_disable, + .load_lut = ast_crtc_load_lut, + .prepare = ast_crtc_prepare, + .commit = ast_crtc_commit, + +}; + +static void ast_crtc_reset(struct drm_crtc *crtc) +{ + +} + +static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t start, uint32_t size) +{ + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + int end = (start + size > 256) ? 
256 : start + size, i; + + /* userspace palettes are always correct as is */ + for (i = start; i < end; i++) { + ast_crtc->lut_r[i] = red[i] >> 8; + ast_crtc->lut_g[i] = green[i] >> 8; + ast_crtc->lut_b[i] = blue[i] >> 8; + } + ast_crtc_load_lut(crtc); +} + + +static void ast_crtc_destroy(struct drm_crtc *crtc) +{ + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_funcs ast_crtc_funcs = { + .cursor_set = ast_cursor_set, + .cursor_move = ast_cursor_move, + .reset = ast_crtc_reset, + .set_config = drm_crtc_helper_set_config, + .gamma_set = ast_crtc_gamma_set, + .destroy = ast_crtc_destroy, +}; + +static int ast_crtc_init(struct drm_device *dev) +{ + struct ast_crtc *crtc; + int i; + + crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL); + if (!crtc) + return -ENOMEM; + + drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs); + drm_mode_crtc_set_gamma_size(&crtc->base, 256); + drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs); + + for (i = 0; i < 256; i++) { + crtc->lut_r[i] = i; + crtc->lut_g[i] = i; + crtc->lut_b[i] = i; + } + return 0; +} + +static void ast_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + + +static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector) +{ + int enc_id = connector->encoder_ids[0]; + /* pick the encoder ids */ + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); + return NULL; +} + + +static const struct drm_encoder_funcs ast_enc_funcs = { + .destroy = ast_encoder_destroy, +}; + +static void ast_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool ast_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void ast_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void ast_encoder_prepare(struct drm_encoder *encoder) +{ 
+ +} + +static void ast_encoder_commit(struct drm_encoder *encoder) +{ + +} + + +static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = { + .dpms = ast_encoder_dpms, + .mode_fixup = ast_mode_fixup, + .prepare = ast_encoder_prepare, + .commit = ast_encoder_commit, + .mode_set = ast_encoder_mode_set, +}; + +static int ast_encoder_init(struct drm_device *dev) +{ + struct ast_encoder *ast_encoder; + + ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL); + if (!ast_encoder) + return -ENOMEM; + + drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs); + + ast_encoder->base.possible_crtcs = 1; + return 0; +} + +static int ast_get_modes(struct drm_connector *connector) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + struct ast_private *ast = connector->dev->dev_private; + struct edid *edid; + int ret; + bool flags = false; + if (ast->tx_chip_type == AST_TX_DP501) { + ast->dp501_maxclk = 0xff; + edid = kmalloc(128, GFP_KERNEL); + if (!edid) + return -ENOMEM; + + flags = ast_dp501_read_edid(connector->dev, (u8 *)edid); + if (flags) + ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev); + else + kfree(edid); + } + if (!flags) + edid = drm_get_edid(connector, &ast_connector->i2c->adapter); + if (edid) { + drm_mode_connector_update_edid_property(&ast_connector->base, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; + } else + drm_mode_connector_update_edid_property(&ast_connector->base, NULL); + return 0; +} + +static int ast_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct ast_private *ast = connector->dev->dev_private; + int flags = MODE_NOMODE; + uint32_t jtemp; + + if (ast->support_wide_screen) { + if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050)) + return MODE_OK; + if ((mode->hdisplay == 1280) && (mode->vdisplay == 800)) + return MODE_OK; + if 
((mode->hdisplay == 1440) && (mode->vdisplay == 900)) + return MODE_OK; + if ((mode->hdisplay == 1360) && (mode->vdisplay == 768)) + return MODE_OK; + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_OK; + + if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST1180)) { + if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) + return MODE_OK; + + if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) { + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (jtemp & 0x01) + return MODE_NOMODE; + else + return MODE_OK; + } + } + } + switch (mode->hdisplay) { + case 640: + if (mode->vdisplay == 480) flags = MODE_OK; + break; + case 800: + if (mode->vdisplay == 600) flags = MODE_OK; + break; + case 1024: + if (mode->vdisplay == 768) flags = MODE_OK; + break; + case 1280: + if (mode->vdisplay == 1024) flags = MODE_OK; + break; + case 1600: + if (mode->vdisplay == 1200) flags = MODE_OK; + break; + default: + return flags; + } + + return flags; +} + +static void ast_connector_destroy(struct drm_connector *connector) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + ast_i2c_destroy(ast_connector->i2c); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static enum drm_connector_status +ast_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { + .mode_valid = ast_mode_valid, + .get_modes = ast_get_modes, + .best_encoder = ast_best_single_encoder, +}; + +static const struct drm_connector_funcs ast_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = ast_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = ast_connector_destroy, +}; + +static int ast_connector_init(struct drm_device *dev) +{ + struct 
ast_connector *ast_connector; + struct drm_connector *connector; + struct drm_encoder *encoder; + + ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL); + if (!ast_connector) + return -ENOMEM; + + connector = &ast_connector->base; + drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA); + + drm_connector_helper_add(connector, &ast_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + drm_connector_register(connector); + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head); + drm_mode_connector_attach_encoder(connector, encoder); + + ast_connector->i2c = ast_i2c_create(dev); + if (!ast_connector->i2c) + DRM_ERROR("failed to add ddc bus for connector\n"); + + return 0; +} + +/* allocate cursor cache and pin at start of VRAM */ +static int ast_cursor_init(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + int size; + int ret; + struct drm_gem_object *obj; + struct ast_bo *bo; + uint64_t gpu_addr; + + size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM; + + ret = ast_gem_create(dev, size, true, &obj); + if (ret) + return ret; + bo = gem_to_ast_bo(obj); + ret = ast_bo_reserve(bo, false); + if (unlikely(ret != 0)) + goto fail; + + ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); + ast_bo_unreserve(bo); + if (ret) + goto fail; + + /* kmap the object */ + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap); + if (ret) + goto fail; + + ast->cursor_cache = obj; + ast->cursor_cache_gpu_addr = gpu_addr; + DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr); + return 0; +fail: + return ret; +} + +static void ast_cursor_fini(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + ttm_bo_kunmap(&ast->cache_kmap); + drm_gem_object_unreference_unlocked(ast->cursor_cache); +} + +int ast_mode_init(struct 
drm_device *dev) +{ + ast_cursor_init(dev); + ast_crtc_init(dev); + ast_encoder_init(dev); + ast_connector_init(dev); + return 0; +} + +void ast_mode_fini(struct drm_device *dev) +{ + ast_cursor_fini(dev); +} + +static int get_clock(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = i2c->dev->dev_private; + uint32_t val; + + val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4; + return val & 1 ? 1 : 0; +} + +static int get_data(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = i2c->dev->dev_private; + uint32_t val; + + val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5; + return val & 1 ? 1 : 0; +} + +static void set_clock(void *i2c_priv, int clock) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = i2c->dev->dev_private; + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((clock & 0x01) ? 0 : 1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); + if (ujcrb7 == jtemp) + break; + } +} + +static void set_data(void *i2c_priv, int data) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = i2c->dev->dev_private; + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((data & 0x01) ? 
0 : 1) << 2; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); + if (ujcrb7 == jtemp) + break; + } +} + +static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) +{ + struct ast_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); + if (!i2c) + return NULL; + + i2c->adapter.owner = THIS_MODULE; + i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = &dev->pdev->dev; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "AST i2c bit bus"); + i2c->adapter.algo_data = &i2c->bit; + + i2c->bit.udelay = 20; + i2c->bit.timeout = 2; + i2c->bit.data = i2c; + i2c->bit.setsda = set_data; + i2c->bit.setscl = set_clock; + i2c->bit.getsda = get_data; + i2c->bit.getscl = get_clock; + ret = i2c_bit_add_bus(&i2c->adapter); + if (ret) { + DRM_ERROR("Failed to register bit i2c\n"); + goto out_free; + } + + return i2c; +out_free: + kfree(i2c); + return NULL; +} + +static void ast_i2c_destroy(struct ast_i2c_chan *i2c) +{ + if (!i2c) + return; + i2c_del_adapter(&i2c->adapter); + kfree(i2c); +} + +static void ast_show_cursor(struct drm_crtc *crtc) +{ + struct ast_private *ast = crtc->dev->dev_private; + u8 jreg; + + jreg = 0x2; + /* enable ARGB cursor */ + jreg |= 1; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); +} + +static void ast_hide_cursor(struct drm_crtc *crtc) +{ + struct ast_private *ast = crtc->dev->dev_private; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00); +} + +static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height) +{ + union { + u32 ul; + u8 b[4]; + } srcdata32[2], data32; + union { + u16 us; + u8 b[2]; + } data16; + u32 csum = 0; + s32 alpha_dst_delta, last_alpha_dst_delta; + u8 *srcxor, *dstxor; + int i, j; + u32 per_pixel_copy, two_pixel_copy; + + alpha_dst_delta = AST_MAX_HWC_WIDTH << 1; + last_alpha_dst_delta = 
alpha_dst_delta - (width << 1); + + srcxor = src; + dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta; + per_pixel_copy = width & 1; + two_pixel_copy = width >> 1; + + for (j = 0; j < height; j++) { + for (i = 0; i < two_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; + data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); + data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); + data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4); + data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4); + + writel(data32.ul, dstxor); + csum += data32.ul; + + dstxor += 4; + srcxor += 8; + + } + + for (i = 0; i < per_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); + data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); + writew(data16.us, dstxor); + csum += (u32)data16.us; + + dstxor += 2; + srcxor += 4; + } + dstxor += last_alpha_dst_delta; + } + return csum; +} + +static int ast_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height) +{ + struct ast_private *ast = crtc->dev->dev_private; + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + struct drm_gem_object *obj; + struct ast_bo *bo; + uint64_t gpu_addr; + u32 csum; + int ret; + struct ttm_bo_kmap_obj uobj_map; + u8 *src, *dst; + bool src_isiomem, dst_isiomem; + if (!handle) { + ast_hide_cursor(crtc); + return 0; + } + + if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT) + return -EINVAL; + + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object %x for crtc\n", handle); + return -ENOENT; + } + bo = gem_to_ast_bo(obj); + + ret = ast_bo_reserve(bo, false); + if (ret) + goto fail; + + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map); + + src = 
ttm_kmap_obj_virtual(&uobj_map, &src_isiomem); + dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem); + + if (src_isiomem == true) + DRM_ERROR("src cursor bo should be in main memory\n"); + if (dst_isiomem == false) + DRM_ERROR("dst bo should be in VRAM\n"); + + dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor; + + /* do data transfer to cursor cache */ + csum = copy_cursor_image(src, dst, width, height); + + /* write checksum + signature */ + ttm_bo_kunmap(&uobj_map); + ast_bo_unreserve(bo); + { + u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE; + writel(csum, dst); + writel(width, dst + AST_HWC_SIGNATURE_SizeX); + writel(height, dst + AST_HWC_SIGNATURE_SizeY); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); + + /* set pattern offset */ + gpu_addr = ast->cursor_cache_gpu_addr; + gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor; + gpu_addr >>= 3; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff); + } + ast_crtc->cursor_width = width; + ast_crtc->cursor_height = height; + ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width; + ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height; + + ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM; + + ast_show_cursor(crtc); + + drm_gem_object_unreference_unlocked(obj); + return 0; +fail: + drm_gem_object_unreference_unlocked(obj); + return ret; +} + +static int ast_cursor_move(struct drm_crtc *crtc, + int x, int y) +{ + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + struct ast_private *ast = crtc->dev->dev_private; + int x_offset, y_offset; + u8 *sig; + + sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE; + writel(x, sig + AST_HWC_SIGNATURE_X); + 
writel(y, sig + AST_HWC_SIGNATURE_Y); + + x_offset = ast_crtc->offset_x; + y_offset = ast_crtc->offset_y; + if (x < 0) { + x_offset = (-x) + ast_crtc->offset_x; + x = 0; + } + + if (y < 0) { + y_offset = (-y) + ast_crtc->offset_y; + y = 0; + } + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); + + /* dummy write to fire HWC */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); + + return 0; +} diff --git a/kernel/drivers/gpu/drm/ast/ast_post.c b/kernel/drivers/gpu/drm/ast/ast_post.c new file mode 100644 index 000000000..810c51d92 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_post.c @@ -0,0 +1,1657 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie + */ + +#include +#include "ast_drv.h" + +#include "ast_dram_tables.h" + +static void ast_init_dram_2300(struct drm_device *dev); + +void ast_enable_vga(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + + ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01); + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01); +} + +void ast_enable_mmio(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); +} + + +bool ast_is_vga_enabled(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u8 ch; + + if (ast->chip == AST1180) { + /* TODO 1180 */ + } else { + ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); + if (ch) { + ast_open_key(ast); + ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); + return ch & 0x04; + } + } + return 0; +} + +static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; +static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff }; +static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; + +static void +ast_set_def_ext_reg(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u8 i, index, reg; + const u8 *ext_reg_info; + + /* reset scratch */ + for (i = 0x81; i <= 0x8f; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00); + + if (ast->chip == AST2300 || ast->chip == AST2400) { + if (dev->pdev->revision >= 0x20) + ext_reg_info = extreginfo_ast2300; + else + ext_reg_info = extreginfo_ast2300a0; + } else + ext_reg_info = extreginfo; + + index = 0xa0; + while (*ext_reg_info != 0xff) { + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info); + index++; + ext_reg_info++; + } + + /* disable standard IO/MEM decode if secondary */ + /* 
ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */ + + /* Set Ext. Default */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00); + + /* Enable RAMDAC for A1 */ + reg = 0x04; + if (ast->chip == AST2300 || ast->chip == AST2400) + reg |= 0x20; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); +} + +u32 ast_mindwm(struct ast_private *ast, u32 r) +{ + uint32_t data; + + ast_write32(ast, 0xf004, r & 0xffff0000); + ast_write32(ast, 0xf000, 0x1); + + do { + data = ast_read32(ast, 0xf004) & 0xffff0000; + } while (data != (r & 0xffff0000)); + return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); +} + +void ast_moutdwm(struct ast_private *ast, u32 r, u32 v) +{ + uint32_t data; + ast_write32(ast, 0xf004, r & 0xffff0000); + ast_write32(ast, 0xf000, 0x1); + do { + data = ast_read32(ast, 0xf004) & 0xffff0000; + } while (data != (r & 0xffff0000)); + ast_write32(ast, 0x10000 + (r & 0x0000ffff), v); +} + +/* + * AST2100/2150 DLL CBR Setting + */ +#define CBR_SIZE_AST2150 ((16 << 10) - 1) +#define CBR_PASSNUM_AST2150 5 +#define CBR_THRESHOLD_AST2150 10 +#define CBR_THRESHOLD2_AST2150 10 +#define TIMEOUT_AST2150 5000000 + +#define CBR_PATNUM_AST2150 8 + +static const u32 pattern_AST2150[14] = { + 0xFF00FF00, + 0xCC33CC33, + 0xAA55AA55, + 0xFFFE0001, + 0x683501FE, + 0x0F1929B0, + 0x2D0B4346, + 0x60767F02, + 0x6FBE36A6, + 0x3A253035, + 0x3019686D, + 0x41C6167E, + 0x620152BF, + 0x20F050E0 +}; + +static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen 
<< 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +#if 0 /* unused in DDX driver - here for completeness */ +static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} +#endif + +static int cbrtest_ast2150(struct ast_private *ast) +{ + int i; + + for (i = 0; i < 8; i++) + if (mmctestburst2_ast2150(ast, i)) + return 0; + return 1; +} + +static int cbrscan_ast2150(struct ast_private *ast, int busw) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); + for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { + if (cbrtest_ast2150(ast)) + break; + } + if (loop == CBR_PASSNUM_AST2150) + return 0; + } + return 1; +} + + +static void cbrdlli_ast2150(struct ast_private *ast, int busw) +{ + u32 dll_min[4], dll_max[4], dlli, data, passcnt; + +cbr_start: + dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff; + dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0; + passcnt = 0; + + for (dlli = 0; dlli < 100; dlli++) { + ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); + data = cbrscan_ast2150(ast, busw); + if (data != 0) { + if (data & 0x1) { + if (dll_min[0] > dlli) + dll_min[0] = dlli; + if (dll_max[0] < dlli) + dll_max[0] = dlli; + } + passcnt++; + 
} else if (passcnt >= CBR_THRESHOLD_AST2150) + goto cbr_start; + } + if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150) + goto cbr_start; + + dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4); + ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); +} + + + +static void ast_init_dram_reg(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u8 j; + u32 data, temp, i; + const struct ast_dramstruct *dram_reg_info; + + j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + + if ((j & 0x80) == 0) { /* VGA only */ + if (ast->chip == AST2000) { + dram_reg_info = ast2000_dram_table_data; + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x10100, 0xa8); + + do { + ; + } while (ast_read32(ast, 0x10100) != 0xa8); + } else {/* AST2100/1100 */ + if (ast->chip == AST2100 || ast->chip == 2200) + dram_reg_info = ast2100_dram_table_data; + else + dram_reg_info = ast1100_dram_table_data; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688A8A8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x01); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x01); + } + + while (dram_reg_info->index != 0xffff) { + if (dram_reg_info->index == 0xff00) {/* delay fn */ + for (i = 0; i < 15; i++) + udelay(dram_reg_info->data); + } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) { + data = dram_reg_info->data; + if (ast->dram_type == AST_DRAM_1Gx16) + data = 0x00000d89; + else if (ast->dram_type == AST_DRAM_1Gx32) + data = 0x00000c8d; + + temp = ast_read32(ast, 0x12070); + temp &= 0xc; + temp <<= 2; + ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp); + } else + ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data); + dram_reg_info++; + } + + /* AST 2100/2150 DRAM calibration */ + data = ast_read32(ast, 0x10120); + if (data == 
0x5061) { /* 266Mhz */ + data = ast_read32(ast, 0x10004); + if (data & 0x40) + cbrdlli_ast2150(ast, 16); /* 16 bits */ + else + cbrdlli_ast2150(ast, 32); /* 32 bits */ + } + + switch (ast->chip) { + case AST2000: + temp = ast_read32(ast, 0x10140); + ast_write32(ast, 0x10140, temp | 0x40); + break; + case AST1100: + case AST2100: + case AST2200: + case AST2150: + temp = ast_read32(ast, 0x1200c); + ast_write32(ast, 0x1200c, temp & 0xfffffffd); + temp = ast_read32(ast, 0x12040); + ast_write32(ast, 0x12040, temp | 0x40); + break; + default: + break; + } + } + + /* wait ready */ + do { + j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((j & 0x40) == 0); +} + +void ast_post_gpu(struct drm_device *dev) +{ + u32 reg; + struct ast_private *ast = dev->dev_private; + + pci_read_config_dword(ast->dev->pdev, 0x04, ®); + reg |= 0x3; + pci_write_config_dword(ast->dev->pdev, 0x04, reg); + + ast_enable_vga(dev); + ast_enable_mmio(dev); + ast_open_key(ast); + ast_set_def_ext_reg(dev); + + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); + + ast_init_3rdtx(dev); +} + +/* AST 2300 DRAM settings */ +#define AST_DDR3 0 +#define AST_DDR2 1 + +struct ast2300_dram_param { + u32 dram_type; + u32 dram_chipid; + u32 dram_freq; + u32 vram_size; + u32 odt; + u32 wodt; + u32 rodt; + u32 dram_config; + u32 reg_PERIOD; + u32 reg_MADJ; + u32 reg_SADJ; + u32 reg_MRS; + u32 reg_EMRS; + u32 reg_AC1; + u32 reg_AC2; + u32 reg_DQSIC; + u32 reg_DRV; + u32 reg_IOZ; + u32 reg_DQIDLY; + u32 reg_FREQ; + u32 madj_max; + u32 dll2_finetune_step; +}; + +/* + * DQSI DLL CBR Setting + */ +#define CBR_SIZE0 ((1 << 10) - 1) +#define CBR_SIZE1 ((4 << 10) - 1) +#define CBR_SIZE2 ((64 << 10) - 1) +#define CBR_PASSNUM 5 +#define CBR_PASSNUM2 5 +#define CBR_THRESHOLD 10 +#define CBR_THRESHOLD2 10 +#define TIMEOUT 5000000 +#define CBR_PATNUM 8 + +static const u32 pattern[8] = { + 0xFF00FF00, + 0xCC33CC33, + 0xAA55AA55, + 0x88778877, + 
0x92CC4D6E, + 0x543D3CDE, + 0xF1E843C7, + 0x7C61D253 +}; + +static int mmc_test_burst(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; + if (data & 0x2000) { + return 0; + } + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 1; +} + +static int mmc_test_burst2(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return -1; + } + } while (!data); + data = ast_mindwm(ast, 0x1e6e0078); + data = (data | (data >> 16)) & 0xffff; + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return data; +} + +static int mmc_test_single(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; + if (data & 0x2000) + return 0; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return 0; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return 1; +} + +static int mmc_test_single2(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return -1; + } + } while (!data); + data = ast_mindwm(ast, 0x1e6e0078); + data = (data | (data >> 16)) & 0xffff; + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return data; +} + 
+static int cbr_test(struct ast_private *ast) +{ + u32 data; + int i; + data = mmc_test_single2(ast, 0); + if ((data & 0xff) && (data & 0xff00)) + return 0; + for (i = 0; i < 8; i++) { + data = mmc_test_burst2(ast, i); + if ((data & 0xff) && (data & 0xff00)) + return 0; + } + if (!data) + return 3; + else if (data & 0xff) + return 2; + return 1; +} + +static int cbr_scan(struct ast_private *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 3; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + if ((data = cbr_test(ast)) != 0) { + data2 &= data; + if (!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static u32 cbr_test2(struct ast_private *ast) +{ + u32 data; + + data = mmc_test_burst2(ast, 0); + if (data == 0xffff) + return 0; + data |= mmc_test_single2(ast, 0); + if (data == 0xffff) + return 0; + + return ~data & 0xffff; +} + +static u32 cbr_scan2(struct ast_private *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 0xffff; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + if ((data = cbr_test2(ast)) != 0) { + data2 &= data; + if (!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static u32 cbr_test3(struct ast_private *ast) +{ + if (!mmc_test_burst(ast, 0)) + return 0; + if (!mmc_test_single(ast, 0)) + return 0; + return 1; +} + +static u32 cbr_scan3(struct ast_private *ast) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < 2; loop++) { + if (cbr_test3(ast)) + break; + } + if (loop == 2) + return 0; + } + return 1; +} + +static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, 
data, cnt, mask, passcnt, retry = 0; + bool status = false; +FINETUNE_START: + for (cnt = 0; cnt < 16; cnt++) { + dllmin[cnt] = 0xff; + dllmax[cnt] = 0x0; + } + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1); + data = cbr_scan2(ast); + if (data != 0) { + mask = 0x00010001; + for (cnt = 0; cnt < 16; cnt++) { + if (data & mask) { + if (dllmin[cnt] > dlli) { + dllmin[cnt] = dlli; + } + if (dllmax[cnt] < dlli) { + dllmax[cnt] = dlli; + } + } + mask <<= 1; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD2) { + break; + } + } + gold_sadj[0] = 0x0; + passcnt = 0; + for (cnt = 0; cnt < 16; cnt++) { + if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + gold_sadj[0] += dllmin[cnt]; + passcnt++; + } + } + if (retry++ > 10) + goto FINETUNE_DONE; + if (passcnt != 16) { + goto FINETUNE_START; + } + status = true; +FINETUNE_DONE: + gold_sadj[0] = gold_sadj[0] >> 4; + gold_sadj[1] = gold_sadj[0]; + + data = 0; + for (cnt = 0; cnt < 8; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[0] >= dlli) { + dlli = ((gold_sadj[0] - dlli) * 19) >> 5; + if (dlli > 3) { + dlli = 3; + } + } else { + dlli = ((dlli - gold_sadj[0]) * 19) >> 5; + if (dlli > 4) { + dlli = 4; + } + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0080, data); + + data = 0; + for (cnt = 8; cnt < 16; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[1] >= dlli) { + dlli = ((gold_sadj[1] - dlli) * 19) >> 5; + if (dlli > 3) { + dlli = 3; + } else { + dlli = (dlli - 1) & 0x7; + } + } else { + dlli = ((dlli - gold_sadj[1]) * 19) >> 5; + dlli += 1; + if (dlli > 4) { + dlli = 4; + } + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + 
} + } + ast_moutdwm(ast, 0x1E6E0084, data); + return status; +} /* finetuneDQI_L */ + +static void finetuneDQSI(struct ast_private *ast) +{ + u32 dlli, dqsip, dqidly; + u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; + u32 g_dqidly, g_dqsip, g_margin, g_side; + u16 pass[32][2][2]; + char tag[2][76]; + + /* Disable DQI CBR */ + reg_mcr0c = ast_mindwm(ast, 0x1E6E000C); + reg_mcr18 = ast_mindwm(ast, 0x1E6E0018); + reg_mcr18 &= 0x0000ffff; + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); + + for (dlli = 0; dlli < 76; dlli++) { + tag[0][dlli] = 0x0; + tag[1][dlli] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + pass[dqidly][0][0] = 0xff; + pass[dqidly][0][1] = 0x0; + pass[dqidly][1][0] = 0xff; + pass[dqidly][1][1] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + passcnt[0] = passcnt[1] = 0; + for (dqsip = 0; dqsip < 2; dqsip++) { + ast_moutdwm(ast, 0x1E6E000C, 0); + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23)); + ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c); + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0070, 0); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0); + if (cbr_scan3(ast)) { + if (dlli == 0) + break; + passcnt[dqsip]++; + tag[dqsip][dlli] = 'P'; + if (dlli < pass[dqidly][dqsip][0]) + pass[dqidly][dqsip][0] = (u16) dlli; + if (dlli > pass[dqidly][dqsip][1]) + pass[dqidly][dqsip][1] = (u16) dlli; + } else if (passcnt[dqsip] >= 5) + break; + else { + pass[dqidly][dqsip][0] = 0xff; + pass[dqidly][dqsip][1] = 0x0; + } + } + } + if (passcnt[0] == 0 && passcnt[1] == 0) + dqidly++; + } + /* Search margin */ + g_dqidly = g_dqsip = g_margin = g_side = 0; + + for (dqidly = 0; dqidly < 32; dqidly++) { + for (dqsip = 0; dqsip < 2; dqsip++) { + if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1]) + continue; + diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0]; + if ((diff+2) < g_margin) + continue; + passcnt[0] = passcnt[1] = 0; + for (dlli = 
pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++); + for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++); + if (passcnt[0] > passcnt[1]) + passcnt[0] = passcnt[1]; + passcnt[1] = 0; + if (passcnt[0] > g_side) + passcnt[1] = passcnt[0] - g_side; + if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) { + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } else if (passcnt[1] > 1 && g_side < 8) { + if (diff > g_margin) + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } + } + } + reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23); + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); + +} +static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; + bool status = false; + + finetuneDQSI(ast); + if (finetuneDQI_L(ast, param) == false) + return status; + +CBR_START2: + dllmin[0] = dllmin[1] = 0xff; + dllmax[0] = dllmax[1] = 0x0; + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2); + data = cbr_scan(ast); + if (data != 0) { + if (data & 0x1) { + if (dllmin[0] > dlli) { + dllmin[0] = dlli; + } + if (dllmax[0] < dlli) { + dllmax[0] = dlli; + } + } + if (data & 0x2) { + if (dllmin[1] > dlli) { + dllmin[1] = dlli; + } + if (dllmax[1] < dlli) { + dllmax[1] = dlli; + } + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD) { + break; + } + } + if (retry++ > 10) + goto CBR_DONE2; + if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) { + goto CBR_START2; + } + if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) { + goto CBR_START2; + } + status = true; +CBR_DONE2: + dlli = (dllmin[1] + dllmax[1]) >> 1; + dlli <<= 8; + dlli += (dllmin[0] + dllmax[0]) >> 1; + ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | 
(dlli << 16)); + return status; +} /* CBRDLL2 */ + +static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Ger trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = 0x00020000 + (trap << 16); + trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19); + trap_MRS = 0x00000010 + (trap << 4); + trap_MRS |= ((trap & 0x2) << 18); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 336: + ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 0; + param->reg_AC1 = 0x22202725; + param->reg_AC2 = 0xAA007613 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x04001400 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA007613 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA00761C | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA007636 | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | 
trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xCD44961A; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00081830; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 4; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0270); + param->wodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xDE44A61D; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x070000BB; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 4; + break; + case 528: + ast_moutdwm(ast, 0x1E6E2020, 0x0290); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xEF44B61E; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000088; + param->reg_FREQ = 
0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302A37; + param->reg_AC2 = 0xEF56B61E; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000057C0; + param->madj_max = 136; + param->dll2_finetune_step = 3; + break; + case 600: + ast_moutdwm(ast, 0x1E6E2020, 0x02E1); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xDF56B61F; + param->reg_DQSIC = 0x0000014D; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000058C0; + param->madj_max = 132; + param->dll2_finetune_step = 3; + break; + case 624: + ast_moutdwm(ast, 0x1E6E2020, 0x0160); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xEF56B621; + param->reg_DQSIC = 0x0000015A; + param->reg_MRS = 0x02101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000059C0; + param->madj_max = 128; + param->dll2_finetune_step = 3; + break; + } /* switch freq */ + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x130; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x131; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x132; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x133; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case 
AST_VIDMEM_SIZE_8M: + param->dram_config |= 0x00; + break; + case AST_VIDMEM_SIZE_16M: + param->dram_config |= 0x04; + break; + case AST_VIDMEM_SIZE_32M: + param->dram_config |= 0x08; + break; + case AST_VIDMEM_SIZE_64M: + param->dram_config |= 0x0c; + break; + } + +} + +static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr3_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0034, 0x00000000); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A170); + ast_moutdwm(ast, 0x1E6E0018, 0x00002370); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF444444); + ast_moutdwm(ast, 0x1E6E0044, 0x22222222); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x00000002); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = 
ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) { + break; + } + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) { + data2 = ((data2 & 0xff) >> 3) + 3; + } else { + data2 = ((data2 & 0xff) >> 2) + 5; + } + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000040); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + data = 0; + if (param->wodt) { + data = 0x300; + } + if (param->rodt) { + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + } + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr3_init_start; + + 
ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif + + +} + +static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Ger trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = (trap << 20) | (trap << 16); + trap_AC2 += 0x00110000; + trap_MRS = 0x00000040 | (trap << 4); + + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 264: + ast_moutdwm(ast, 0x1E6E2020, 0x0130); + param->wodt = 0; + param->reg_AC1 = 0x11101513; + param->reg_AC2 = 0x78117011; + param->reg_DQSIC = 0x00000092; + param->reg_MRS = 0x00000842; + param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x000000F0; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x0000005A; + param->reg_FREQ = 0x00004AC0; + param->madj_max = 138; + param->dll2_finetune_step = 3; + break; + case 336: + ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 1; + param->reg_AC1 = 0x22202613; + param->reg_AC2 = 0xAA009016 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x00000A02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xAA009012 | trap_AC2; + break; + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA009016 | trap_AC2; + break; + 
case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA009023 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA00903B | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xCD44B01E; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00000E72; + 
param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 3; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0261); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xDE44C022; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 3; + break; + case 528: + ast_moutdwm(ast, 0x1E6E2020, 0x0120); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xEF44D024; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F9; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A7; + param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 552: + ast_moutdwm(ast, 0x1E6E2020, 0x02A1); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E025; + param->reg_DQSIC = 0x00000132; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000AD; + param->reg_FREQ = 0x000056C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E027; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000B3; + param->reg_FREQ = 0x000057C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + } + + 
switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x100; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x121; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x122; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x123; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case AST_VIDMEM_SIZE_8M: + param->dram_config |= 0x00; + break; + case AST_VIDMEM_SIZE_16M: + param->dram_config |= 0x04; + break; + case AST_VIDMEM_SIZE_32M: + param->dram_config |= 0x08; + break; + case AST_VIDMEM_SIZE_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr2_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A130); + ast_moutdwm(ast, 0x1E6E0018, 0x00002330); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF808000); + ast_moutdwm(ast, 0x1E6E0044, 0x88848466); + ast_moutdwm(ast, 0x1E6E0048, 0x44440008); + ast_moutdwm(ast, 0x1E6E004C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + 
ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) { + break; + } + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) { + data2 = ((data2 & 0xff) >> 3) + 3; + } else { + data2 = ((data2 & 0xff) >> 2) + 5; + } + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); + 
ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + + ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); + data = 0; + if (param->wodt) { + data = 0x500; + } + if (param->rodt) { + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + } + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr2_init_start; + + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif + +} + +static void ast_init_dram_2300(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + struct ast2300_dram_param param; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & 0x80) == 0) {/* vga only */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x1); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x1); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + param.dram_type = AST_DDR3; + if (temp & 0x01000000) + param.dram_type = AST_DDR2; + param.dram_chipid = ast->dram_type; + param.dram_freq = ast->mclk; + param.vram_size = ast->vram_size; + + if (param.dram_type == AST_DDR3) { + get_ddr3_info(ast, ¶m); + ddr3_init(ast, ¶m); + } else { + get_ddr2_info(ast, ¶m); + ddr2_init(ast, ¶m); + } + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* 
wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} + diff --git a/kernel/drivers/gpu/drm/ast/ast_tables.h b/kernel/drivers/gpu/drm/ast/ast_tables.h new file mode 100644 index 000000000..3608d5aa7 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_tables.h @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2005 ASPEED Technology Inc. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that + * copyright notice and this permission notice appear in supporting + * documentation, and that the name of the authors not be used in + * advertising or publicity pertaining to distribution of the software without + * specific, written prior permission. The authors makes no representations + * about the suitability of this software for any purpose. It is provided + * "as is" without express or implied warranty. + * + * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* Ported from xf86-video-ast driver */ + +#ifndef AST_TABLES_H +#define AST_TABLES_H + +/* Std. 
Table Index Definition */ +#define TextModeIndex 0 +#define EGAModeIndex 1 +#define VGAModeIndex 2 +#define HiCModeIndex 3 +#define TrueCModeIndex 4 + +#define Charx8Dot 0x00000001 +#define HalfDCLK 0x00000002 +#define DoubleScanMode 0x00000004 +#define LineCompareOff 0x00000008 +#define HBorder 0x00000020 +#define VBorder 0x00000010 +#define WideScreenMode 0x00000100 +#define NewModeInfo 0x00000200 +#define NHSync 0x00000400 +#define PHSync 0x00000800 +#define NVSync 0x00001000 +#define PVSync 0x00002000 +#define SyncPP (PVSync | PHSync) +#define SyncPN (PVSync | NHSync) +#define SyncNP (NVSync | PHSync) +#define SyncNN (NVSync | NHSync) + +/* DCLK Index */ +#define VCLK25_175 0x00 +#define VCLK28_322 0x01 +#define VCLK31_5 0x02 +#define VCLK36 0x03 +#define VCLK40 0x04 +#define VCLK49_5 0x05 +#define VCLK50 0x06 +#define VCLK56_25 0x07 +#define VCLK65 0x08 +#define VCLK75 0x09 +#define VCLK78_75 0x0A +#define VCLK94_5 0x0B +#define VCLK108 0x0C +#define VCLK135 0x0D +#define VCLK157_5 0x0E +#define VCLK162 0x0F +/* #define VCLK193_25 0x10 */ +#define VCLK154 0x10 +#define VCLK83_5 0x11 +#define VCLK106_5 0x12 +#define VCLK146_25 0x13 +#define VCLK148_5 0x14 +#define VCLK71 0x15 +#define VCLK88_75 0x16 +#define VCLK119 0x17 +#define VCLK85_5 0x18 +#define VCLK97_75 0x19 +#define VCLK118_25 0x1A + +static struct ast_vbios_dclk_info dclk_table[] = { + {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ + {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ + {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 
0x00}, /* 0F: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x77, 0x58, 0x80}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ +}; + +static struct ast_vbios_stdtable vbios_stdtable[] = { + /* MD_2_3_400 */ + { + 0x67, + {0x00,0x03,0x00,0x02}, + {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f, + 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00, + 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3, + 0xff}, + {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07, + 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f, + 0x0c,0x00,0x0f,0x08}, + {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00, + 0xff} + }, + /* Mode12/ExtEGATable */ + { + 0xe3, + {0x01,0x0f,0x00,0x06}, + {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e, + 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00, + 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3, + 0xff}, + {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07, + 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f, + 0x01,0x00,0x0f,0x00}, + {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f, + 0xff} + }, + /* ExtVGATable */ + { + 0x2f, + {0x01,0x0f,0x00,0x0e}, + {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e, + 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00, + 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3, + 0xff}, + {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, + 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f, + 0x01,0x00,0x00,0x00}, + {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f, + 0xff} + }, + /* ExtHiCTable */ + { + 0x2f, + {0x01,0x0f,0x00,0x0e}, + {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e, + 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00, + 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3, + 0xff}, + {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, + 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f, + 0x01,0x00,0x00,0x00}, + {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f, + 0xff} + 
}, + /* ExtTrueCTable */ + { + 0x2f, + {0x01,0x0f,0x00,0x0e}, + {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e, + 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00, + 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3, + 0xff}, + {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, + 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f, + 0x01,0x00,0x00,0x00}, + {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f, + 0xff} + }, +}; + +static struct ast_vbios_enhtable res_640x480[] = { + { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */ + (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E }, + { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */ + (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E }, + { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */ + (SyncNN | Charx8Dot) , 75, 3, 0x2E }, + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ + (SyncNN | Charx8Dot) , 85, 4, 0x2E }, + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ + (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E }, +}; + +static struct ast_vbios_enhtable res_800x600[] = { + {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ + (SyncPP | Charx8Dot), 56, 1, 0x30 }, + {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 2, 0x30 }, + {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */ + (SyncPP | Charx8Dot), 72, 3, 0x30 }, + {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 4, 0x30 }, + {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */ + (SyncPP | Charx8Dot), 84, 5, 0x30 }, + {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */ + (SyncPP | Charx8Dot), 0xFF, 5, 0x30 }, +}; + + +static struct ast_vbios_enhtable res_1024x768[] = { + {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */ + (SyncNN | Charx8Dot), 60, 1, 0x31 }, + {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */ + (SyncNN | Charx8Dot), 70, 2, 0x31 }, + {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 3, 0x31 }, + {1376, 1024, 48, 96, 808, 768, 1, 3, 
VCLK94_5, /* 85Hz */ + (SyncPP | Charx8Dot), 84, 4, 0x31 }, + {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */ + (SyncPP | Charx8Dot), 0xFF, 4, 0x31 }, +}; + +static struct ast_vbios_enhtable res_1280x1024[] = { + {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 1, 0x32 }, + {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 2, 0x32 }, + {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */ + (SyncPP | Charx8Dot), 85, 3, 0x32 }, + {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */ + (SyncPP | Charx8Dot), 0xFF, 3, 0x32 }, +}; + +static struct ast_vbios_enhtable res_1600x1200[] = { + {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 1, 0x33 }, + {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */ + (SyncPP | Charx8Dot), 0xFF, 1, 0x33 }, +}; + +/* 16:9 */ +static struct ast_vbios_enhtable res_1360x768[] = { + {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 }, + {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 }, +}; + +static struct ast_vbios_enhtable res_1600x900[] = { + {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A }, + {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A }, + {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A }, +}; + +static struct ast_vbios_enhtable res_1920x1080[] = { + {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 }, + {2200, 
1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 }, +}; + + +/* 16:10 */ +static struct ast_vbios_enhtable res_1280x800[] = { + {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 }, + {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 }, + {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x35 }, + +}; + +static struct ast_vbios_enhtable res_1440x900[] = { + {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 }, + {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 }, + {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 }, +}; + +static struct ast_vbios_enhtable res_1680x1050[] = { + {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 }, + {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 }, + {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 }, +}; + +static struct ast_vbios_enhtable res_1920x1200[] = { + {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 }, + {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff 
| WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 }, +}; + +#endif diff --git a/kernel/drivers/gpu/drm/ast/ast_ttm.c b/kernel/drivers/gpu/drm/ast/ast_ttm.c new file mode 100644 index 000000000..08f82eae6 --- /dev/null +++ b/kernel/drivers/gpu/drm/ast/ast_ttm.c @@ -0,0 +1,435 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ +#include +#include "ast_drv.h" +#include + +static inline struct ast_private * +ast_bdev(struct ttm_bo_device *bd) +{ + return container_of(bd, struct ast_private, ttm.bdev); +} + +static int +ast_ttm_mem_global_init(struct drm_global_reference *ref) +{ + return ttm_mem_global_init(ref->object); +} + +static void +ast_ttm_mem_global_release(struct drm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +static int ast_ttm_global_init(struct ast_private *ast) +{ + struct drm_global_reference *global_ref; + int r; + + global_ref = &ast->ttm.mem_global_ref; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &ast_ttm_mem_global_init; + global_ref->release = &ast_ttm_mem_global_release; + r = drm_global_item_ref(global_ref); + if (r != 0) { + DRM_ERROR("Failed setting up TTM memory accounting " + "subsystem.\n"); + return r; + } + + ast->ttm.bo_global_ref.mem_glob = + ast->ttm.mem_global_ref.object; + global_ref = &ast->ttm.bo_global_ref.ref; + global_ref->global_type = DRM_GLOBAL_TTM_BO; + global_ref->size = sizeof(struct ttm_bo_global); + global_ref->init = &ttm_bo_global_init; + global_ref->release = &ttm_bo_global_release; + r = drm_global_item_ref(global_ref); + if (r != 0) { + DRM_ERROR("Failed setting up TTM BO subsystem.\n"); + drm_global_item_unref(&ast->ttm.mem_global_ref); + return r; + } + return 0; +} + +static void +ast_ttm_global_release(struct ast_private *ast) +{ + if (ast->ttm.mem_global_ref.release == NULL) + return; + + drm_global_item_unref(&ast->ttm.bo_global_ref.ref); + drm_global_item_unref(&ast->ttm.mem_global_ref); + ast->ttm.mem_global_ref.release = NULL; +} + + +static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo) +{ + struct ast_bo *bo; + + bo = container_of(tbo, struct ast_bo, bo); + + drm_gem_object_release(&bo->gem); + kfree(bo); +} + +static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo) +{ 
+ if (bo->destroy == &ast_bo_ttm_destroy) + return true; + return false; +} + +static int +ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +static void +ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) +{ + struct ast_bo *astbo = ast_bo(bo); + + if (!ast_ttm_bo_is_ast_bo(bo)) + return; + + ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM); + *pl = astbo->placement; +} + +static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) +{ + struct ast_bo *astbo = ast_bo(bo); + + return drm_vma_node_verify_access(&astbo->gem.vma_node, filp); +} + +static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct ast_private *ast = ast_bdev(bdev); + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* system memory */ + return 0; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(ast->dev->pdev, 0); + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + break; + } + return 0; +} + +static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ +} + +static 
int ast_bo_move(struct ttm_buffer_object *bo, + bool evict, bool interruptible, + bool no_wait_gpu, + struct ttm_mem_reg *new_mem) +{ + int r; + r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + return r; +} + + +static void ast_ttm_backend_destroy(struct ttm_tt *tt) +{ + ttm_tt_fini(tt); + kfree(tt); +} + +static struct ttm_backend_func ast_tt_backend_func = { + .destroy = &ast_ttm_backend_destroy, +}; + + +static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page) +{ + struct ttm_tt *tt; + + tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); + if (tt == NULL) + return NULL; + tt->func = &ast_tt_backend_func; + if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { + kfree(tt); + return NULL; + } + return tt; +} + +static int ast_ttm_tt_populate(struct ttm_tt *ttm) +{ + return ttm_pool_populate(ttm); +} + +static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm) +{ + ttm_pool_unpopulate(ttm); +} + +struct ttm_bo_driver ast_bo_driver = { + .ttm_tt_create = ast_ttm_tt_create, + .ttm_tt_populate = ast_ttm_tt_populate, + .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, + .init_mem_type = ast_bo_init_mem_type, + .evict_flags = ast_bo_evict_flags, + .move = ast_bo_move, + .verify_access = ast_bo_verify_access, + .io_mem_reserve = &ast_ttm_io_mem_reserve, + .io_mem_free = &ast_ttm_io_mem_free, +}; + +int ast_mm_init(struct ast_private *ast) +{ + int ret; + struct drm_device *dev = ast->dev; + struct ttm_bo_device *bdev = &ast->ttm.bdev; + + ret = ast_ttm_global_init(ast); + if (ret) + return ret; + + ret = ttm_bo_device_init(&ast->ttm.bdev, + ast->ttm.bo_global_ref.ref.object, + &ast_bo_driver, + dev->anon_inode->i_mapping, + DRM_FILE_PAGE_OFFSET, + true); + if (ret) { + DRM_ERROR("Error initialising bo driver; %d\n", ret); + return ret; + } + + ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, + ast->vram_size >> PAGE_SHIFT); + if (ret) { + DRM_ERROR("Failed ttm VRAM init: %d\n", ret); + 
return ret; + } + + ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + + return 0; +} + +void ast_mm_fini(struct ast_private *ast) +{ + ttm_bo_device_release(&ast->ttm.bdev); + + ast_ttm_global_release(ast); + + arch_phys_wc_del(ast->fb_mtrr); +} + +void ast_ttm_placement(struct ast_bo *bo, int domain) +{ + u32 c = 0; + unsigned i; + + bo->placement.placement = bo->placements; + bo->placement.busy_placement = bo->placements; + if (domain & TTM_PL_FLAG_VRAM) + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + if (domain & TTM_PL_FLAG_SYSTEM) + bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; + if (!c) + bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; + bo->placement.num_placement = c; + bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } +} + +int ast_bo_create(struct drm_device *dev, int size, int align, + uint32_t flags, struct ast_bo **pastbo) +{ + struct ast_private *ast = dev->dev_private; + struct ast_bo *astbo; + size_t acc_size; + int ret; + + astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL); + if (!astbo) + return -ENOMEM; + + ret = drm_gem_object_init(dev, &astbo->gem, size); + if (ret) { + kfree(astbo); + return ret; + } + + astbo->bo.bdev = &ast->ttm.bdev; + + ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); + + acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size, + sizeof(struct ast_bo)); + + ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, + ttm_bo_type_device, &astbo->placement, + align >> PAGE_SHIFT, false, NULL, acc_size, + NULL, NULL, ast_bo_ttm_destroy); + if (ret) + return ret; + + *pastbo = astbo; + return 0; +} + +static inline u64 ast_bo_gpu_offset(struct ast_bo *bo) +{ + return bo->bo.offset; +} + +int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) +{ + int i, ret; + + if (bo->pin_count) { + 
bo->pin_count++; + if (gpu_addr) + *gpu_addr = ast_bo_gpu_offset(bo); + } + + ast_ttm_placement(bo, pl_flag); + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + if (ret) + return ret; + + bo->pin_count = 1; + if (gpu_addr) + *gpu_addr = ast_bo_gpu_offset(bo); + return 0; +} + +int ast_bo_unpin(struct ast_bo *bo) +{ + int i, ret; + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + for (i = 0; i < bo->placement.num_placement ; i++) + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + if (ret) + return ret; + + return 0; +} + +int ast_bo_push_sysram(struct ast_bo *bo) +{ + int i, ret; + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + if (bo->kmap.virtual) + ttm_bo_kunmap(&bo->kmap); + + ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); + for (i = 0; i < bo->placement.num_placement ; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + if (ret) { + DRM_ERROR("pushing to VRAM failed\n"); + return ret; + } + return 0; +} + +int ast_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct ast_private *ast; + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) + return -EINVAL; + + file_priv = filp->private_data; + ast = file_priv->minor->dev->dev_private; + return ttm_bo_mmap(filp, vma, &ast->ttm.bdev); +} diff --git a/kernel/drivers/gpu/drm/ati_pcigart.c b/kernel/drivers/gpu/drm/ati_pcigart.c new file mode 100644 index 000000000..6c4d4b6eb --- /dev/null +++ b/kernel/drivers/gpu/drm/ati_pcigart.c @@ -0,0 +1,204 @@ +/** + * \file ati_pcigart.c + * ATI PCI GART support + * + * \author Gareth Hughes + */ + +/* + * Created: Wed Dec 13 
21:52:19 2000 by gareth@valinux.com + * + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include + +# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ + +static int drm_ati_alloc_pcigart_table(struct drm_device *dev, + struct drm_ati_pcigart_info *gart_info) +{ + gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, + PAGE_SIZE); + if (gart_info->table_handle == NULL) + return -ENOMEM; + + return 0; +} + +static void drm_ati_free_pcigart_table(struct drm_device *dev, + struct drm_ati_pcigart_info *gart_info) +{ + drm_pci_free(dev, gart_info->table_handle); + gart_info->table_handle = NULL; +} + +int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) +{ + struct drm_sg_mem *entry = dev->sg; + unsigned long pages; + int i; + int max_pages; + + /* we need to support large memory configurations */ + if (!entry) { + DRM_ERROR("no scatter/gather memory!\n"); + return 0; + } + + if (gart_info->bus_addr) { + + max_pages = (gart_info->table_size / sizeof(u32)); + pages = (entry->pages <= max_pages) + ? 
entry->pages : max_pages; + + for (i = 0; i < pages; i++) { + if (!entry->busaddr[i]) + break; + pci_unmap_page(dev->pdev, entry->busaddr[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + } + + if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) + gart_info->bus_addr = 0; + } + + if (gart_info->gart_table_location == DRM_ATI_GART_MAIN && + gart_info->table_handle) { + drm_ati_free_pcigart_table(dev, gart_info); + } + + return 1; +} +EXPORT_SYMBOL(drm_ati_pcigart_cleanup); + +int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) +{ + struct drm_local_map *map = &gart_info->mapping; + struct drm_sg_mem *entry = dev->sg; + void *address = NULL; + unsigned long pages; + u32 *pci_gart = NULL, page_base, gart_idx; + dma_addr_t bus_address = 0; + int i, j, ret = 0; + int max_ati_pages, max_real_pages; + + if (!entry) { + DRM_ERROR("no scatter/gather memory!\n"); + goto done; + } + + if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { + DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); + + if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { + DRM_ERROR("fail to set dma mask to 0x%Lx\n", + (unsigned long long)gart_info->table_mask); + ret = 1; + goto done; + } + + ret = drm_ati_alloc_pcigart_table(dev, gart_info); + if (ret) { + DRM_ERROR("cannot allocate PCI GART page!\n"); + goto done; + } + + pci_gart = gart_info->table_handle->vaddr; + address = gart_info->table_handle->vaddr; + bus_address = gart_info->table_handle->busaddr; + } else { + address = gart_info->addr; + bus_address = gart_info->bus_addr; + DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n", + (unsigned long long)bus_address, + (unsigned long)address); + } + + + max_ati_pages = (gart_info->table_size / sizeof(u32)); + max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); + pages = (entry->pages <= max_real_pages) + ? 
entry->pages : max_real_pages; + + if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { + memset(pci_gart, 0, max_ati_pages * sizeof(u32)); + } else { + memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32)); + } + + gart_idx = 0; + for (i = 0; i < pages; i++) { + /* we need to support large memory configurations */ + entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], + 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) { + DRM_ERROR("unable to map PCIGART pages!\n"); + drm_ati_pcigart_cleanup(dev, gart_info); + address = NULL; + bus_address = 0; + goto done; + } + page_base = (u32) entry->busaddr[i]; + + for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { + u32 val; + + switch(gart_info->gart_reg_if) { + case DRM_ATI_GART_IGP: + val = page_base | 0xc; + break; + case DRM_ATI_GART_PCIE: + val = (page_base >> 8) | 0xc; + break; + default: + case DRM_ATI_GART_PCI: + val = page_base; + break; + } + if (gart_info->gart_table_location == + DRM_ATI_GART_MAIN) + pci_gart[gart_idx] = cpu_to_le32(val); + else + DRM_WRITE32(map, gart_idx * sizeof(u32), val); + gart_idx++; + page_base += ATI_PCIGART_PAGE_SIZE; + } + } + ret = 1; + +#if defined(__i386__) || defined(__x86_64__) + wbinvd(); +#else + mb(); +#endif + + done: + gart_info->addr = address; + gart_info->bus_addr = bus_address; + return ret; +} +EXPORT_SYMBOL(drm_ati_pcigart_init); diff --git a/kernel/drivers/gpu/drm/atmel-hlcdc/Kconfig b/kernel/drivers/gpu/drm/atmel-hlcdc/Kconfig new file mode 100644 index 000000000..99b4f0698 --- /dev/null +++ b/kernel/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -0,0 +1,11 @@ +config DRM_ATMEL_HLCDC + tristate "DRM Support for ATMEL HLCDC Display Controller" + depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM + select DRM_GEM_CMA_HELPER + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + select DRM_KMS_CMA_HELPER + select DRM_PANEL + help + Choose this option if you have an ATMEL SoC with 
an HLCDC display + controller (i.e. at91sam9n12, at91sam9x5 family or sama5d3 family). diff --git a/kernel/drivers/gpu/drm/atmel-hlcdc/Makefile b/kernel/drivers/gpu/drm/atmel-hlcdc/Makefile new file mode 100644 index 000000000..10ae426e6 --- /dev/null +++ b/kernel/drivers/gpu/drm/atmel-hlcdc/Makefile @@ -0,0 +1,7 @@ +atmel-hlcdc-dc-y := atmel_hlcdc_crtc.o \ + atmel_hlcdc_dc.o \ + atmel_hlcdc_layer.o \ + atmel_hlcdc_output.o \ + atmel_hlcdc_plane.o + +obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc-dc.o diff --git a/kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c new file mode 100644 index 000000000..f69b92535 --- /dev/null +++ b/kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -0,0 +1,367 @@ +/* + * Copyright (C) 2014 Traphandler + * Copyright (C) 2014 Free Electrons + * + * Author: Jean-Jacques Hiblot + * Author: Boris BREZILLON + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include