author    Daniel Vetter  2016-10-28 09:14:08 +0200
committer Daniel Vetter  2016-10-28 09:14:08 +0200
commit    96583ddbec291929880edefa2141c06c63e16aa5 (patch)
tree      6ae6bd3aec381d7461a687e3314bdf949798c554 /drivers/gpu/drm
parent    drm/i915: Correct pipe fault reporting string (diff)
parent    Merge branch 'linux-4.9' of git://github.com/skeggsb/linux into drm-next (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Backmerge latest drm-next to pull in the s/fence/dma_fence/ rework, needed
before we merge more i915 fencing patches.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
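The rework being pulled in is a tree-wide rename of the kernel's fence primitives: struct fence becomes struct dma_fence, <linux/fence.h> becomes <linux/dma-fence.h>, and the fence_*() helpers gain a dma_fence_ prefix, as the amdgpu hunks below show. A minimal sketch of the new calling convention (the helper name is illustrative, not part of this patch):

    #include <linux/dma-fence.h>

    /* Wait for a fence to signal, then drop the reference we hold on it.
     * Mirrors the pattern in amdgpu_benchmark_do_move() further down. */
    static int example_wait_and_put(struct dma_fence *fence)
    {
            int r;

            r = dma_fence_wait(fence, false);  /* was fence_wait() */
            dma_fence_put(fence);              /* was fence_put() */
            return r;
    }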
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  884
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  470
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h  443
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  185
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h  56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  197
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  357
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  205
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  222
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c  122
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  834
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  135
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  135
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  317
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  293
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c  432
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  152
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  221
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  162
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c  51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  988
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h  2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  23
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h  2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h  2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h  2
-rwxr-xr-x  drivers/gpu/drm/amd/include/cgs_common.h  6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c  21
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c  12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c  6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c  22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  59
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/power_state.h  9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h  9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c  6
-rwxr-xr-x  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c  12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c  7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c  4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h  4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  67
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  26
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c  48
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c  3
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c  4
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c  2
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c  1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c  1
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig  7
-rw-r--r--  drivers/gpu/drm/bridge/Makefile  1
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c  1564
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.h  1517
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c  1
-rw-r--r--  drivers/gpu/drm/drm_atomic.c  8
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  8
-rw-r--r--  drivers/gpu/drm/drm_blend.c  32
-rw-r--r--  drivers/gpu/drm/drm_dp_dual_mode_helper.c  18
-rw-r--r--  drivers/gpu/drm/drm_edid.c  79
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  7
-rw-r--r--  drivers/gpu/drm/drm_fops.c  6
-rw-r--r--  drivers/gpu/drm/drm_of.c  28
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c  5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c  46
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h  4
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c  7
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c  32
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h  18
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c  41
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h  8
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h  2
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c  2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c  1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c  3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h  3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c  35
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c  1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  2
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c  28
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  14
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  8
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h  10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h  5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h  5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  80
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.c  139
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h  57
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c  17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c  82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c  139
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c  28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c  129
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c  137
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c  70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h  4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h  4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c  35
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c  1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c  1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c  1
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sync.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c  2
-rw-r--r--  drivers/gpu/drm/radeon/si.c  2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c  6
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c  5
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c  3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c  4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  59
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  22
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  3
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c  53
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c  26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c  12
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c  1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  44
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h  8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  2
278 files changed, 9026 insertions, 5538 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d02917..41bd2bf28f4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17..06192698bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -120,7 +119,6 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -149,7 +147,6 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..2ec7b3baeec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -53,7 +53,11 @@
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
#include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define AMDGPU_MAX_RINGS 16
-#define AMDGPU_MAX_GFX_RINGS 1
-#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 3
-
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
struct amdgpu_device;
struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
bool amdgpu_is_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+ bool late_initialized;
+ bool hang;
+};
+
struct amdgpu_ip_block_version {
- enum amd_ip_block_type type;
- u32 major;
- u32 minor;
- u32 rev;
+ const enum amd_ip_block_type type;
+ const u32 major;
+ const u32 minor;
+ const u32 rev;
const struct amd_ip_funcs *funcs;
};
+struct amdgpu_ip_block {
+ struct amdgpu_ip_block_status status;
+ const struct amdgpu_ip_block_version *version;
+};
+
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor);
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
void (*set_rptr)(struct amdgpu_device *adev);
};
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
- /* ring read/write ptr handling */
- u32 (*get_rptr)(struct amdgpu_ring *ring);
- u32 (*get_wptr)(struct amdgpu_ring *ring);
- void (*set_wptr)(struct amdgpu_ring *ring);
- /* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
- /* command emit functions */
- void (*emit_ib)(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
- void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
- uint64_t seq, unsigned flags);
- void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
- uint64_t pd_addr);
- void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
- void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
- /* testing functions */
- int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring, long timeout);
- /* insert NOP packets */
- void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
- /* pad the indirect buffer to the necessary number of dw */
- void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
- /* note usage for clock and power gating */
- void (*begin_use)(struct amdgpu_ring *ring);
- void (*end_use)(struct amdgpu_ring *ring);
- void (*emit_switch_buffer) (struct amdgpu_ring *ring);
- void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
- unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
/*
* BIOS.
*/
@@ -364,47 +337,6 @@ struct amdgpu_clock {
};
/*
- * Fences.
- */
-struct amdgpu_fence_driver {
- uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
- /* sync_seq is protected by ring emission lock */
- uint32_t sync_seq;
- atomic_t last_seq;
- bool initialized;
- struct amdgpu_irq_src *irq_src;
- unsigned irq_type;
- struct timer_list fallback_timer;
- unsigned num_fences_mask;
- spinlock_t lock;
- struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
* BO.
*/
struct amdgpu_bo_list_entry {
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- struct fence *last_pt_update;
+ struct dma_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex and spinlock */
@@ -464,7 +396,6 @@ struct amdgpu_bo {
*/
struct list_head va;
/* Constant after initialization */
- struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
@@ -543,7 +474,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct fence *fence;
+ struct dma_fence *fence;
};
/*
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
- DECLARE_HASHTABLE(fences, 4);
- struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
- void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
@@ -703,10 +613,10 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct fence *excl;
+ struct dma_fence *excl;
unsigned shared_count;
- struct fence **shared;
- struct fence_cb cb;
+ struct dma_fence **shared;
+ struct dma_fence_cb cb;
bool async;
};
@@ -723,14 +633,6 @@ struct amdgpu_ib {
uint32_t flags;
};
-enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE
-};
-
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -742,214 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f);
-
-struct amdgpu_ring {
- struct amdgpu_device *adev;
- const struct amdgpu_ring_funcs *funcs;
- struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
-
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
- unsigned ring_size;
- unsigned max_dw;
- int count_dw;
- uint64_t gpu_addr;
- uint32_t align_mask;
- uint32_t ptr_mask;
- bool ready;
- u32 nop;
- u32 idx;
- u32 me;
- u32 pipe;
- u32 queue;
- struct amdgpu_bo *mqd_obj;
- u32 doorbell_index;
- bool use_doorbell;
- unsigned wptr_offs;
- unsigned fence_offs;
- uint64_t current_ctx;
- enum amdgpu_ring_type type;
- char name[16];
- unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
- struct amdgpu_bo_list_entry entry;
- uint64_t addr;
- uint64_t shadow_addr;
-};
-
-struct amdgpu_vm {
- /* tree of virtual addresses mapped */
- struct rb_root va;
-
- /* protecting invalidated */
- spinlock_t status_lock;
-
- /* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
-
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
- /* contains the page directory */
- struct amdgpu_bo *page_directory;
- unsigned max_pde_used;
- struct fence *page_directory_fence;
- uint64_t last_eviction_counter;
-
- /* array of page tables, one for each page directory entry */
- struct amdgpu_vm_pt *page_tables;
-
- /* for id and flush management per ring */
- struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
- /* protecting freed */
- spinlock_t freed_lock;
-
- /* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
-
- /* client id */
- u64 client_id;
-};
-
-struct amdgpu_vm_id {
- struct list_head list;
- struct fence *first;
- struct amdgpu_sync active;
- struct fence *last_flush;
- atomic64_t owner;
-
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
- /* Handling of VMIDs */
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
- uint32_t max_pfn;
- /* vram base address for page table entry */
- u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
- /* vm pte handling */
- const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rings;
- atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
- struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va);
+ struct dma_fence **f);
/*
* context related structures
@@ -957,7 +652,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
- struct fence **fences;
+ struct dma_fence **fences;
struct amd_sched_entity entity;
};
@@ -966,7 +661,7 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
- struct fence **fences;
+ struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
};
@@ -982,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1093,6 +788,16 @@ struct amdgpu_scratch {
/*
* GFX configurations
*/
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+ uint32_t rb_backend_disable;
+ uint32_t user_rb_backend_disable;
+ uint32_t raster_config;
+ uint32_t raster_config_1;
+};
+
struct amdgpu_gca_config {
unsigned max_shader_engines;
unsigned max_tile_pipes;
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config {
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
+
+ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
};
struct amdgpu_cu_info {
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
};
struct amdgpu_gfx {
@@ -1181,23 +889,13 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f);
+ struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f);
+ struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
/*
* CS.
@@ -1225,7 +923,7 @@ struct amdgpu_cs_parser {
struct amdgpu_bo_list *bo_list;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
struct amdgpu_bo_list_entry *evictable;
@@ -1245,7 +943,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
- struct fence *fence; /* the hw fence */
+ struct dma_fence *fence; /* the hw fence */
uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
@@ -1294,354 +992,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
- AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amdgpu_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
- AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
- AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
- AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
- AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
- enum amdgpu_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-struct amdgpu_ip_block_status {
- bool valid;
- bool sw;
- bool hw;
- bool late_initialized;
- bool hang;
-};
-
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -2102,9 +1444,8 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
- const struct amdgpu_ip_block_version *ip_blocks;
+ struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
- struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
@@ -2127,6 +1468,11 @@ struct amdgpu_device {
};
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev,
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
- -EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
/* Common functions */
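Two smaller interface changes in this header are easy to miss amid the dpm code motion: struct amdgpu_bo loses its cached adev pointer, and the new amdgpu_ttm_adev() inline recovers the device from the TTM bo_device instead. A hedged usage sketch (the bo variable is illustrative; amdgpu_bo embeds its ttm_buffer_object as tbo):

    /* Given an amdgpu_bo, reach the device through the embedded TTM
     * object rather than the cached bo->adev field, which is gone now. */
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);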
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..2f9f96cc9f65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- const struct amdgpu_ip_block_version *ip_version =
+ const struct amdgpu_ip_block *ip_block =
amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
- if (!ip_version)
+ if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
- ip_version->major, ip_version->minor);
+ ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV)
return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+};
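With acp_ip_funcs made static, the ACP block is now exported as a complete amdgpu_ip_block_version and registered through the amdgpu_ip_block_add() helper declared in amdgpu.h above. A sketch of what an ASIC init path would do with it (the function name is illustrative, not from this patch):

    /* Register the ACP IP block during early ASIC setup on an APU. */
    static int example_set_ip_blocks(struct amdgpu_device *adev)
    {
            return amdgpu_ip_block_add(adev, &acp_ip_block);
    }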
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f..a288ce25c176 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
struct acp_pm_domain *acp_genpd;
};
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d689..56a86dd5789e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
- GET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
- GET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock)
-{
- SET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
- args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock)
-{
- SET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock)
{
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = voltage_level;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- /* 0xff01 is a flag rather then an actual voltage */
- if (voltage_level == 0xff01)
- return;
-
- switch (crev) {
- case 1:
- args.v1.ucVoltageType = voltage_type;
- args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
- args.v1.ucVoltageIndex = volt_index;
- break;
- case 2:
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- case 3:
- args.v3.ucVoltageType = voltage_type;
- args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
- args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return;
- }
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
u16 *leakage_id)
{
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
+{
+ u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
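
amdgpu_atombios_scratch_regs_engine_hung() lets reset code publish the GUI engine state through BIOS scratch register 3. A hedged sketch of the intended call pattern around a reset; example_gpu_reset() and do_asic_reset() are hypothetical placeholders, not from this diff:

	/* Sketch: flag the engine as hung for the duration of a reset. */
	static int example_gpu_reset(struct amdgpu_device *adev)
	{
		int r;

		amdgpu_atombios_scratch_regs_engine_hung(adev, true);
		r = do_asic_reset(adev);	/* hypothetical reset entry point */
		amdgpu_atombios_scratch_regs_engine_hung(adev, false);
		return r;
	}
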
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 17356151db38..70e9acef5d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type);
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
- fence_put(fence);
+ dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
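
The benchmark hunk is one instance of the tree-wide s/fence/dma_fence/ rename this backmerge pulls in: struct fence becomes struct dma_fence and every fence_* call gains a dma_ prefix with unchanged semantics. A minimal sketch of the pattern:

	/* Sketch: the fence -> dma_fence rename, behavior unchanged. */
	static int example_wait_and_release(struct dma_fence *fence)
	{
		/* previously: r = fence_wait(fence, false); fence_put(fence); */
		int r = dma_fence_wait(fence, false);	/* false = uninterruptible */

		dma_fence_put(fence);
		return r;
	}
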
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa34682f..017556ca22e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev,
state);
break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev,
state);
break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
- if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
- || adev->asic_type == CHIP_POLARIS10)
- result = AMDGPU_UCODE_ID_CP_MEC2;
- else
+		/* For VI, JT2 should be the same as JT1, because:
+		 * 1) MEC2 and MEC1 use exactly the same FW.
+		 * 2) JT2 is not patched, but JT1 is.
+		 */
+ if (adev->asic_type >= CHIP_TOPAZ)
result = AMDGPU_UCODE_ID_CP_MEC1;
+ else
+ result = AMDGPU_UCODE_ID_CP_MEC2;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
+ case CGS_UCODE_ID_STORAGE:
+ result = AMDGPU_UCODE_ID_STORAGE;
+ break;
default:
DRM_ERROR("Firmware type not supported\n");
}
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
data_size = le32_to_cpu(header->jt_size) << 2;
}
- info->mc_addr = gpu_addr;
+
+ info->kptr = ucode->kaddr;
info->image_size = data_size;
+ info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+	if (type == CGS_UCODE_ID_CP_MEC)
+ info->image_size = (header->jt_offset) << 2;
+
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_sriov_vf(adev);
+}
+
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
+ amdgpu_cgs_is_virtualization_enabled
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4e3e41..3af8ffb45b64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- struct drm_encoder *encoder;
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
- if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
- return encoder;
- }
-
- /* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
- return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
- struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
- if (encoder) {
- amdgpu_connector_add_common_modes(encoder, connector);
- }
-
- return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
- return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
- return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
- .get_modes = amdgpu_connector_virtual_get_modes,
- .mode_valid = amdgpu_connector_virtual_mode_valid,
- .best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
- .dpms = amdgpu_connector_virtual_dpms,
- .detect = amdgpu_connector_virtual_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = amdgpu_connector_virtual_set_property,
- .destroy = amdgpu_connector_destroy,
- .force = amdgpu_connector_virtual_force,
-};
-
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
- case DRM_MODE_CONNECTOR_VIRTUAL:
- amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
- if (!amdgpu_dig_connector)
- goto failed;
- amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
- subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536..a024217896fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t domain;
int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ retry:
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo_list_entry *lobj)
+ struct amdgpu_bo *validated)
{
- uint32_t domain = lobj->robj->allowed_domains;
+ uint32_t domain = validated->allowed_domains;
int r;
if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t other;
/* If we reached our current BO we can forget it */
- if (candidate == lobj)
+ if (candidate->robj == validated)
break;
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
return false;
}
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct amdgpu_cs_parser *p = param;
+ int r;
+
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ if (r)
+ return r;
+
+ if (bo->shadow)
+ r = amdgpu_cs_bo_validate(p, bo);
+
+ return r;
+}
+
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (p->evictable == lobj)
p->evictable = NULL;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (bo->shadow) {
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
- }
-
if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
@@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
+ r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+ amdgpu_cs_validate, p);
+ if (r) {
+ DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ goto error_validate;
+ }
+
r = amdgpu_cs_list_validate(p, &duplicates);
if (r) {
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
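
Page table BOs are no longer spliced onto the duplicates list via amdgpu_vm_get_pt_bos(); they are validated through a callback handed to amdgpu_vm_validate_pt_bos(), as the hunk above shows. A sketch of the callback contract, inferred from amdgpu_cs_validate() above:

	/* Sketch: the validate callback passed to amdgpu_vm_validate_pt_bos().
	 * 'param' is the opaque pointer supplied by the caller; return 0 on
	 * success or a negative errno.
	 */
	static int example_validate_cb(void *param, struct amdgpu_bo *bo)
	{
		struct amdgpu_cs_parser *p = param;

		return amdgpu_cs_bo_validate(p, bo);
	}
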
@@ -719,7 +735,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
- fence_put(parser->fence);
+ dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
@@ -756,7 +772,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
- struct fence *f;
+ struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
@@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
- p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- } else {
+ }
+
+ if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
r = amdgpu_bo_vm_update_pte(p, vm);
@@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
- r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+ r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- ib->gpu_addr = chunk_ib->va_start;
}
+ ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
@@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
@@ -956,7 +973,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -978,7 +995,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
- fence_put(fence);
+ dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
@@ -1008,7 +1025,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
- p->fence = fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1091,7 +1108,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1107,8 +1124,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
} else
r = 1;
@@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ continue;
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r))
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
- sizeof(struct fence*), GFP_KERNEL);
+ sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
- break;
+ goto failed;
}
- if (i < adev->num_rings) {
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
- }
return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+ return r;
}
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
- fence_put(ctx->rings[i].fences[j]);
+ dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
- struct fence *other = NULL;
+ struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
- r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
- fence_get(fence);
+ dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
- fence_put(other);
+ dma_fence_put(other);
return seq;
}
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
- struct fence *fence;
+ struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
- fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+ fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
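
amdgpu_ctx keeps a small per-ring ring buffer of fences: a sequence number maps to slot seq & (amdgpu_sched_jobs - 1), and amdgpu_ctx_add_fence() waits out the fence it is about to overwrite. A sketch of the add/lookup pairing, assuming ctx, ring and fence come from the normal CS path:

	/* Sketch: round-tripping a submission fence by sequence number. */
	static void example_ctx_fence_roundtrip(struct amdgpu_ctx *ctx,
						struct amdgpu_ring *ring,
						struct dma_fence *fence)
	{
		uint64_t seq = amdgpu_ctx_add_fence(ctx, ring, fence);
		struct dma_fence *f = amdgpu_ctx_get_fence(ctx, ring, seq);

		if (!IS_ERR_OR_NULL(f)) {
			dma_fence_wait(f, false);
			dma_fence_put(f);	/* get_fence returned a reference */
		}
	}
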
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..6958d4af017f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
- if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
- amdgpu_bo_kunmap(adev->wb.wb_obj);
- amdgpu_bo_unpin(adev->wb.wb_obj);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- }
- amdgpu_bo_unref(&adev->wb.wb_obj);
- adev->wb.wb = NULL;
+ amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+ &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->wb.wb_obj);
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->wb.wb_obj, &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->wb.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
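
The open-coded create/reserve/pin/kmap sequence for the writeback buffer collapses into amdgpu_bo_create_kernel(), mirrored by amdgpu_bo_free_kernel() in amdgpu_wb_fini() above. A sketch of the pairing for a generic kernel-owned BO; the function name is illustrative:

	/* Sketch: allocate, pin and map a kernel BO in one call, then undo
	 * all three on teardown.
	 */
	static int example_kernel_bo(struct amdgpu_device *adev)
	{
		struct amdgpu_bo *bo;
		u64 gpu_addr;
		void *cpu_ptr;
		int r;

		r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &bo, &gpu_addr, &cpu_ptr);
		if (r)
			return r;
		/* ... use cpu_ptr / gpu_addr ... */
		amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
		return 0;
	}
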
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size);
amdgpu_vm_block_size = 9;
}
+
+ if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
+ !amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
+ dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+ amdgpu_vram_page_split);
+ amdgpu_vram_page_split = 1024;
+ }
}
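
amdgpu_vram_page_split is sanity-checked with amdgpu_check_pot_argument() and falls back to 1024 when out of range or not a power of two. The helper's body is not part of this hunk; assuming the usual bit trick, it plausibly looks like:

	/* Sketch: standard power-of-two test (assumed, not from this diff). */
	static bool amdgpu_check_pot_argument(int arg)
	{
		return (arg & (arg - 1)) == 0;
	}
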
/**
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type)
- return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type)
+ if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
- const struct amdgpu_ip_block_version *ip_block;
- ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
- if (ip_block && ((ip_block->major > major) ||
- ((ip_block->major == major) &&
- (ip_block->minor >= minor))))
+ if (ip_block && ((ip_block->version->major > major) ||
+ ((ip_block->version->major == major) &&
+ (ip_block->version->minor >= minor))))
return 0;
return 1;
}
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
+{
+ if (!ip_block_version)
+ return -EINVAL;
+
+ adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+ return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
struct drm_device *ddev = adev->ddev;
const char *pci_address_name = pci_name(ddev->pdev);
- char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
- while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+ pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp(pci_address_name, pciaddname)) {
+ long num_crtc;
+ int res = -1;
+
adev->enable_virtual_display = true;
+
+ if (pciaddname_tmp)
+ res = kstrtol(pciaddname_tmp, 10,
+ &num_crtc);
+
+ if (!res) {
+ if (num_crtc < 1)
+ num_crtc = 1;
+ if (num_crtc > 6)
+ num_crtc = 6;
+ adev->mode_info.num_crtc = num_crtc;
+ } else {
+ adev->mode_info.num_crtc = 1;
+ }
break;
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display);
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
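
amdgpu_virtual_display entries may now carry a per-device CRTC count, 'pci-address,crtcs;pci-address,crtcs', clamped to the range 1..6 with 1 as the default. An illustrative kernel command line setting (the PCI addresses are examples, not from this diff):

	amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0

This enables virtual display on both devices, with two CRTCs on the first and the default single CRTC on the second.
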
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_whether_enable_virtual_display(adev);
+ amdgpu_device_enable_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_status = kcalloc(adev->num_ip_blocks,
- sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
- if (adev->ip_block_status == NULL)
- return -ENOMEM;
-
- if (adev->ip_blocks == NULL) {
- DRM_ERROR("No IP blocks found!\n");
- return r;
- }
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else {
- if (adev->ip_blocks[i].funcs->early_init) {
- r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->early_init) {
+ r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
}
}
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].sw = true;
+ adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
/* gmc hw init is done early */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
continue;
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->late_init) {
- r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].late_initialized = true;
+ adev->ip_blocks[i].status.late_initialized = true;
}
/* skip CG for VCE/UVD, it's handled specially */
- if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
amdgpu_vram_scratch_fini(adev);
}
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].sw = false;
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.sw = false;
+ adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].late_initialized)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
- if (adev->ip_blocks[i].funcs->late_fini)
- adev->ip_blocks[i].funcs->late_fini((void *)adev);
- adev->ip_block_status[i].late_initialized = false;
+ if (adev->ip_blocks[i].version->funcs->late_fini)
+ adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].status.late_initialized = false;
}
return 0;
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (i != AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
- r = adev->ip_blocks[i].funcs->suspend(adev);
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->resume(adev);
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1599,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
- adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_status);
- adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
r = amdgpu_suspend(adev);
- /* evict remaining vram memory */
+	/* evict remaining vram memory
+	 * This second call evicts the GART page table using the CPU.
+	 */
amdgpu_bo_evict_vram(adev);
pci_save_state(dev->pdev);
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
bool asic_hang = false;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_block_status[i].hang =
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
- if (adev->ip_block_status[i].hang) {
- DRM_INFO("IP block:%d is hang!\n", i);
+ if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+ adev->ip_blocks[i].status.hang =
+ adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang) {
+ DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
if (r)
return r;
}
@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
- if (adev->ip_block_status[i].hang) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need full reset!\n");
return true;
}
@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->soft_reset) {
- r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
if (r)
return r;
}
@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->post_soft_reset)
- r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->post_soft_reset)
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
if (r)
return r;
}
@@ -2193,7 +2215,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
- struct fence **fence)
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2312,30 +2334,30 @@ retry:
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
- struct fence *fence = NULL, *next = NULL;
+ struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
					WARN(r, "recovery from shadow isn't completed\n");
break;
}
}
- fence_put(fence);
+ dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
			WARN(r, "recovery from shadow isn't completed\n");
}
- fence_put(fence);
+ dma_fence_put(fence);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
*pos &= 0x3FFFF;
if (use_bank) {
- if (sh_bank >= adev->gfx.config.max_sh_per_se ||
- se_bank >= adev->gfx.config.max_shader_engines)
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+	/* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= 0x3FFFF;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size -= 4;
}
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
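
Register writes through debugfs now honor the same *pos encoding as reads: bit 62 enables GRBM bank selection, bits 24/34/44 carry 10-bit SE/SH/instance indices (0x3FF selects all), bit 23 takes the PM mutex, and the low bits address the register. A userspace sketch composing such an offset; the field layout mirrors the decode above, the debugfs path assumes the usual dri/<minor> location, and error handling is trimmed:

	/* Sketch (userspace): banked register write via debugfs.
	 * Assumes 64-bit off_t (build with -D_FILE_OFFSET_BITS=64).
	 */
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	static int example_banked_write(uint64_t reg_offset, uint64_t se,
					uint64_t sh, uint64_t instance,
					uint32_t value)
	{
		uint64_t pos = (1ULL << 62) |		/* enable bank select */
			       (se << 24) | (sh << 34) | (instance << 44) |
			       (reg_offset & 0x3FFFF);
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_WRONLY);
		ssize_t n = pwrite(fd, &value, 4, (off_t)pos);

		close(fd);
		return n == 4 ? 0 : -1;
	}
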
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? 4 : r;
}
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+	ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0x7F);
+ se = ((*pos >> 7) & 0xFF);
+ sh = ((*pos >> 15) & 0xFF);
+ cu = ((*pos >> 23) & 0xFF);
+ wave = ((*pos >> 31) & 0xFF);
+ simd = ((*pos >> 37) & 0xFF);
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
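+
The new amdgpu_wave file returns wave status words gathered by the gfx backend; the file position packs the target as a dword offset (bits 0-6), then SE, SH, CU, wave and SIMD starting at bits 7, 15, 23, 31 and 37, each decoded as an 8-bit field. A userspace sketch composing that position; note that the wave and SIMD fields overlap by two bits as decoded above, so keep both values small:

	/* Sketch (userspace): build the read offset for amdgpu_wave. */
	#include <stdint.h>

	static uint64_t example_wave_pos(uint64_t se, uint64_t sh, uint64_t cu,
					 uint64_t wave, uint64_t simd)
	{
		/* dword offset within the returned data occupies bits 0-6 */
		return (se << 7) | (sh << 15) | (cu << 23) |
		       (wave << 31) | (simd << 37);
	}
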
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
+ "amdgpu_wave",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..741144fcc7bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
- fence_put(f);
+ dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct fence **f)
+ struct dma_fence **f)
{
- struct fence *fence= *f;
+	struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
- if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
- struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
- struct drm_crtc *crtc = &amdgpuCrtc->base;
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned i;
int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
- if (amdgpuCrtc->enabled &&
+ if (amdgpu_crtc->enabled &&
(amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
- amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -244,9 +244,9 @@ unreserve:
cleanup:
amdgpu_bo_unref(&work->old_abo);
- fence_put(work->excl);
+ dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
- fence_put(work->shared[i]);
+ dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9915e3..6ca0333ca4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
- for (i = 0; i < states->numEntries; i++) {
- if (i >= AMDGPU_MAX_VCE_LEVELS)
- break;
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
return encoded_lanes[lanes];
}
+
+struct amd_vce_state *
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
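
amdgpu_get_vce_clock_state() bounds-checks against num_of_vce_states, so callers can iterate until it returns NULL. A sketch, assuming amd_vce_state carries evclk/ecclk fields as defined in amd_shared.h:

	/* Sketch: walk the VCE clock states parsed from the power table. */
	static void example_dump_vce_states(struct amdgpu_device *adev)
	{
		struct amd_vce_state *state;
		unsigned int i;

		for (i = 0; (state = amdgpu_get_vce_clock_state(adev, i)); i++)
			DRM_INFO("vce state %u: evclk %u ecclk %u\n",
				 i, state->evclk, state->ecclk);
	}
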
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96c2619..bd85e35998e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,446 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+enum amdgpu_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_EXTERNAL,
+ THERMAL_TYPE_EXTERNAL_GPIO,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+ THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
+ THERMAL_TYPE_SI,
+ THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+ THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+ AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+ AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+ AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+ AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+struct amdgpu_ps {
+ u32 caps; /* vbios flags */
+ u32 class; /* vbios flags */
+ u32 class2; /* vbios flags */
+ /* UVD clocks */
+ u32 vclk;
+ u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
+ bool vce_active;
+ enum amd_vce_level vce_level;
+ /* asic priv */
+ void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+ /* thermal interrupt work */
+ struct work_struct work;
+ /* low temperature threshold */
+ int min_temp;
+ /* high temperature threshold */
+ int max_temp;
+ /* was last interrupt low to high or high to low */
+ bool high_to_low;
+ /* interrupt source */
+ struct amdgpu_irq_src irq;
+};
+
+enum amdgpu_clk_action
+{
+ AMDGPU_SCLK_UP = 1,
+ AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+ u32 sclk;
+ u32 mclk;
+ enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci;
+};
+
+struct amdgpu_clock_array {
+ u32 count;
+ u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+ u32 clk;
+ u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+ u32 count;
+ struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
+};
+
+struct amdgpu_cac_leakage_table {
+ u32 count;
+ union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+ u16 voltage;
+ u32 sclk;
+ u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+ u32 count;
+ struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+ u8 ppm_design;
+ u16 cpu_core_number;
+ u32 platform_tdp;
+ u32 small_ac_platform_tdp;
+ u32 platform_tdc;
+ u32 small_ac_platform_tdc;
+ u32 apu_tdp;
+ u32 dgpu_tdp;
+ u32 dgpu_ulv_power;
+ u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+ struct amdgpu_clock_array valid_sclk_values;
+ struct amdgpu_clock_array valid_mclk_values;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+ u32 mclk_sclk_ratio;
+ u32 sclk_mclk_delta;
+ u16 vddc_vddci_delta;
+ u16 min_vddc_for_pcie_gen2;
+ struct amdgpu_cac_leakage_table cac_leakage_table;
+ struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+ struct amdgpu_ppm_table *ppm_table;
+ struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+ u16 t_min;
+ u16 t_med;
+ u16 t_high;
+ u16 pwm_min;
+ u16 pwm_med;
+ u16 pwm_high;
+ u8 t_hyst;
+ u32 cycle_delay;
+ u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
+ bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+ AMDGPU_PCIE_GEN1 = 0,
+ AMDGPU_PCIE_GEN2 = 1,
+ AMDGPU_PCIE_GEN3 = 2,
+ AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+ AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+ AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+ AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+ AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+ int (*get_temperature)(struct amdgpu_device *adev);
+ int (*pre_set_power_state)(struct amdgpu_device *adev);
+ int (*set_power_state)(struct amdgpu_device *adev);
+ void (*post_set_power_state)(struct amdgpu_device *adev);
+ void (*display_configuration_changed)(struct amdgpu_device *adev);
+ u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+ u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+ void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+ void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+ int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+ bool (*vblank_too_short)(struct amdgpu_device *adev);
+ void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+ void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+ void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+ void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+ u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+ int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+ int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*check_state_equal)(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal);
+
+ struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+ (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_mclk((adev), (l)))
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+ (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+ (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+ (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
+ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps), (rps), (equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+ (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+ int num_ps;
+ /* current power state that is active */
+ struct amdgpu_ps *current_ps;
+ /* requested power state */
+ struct amdgpu_ps *requested_ps;
+ /* boot up power state */
+ struct amdgpu_ps *boot_ps;
+ /* default uvd power state */
+ struct amdgpu_ps *uvd_ps;
+ /* vce requirements */
+ u32 num_of_vce_states;
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+ enum amd_vce_level vce_level;
+ enum amd_pm_state_type state;
+ enum amd_pm_state_type user_state;
+ enum amd_pm_state_type last_state;
+ enum amd_pm_state_type last_user_state;
+ u32 platform_caps;
+ u32 voltage_response_time;
+ u32 backbias_response_time;
+ void *priv;
+ u32 new_active_crtcs;
+ int new_active_crtc_count;
+ u32 current_active_crtcs;
+ int current_active_crtc_count;
+ struct amdgpu_dpm_dynamic_state dyn_state;
+ struct amdgpu_dpm_fan fan;
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ u32 near_tdp_limit_adjusted;
+ u32 sq_ramping_threshold;
+ u32 cac_leakage;
+ u16 tdp_od_limit;
+ u32 tdp_adjustment;
+ u16 load_line_slope;
+ bool power_control;
+ bool ac_power;
+ /* special states active */
+ bool thermal_active;
+ bool uvd_active;
+ bool vce_active;
+ /* thermal handling */
+ struct amdgpu_dpm_thermal thermal;
+ /* forced levels */
+ enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+ struct mutex mutex;
+ u32 current_sclk;
+ u32 current_mclk;
+ u32 default_sclk;
+ u32 default_mclk;
+ struct amdgpu_i2c_chan *i2c_bus;
+ /* internal thermal controller on rv6xx+ */
+ enum amdgpu_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
+ /* dpm */
+ bool dpm_enabled;
+ bool sysfs_initialized;
+ struct amdgpu_dpm dpm;
+ const struct firmware *fw; /* SMC firmware */
+ uint32_t fw_version;
+ const struct amdgpu_dpm_funcs *funcs;
+ uint32_t pcie_gen_mask;
+ uint32_t pcie_mlw_mask;
+	struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */
+};
+
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
#endif
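
All of the pp_enabled macros above share one dispatch shape; expanded by hand for get_temperature, the pattern is roughly this (a sketch, not a literal preprocessor expansion):

	/* Route to the powerplay component when it is enabled,
	 * otherwise fall back to the legacy per-asic dpm callback. */
	int temp;

	if (adev->pp_enabled)
		temp = adev->powerplay.pp_funcs->get_temperature(
				adev->powerplay.pp_handle);
	else
		temp = adev->pm.funcs->get_temperature(adev);
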
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3dde..6bb4d9e9afe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
* - 3.6.0 - kmd involves use of CONTEXT_CONTROL in ring buffer.
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for querying memory info about VRAM and GTT.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+MODULE_PARM_DESC(vram_page_split, "Number of pages after which we split VRAM allocations (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+ "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+ {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
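
How the new vram_page_split parameter is consumed lives in the VRAM manager, outside this excerpt; an assumed sanity check might look like this (names and bounds are illustrative only):

	/* Hypothetical consumer-side validation: -1 disables splitting,
	 * implausibly small values fall back to the module default. */
	unsigned pages_per_node;

	if (amdgpu_vram_page_split == -1)
		pages_per_node = 0;			/* no splitting */
	else if (amdgpu_vram_page_split < 16)
		pages_per_node = 1024;			/* default */
	else
		pages_per_node = amdgpu_vram_page_split;
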
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 8d01aa24d68a..38bdc2d300a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -152,7 +152,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..57552c79ec58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
- struct fence base;
+ struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
/*
* Cast helper
*/
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct fence *old, **ptr;
+ struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
- fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
+ dma_fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
- if (old && !fence_is_signaled(old)) {
+ if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
- fence_wait(old, false);
+ dma_fence_wait(old, false);
}
- rcu_assign_pointer(*ptr, fence_get(&fence->base));
+ rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = fence_signal(fence);
+ r = dma_fence_signal(fence);
if (!r)
- FENCE_TRACE(fence, "signaled from irq context\n");
+ DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
- fence_put(fence);
+ dma_fence_put(fence);
} while (last_seq != seq);
}
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
int r;
if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
- if (!fence || !fence_get_rcu(fence)) {
+ if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
- r = fence_wait(fence, false);
- fence_put(fence);
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[j]);
+ dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
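
The conversion in this file is purely mechanical; every hunk follows the same one-to-one rename:

	struct fence            ->  struct dma_fence
	struct fence_ops        ->  struct dma_fence_ops
	fence_init/get/put      ->  dma_fence_init/get/put
	fence_wait/fence_signal ->  dma_fence_wait/dma_fence_signal
	fence_is_signaled       ->  dma_fence_is_signaled
	fence_get_rcu           ->  dma_fence_get_rcu
	fence_default_wait      ->  dma_fence_default_wait
	FENCE_TRACE             ->  DMA_FENCE_TRACE
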
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242fc13b..964d2a946ed5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 3ad0bf6ce3e4..cd62f6ffde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appears in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = abo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -468,6 +469,16 @@ out:
return r;
}
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+ unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+	/* if anything is swapped out don't swap it in here,
+	 * just abort and wait for the next CS */
+
+ return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
/**
* amdgpu_gem_va_update_vm - update the bo_va in its VM
*
@@ -478,7 +489,8 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va, uint32_t operation)
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
@@ -501,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
@@ -509,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
- list_for_each_entry(entry, &duplicates, head) {
- domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == AMDGPU_GEM_DOMAIN_CPU)
- goto error_unreserve;
- }
+ r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+ NULL);
+ if (r)
+ goto error_unreserve;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
@@ -536,8 +544,6 @@ error_print:
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
-
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -547,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct ttm_validate_buffer tv, tv_pd;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
@@ -592,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
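
amdgpu_vm_validate_pt_bos() takes a callback plus an opaque cookie; the NULL in the hunk above means this caller carries no state. A hypothetical caller that does (struct name and field are made up):

	struct va_check_ctx {
		unsigned num_evicted;		/* hypothetical counter */
	};

	static int my_va_check(void *param, struct amdgpu_bo *bo)
	{
		struct va_check_ctx *ctx = param;
		unsigned domain =
			amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		if (domain == AMDGPU_GEM_DOMAIN_CPU) {
			ctx->num_evicted++;
			return -ERESTARTSYS;	/* abort, retry next CS */
		}
		return 0;
	}
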
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd95c70..01a42b6a69a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_gfx.h"
/*
* GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e154c09..e02044086445 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+ unsigned max_sh);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c84427778..3c634f02a3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
return -ENOMEM;
node->start = AMDGPU_BO_INVALID_OFFSET;
+ node->size = mem->num_pages;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c169..216a9572d946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f)
+ struct dma_fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f)
+ struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
- num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+ alloc_size = ring->funcs->emit_frame_size + num_ibs *
+ ring->funcs->emit_ib_size;
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
- if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
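
A worked example for the new allocation math (the numbers are illustrative only):

	/* emit_frame_size = 256 dw, emit_ib_size = 8 dw, num_ibs = 2:
	 *   alloc_size = 256 + 2 * 8 = 272 dw,
	 * which amdgpu_ring_alloc() then pads up to the ring alignment. */
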
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct fence *f;
+ struct dma_fence *f;
unsigned i;
/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f)
+ struct dma_fence **f)
{
int r;
job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->owner = owner;
job->fence_ctx = entity->fence_context;
- *f = fence_get(&job->base.s_fence->finished);
+ *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
}
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
- struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
int r;
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
/* if gpu reset, hw fence will be replaced here */
- fence_put(job->fence);
- job->fence = fence_get(fence);
+ dma_fence_put(job->fence);
+ job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338..78392671046a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid) {
- ip.hw_ip_version_major = adev->ip_blocks[i].major;
- ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid) {
+ ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+ ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
ip.capabilities_flags = 0;
ip.available_rings = ring_mask;
ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid &&
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_MEMORY: {
+ struct drm_amdgpu_memory_info mem;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->mc.real_vram_size;
+ mem.vram.usable_heap_size =
+ adev->mc.real_vram_size - adev->vram_pin_size;
+ mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->mc.visible_vram_size;
+ mem.cpu_accessible_vram.usable_heap_size =
+ adev->mc.visible_vram_size -
+ (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ atomic64_read(&adev->vram_vis_usage);
+ mem.cpu_accessible_vram.max_allocation =
+ mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+ mem.gtt.total_heap_size = adev->mc.gtt_size;
+ mem.gtt.usable_heap_size =
+ adev->mc.gtt_size - adev->gart_pin_size;
+ mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+ return copy_to_user(out, &mem,
+ min((size_t)size, sizeof(mem)))
+ ? -EFAULT : 0;
+ }
case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size;
uint32_t *regs;
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+ if (amdgpu_sriov_vf(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+ unsigned i;
+ struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ struct amd_vce_state *vce_state;
+
+ for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+ vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+ if (vce_state) {
+ vce_clk_table.entries[i].sclk = vce_state->sclk;
+ vce_clk_table.entries[i].mclk = vce_state->mclk;
+ vce_clk_table.entries[i].eclk = vce_state->evclk;
+ vce_clk_table.num_valid_entries++;
+ }
+ }
+
+ return copy_to_user(out, &vce_clk_table,
+ min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
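
The 3/4 max_allocation heuristic from the AMDGPU_INFO_MEMORY hunk above, worked through with illustrative numbers:

	/* An 8 GiB VRAM board with 512 MiB pinned would report
	 *   vram.total_heap_size  = 8192 MiB
	 *   vram.usable_heap_size = 8192 - 512   = 7680 MiB
	 *   vram.max_allocation   = 7680 * 3 / 4 = 5760 MiB
	 * steering userspace away from single allocations that could
	 * never be placed. */
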
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f7..7ea3cacf9f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ free_rmn:
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct list_head *head;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7d060b..1e23334b07fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -341,8 +341,6 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
- struct hrtimer vblank_timer;
- enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +411,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
+ /* for virtual dce */
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f..f0a0513ef4c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+ amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
- mutex_lock(&bo->adev->shadow_list_lock);
+ mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
- mutex_unlock(&bo->adev->shadow_list_lock);
+ mutex_unlock(&adev->shadow_list_lock);
}
kfree(bo->metadata);
kfree(bo);
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned lpfn = 0;
+
+ /* This forces a reallocation if the flag wasn't set before */
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
adev->mc.visible_vram_size < adev->mc.real_vram_size) {
places[c].fpfn = visible_pfn;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_TOPDOWN;
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
}
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(abo->adev, &abo->placement,
- abo->placements, domain, abo->flags);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+ amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+ domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int r;
r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo);
return r;
}
- bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -383,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
- struct fence *fence;
+ struct dma_fence *fence;
if (adev->mman.buffer_funcs_ring == NULL ||
!adev->mman.buffer_funcs_ring->ready) {
@@ -403,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
amdgpu_bo_unreserve(bo);
- fence_put(bo->tbo.moving);
- bo->tbo.moving = fence_get(fence);
- fence_put(fence);
+ dma_fence_put(bo->tbo.moving);
+ bo->tbo.moving = dma_fence_get(fence);
+ dma_fence_put(fence);
}
*bo_ptr = bo;
@@ -491,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -523,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
- bo->adev->mc.visible_vram_size)) {
+ adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
- bo->adev->mc.visible_vram_size))
+ adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
- lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ dev_err(adev->dev, "%p bind failed\n", bo);
goto error;
}
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ adev->gart_pin_size += amdgpu_bo_size(bo);
}
error:
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
if (!bo->pin_count) {
- dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+ dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(abo->adev, abo);
+ amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
unsigned long offset, size, lpfn;
int i, r;
@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
- adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ /* TODO: figure out how to map scattered VRAM to the CPU */
+ if ((offset + size) <= adev->mc.visible_vram_size &&
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
@@ -926,7 +941,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset;
}
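
The bo->adev back-pointer is gone throughout this file; amdgpu_ttm_adev() derives the device from the TTM bo_device instead. Its definition is not part of this diff, but presumably amounts to a container_of() along these lines:

	/* Assumed shape of the helper: ttm_bo_device is embedded in
	 * amdgpu_mman, which is embedded in amdgpu_device. */
	static inline struct amdgpu_device *
	amdgpu_ttm_adev(struct ttm_bo_device *bdev)
	{
		return container_of(bdev, struct amdgpu_device, mman.bdev);
	}
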
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..5cbf59ec0f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
- dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+ dev_err(adev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence, bool direct);
+ struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct);
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct fence *fence);
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908bdc88..274f3309aec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -986,10 +986,10 @@ restart_search:
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
- int i;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
+ bool equal;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- /* no need to reprogram if nothing changed unless we are on BTC+ */
- if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
- /* vce just modifies an existing state so force a change */
- if (ps->vce_active != adev->pm.dpm.vce_active)
- goto force;
- if (adev->flags & AMD_IS_APU) {
- /* for APUs if the num crtcs changed but state is the same,
- * all we need to do is update the display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- }
- return;
- } else {
- /* for BTC+ if the num crtcs hasn't changed and state is the same,
- * nothing to do, if the num crtcs is > 1 and state is the same,
- * update display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs ==
- adev->pm.dpm.current_active_crtcs) {
- return;
- } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
- (adev->pm.dpm.new_active_crtc_count > 1)) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- return;
- }
- }
- }
-
-force:
if (amdgpu_dpm == 1) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1019,21 @@ force:
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
+ amdgpu_dpm_display_configuration_changed(adev);
+
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
+	if (amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
+					 adev->pm.dpm.requested_ps, &equal))
+		equal = false;
- /* wait for the rings to drain */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ if (equal)
+ return;
- /* program the new power state */
amdgpu_dpm_set_power_state(adev);
-
- /* update current power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
amdgpu_dpm_post_set_power_state(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ int i = 0;
if (!adev->pm.dpm_enabled)
return;
- if (adev->pp_enabled) {
- int i = 0;
+ amdgpu_display_bandwidth_update(adev);
- amdgpu_display_bandwidth_update(adev);
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+ if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
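
Condensed, the reworked amdgpu_dpm_change_power_state_locked() above reduces to this control flow (a sketch; the amdgpu_dpm == 1 debug prints are omitted):

	/*
	 *   pick requested ps for the target dpm_state
	 *   ps->vce_active = dpm.vce_active
	 *   display_configuration_changed()
	 *   pre_set_power_state()
	 *   if (check_state_equal(current, requested, &equal))
	 *           equal = false;     // comparison failed: assume changed
	 *   if (equal)
	 *           return;            // nothing to reprogram
	 *   set_power_state();
	 *   post_set_power_state();
	 */
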
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff822aa7..fa6baf31a35d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47cfd99..c0c4bfdcdb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
*
*/
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e903cd62..4c992826d2d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i;
for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->align_mask)
- ib->ptr[ib->length_dw++] = ring->nop;
+ while (ib->length_dw & ring->funcs->align_mask)
+ ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count;
/* We pad to match fetch size */
- count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
- count %= ring->align_mask + 1;
+ count = ring->funcs->align_mask + 1 -
+ (ring->wptr & ring->funcs->align_mask);
+ count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type)
+ unsigned max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type)
{
int r;
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
- ring->align_mask = align_mask;
- ring->nop = nop;
- ring->type = ring_type;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
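
With type, align_mask and nop moved into amdgpu_ring_funcs, each backend now declares them once in its const table rather than copying them into every struct amdgpu_ring; a hedged sketch (the field values are illustrative, not taken from any real backend):

	static const struct amdgpu_ring_funcs example_ring_funcs = {
		.type = AMDGPU_RING_TYPE_GFX,
		.align_mask = 0xff,	/* pad submissions to 256 dw */
		.nop = 0x80000000,	/* illustrative NOP packet */
		/* .get_rptr, .get_wptr, .set_wptr, ... unchanged */
	};
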
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 000000000000..f2ad49c8e85b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS 16
+#define AMDGPU_MAX_GFX_RINGS 1
+#define AMDGPU_MAX_COMPUTE_RINGS 8
+#define AMDGPU_MAX_VCE_RINGS 3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT (1 << 1)
+
+enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA,
+ AMDGPU_RING_TYPE_UVD,
+ AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+ uint32_t sync_seq;
+ atomic_t last_seq;
+ bool initialized;
+ struct amdgpu_irq_src *irq_src;
+ unsigned irq_type;
+ struct timer_list fallback_timer;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct dma_fence **fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+ enum amdgpu_ring_type type;
+ uint32_t align_mask;
+ u32 nop;
+
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct amdgpu_ring *ring);
+ u32 (*get_wptr)(struct amdgpu_ring *ring);
+ void (*set_wptr)(struct amdgpu_ring *ring);
+ /* validating and patching of IBs */
+ int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ /* constants to calculate how many DW are needed for an emit */
+ unsigned emit_frame_size;
+ unsigned emit_ib_size;
+ /* command emit functions */
+ void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
+ void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+ void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+ /* testing functions */
+ int (*test_ring)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+ /* insert NOP packets */
+ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ /* pad the indirect buffer to the necessary number of dw */
+ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+ void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+ struct amdgpu_device *adev;
+ const struct amdgpu_ring_funcs *funcs;
+ struct amdgpu_fence_driver fence_drv;
+ struct amd_gpu_scheduler sched;
+
+ struct amdgpu_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr_offs;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ unsigned max_dw;
+ int count_dw;
+ uint64_t gpu_addr;
+ uint32_t ptr_mask;
+ bool ready;
+ u32 idx;
+ u32 me;
+ u32 pipe;
+ u32 queue;
+ struct amdgpu_bo *mqd_obj;
+ u32 doorbell_index;
+ bool use_doorbell;
+ unsigned wptr_offs;
+ unsigned fence_offs;
+ uint64_t current_ctx;
+ char name[16];
+ unsigned cond_exe_offs;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ unsigned max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
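The align_mask arithmetic that amdgpu_ring_commit() and amdgpu_ring_generic_pad_ib() rely on is easy to get wrong; a small stand-alone check of the same expressions (mask values are examples, align_mask must be 2^n - 1):

#include <assert.h>
#include <stdint.h>

/* Same expressions as amdgpu_ring_commit(): pad the write pointer up
 * to the next multiple of (align_mask + 1), i.e. emit 0..align_mask
 * NOPs. */
static uint32_t pad_count(uint32_t wptr, uint32_t align_mask)
{
	uint32_t count = align_mask + 1 - (wptr & align_mask);

	return count % (align_mask + 1);
}

int main(void)
{
	assert(pad_count(256, 0xff) == 0);	/* already aligned */
	assert(pad_count(257, 0xff) == 255);	/* one dw past */
	assert(pad_count(511, 0xff) == 1);	/* one dw short */
	return 0;
}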
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..fd26c4b8d793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- fence_put(sa_bo->fence);
+ dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
- !fence_is_signaled(sa_bo->fence)) {
+ !dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct fence **fences,
+ struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!fence_is_signaled(sa_bo->fence)) {
+ if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
- fences[count++] = fence_get(fences[i]);
+ fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
- t = fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT);
+ t = dma_fence_wait_any_timeout(fences, count, false,
+ MAX_SCHEDULE_TIMEOUT);
for (i = 0; i < count; ++i)
- fence_put(fences[i]);
+ dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !fence_is_signaled(fence)) {
+ if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
- (*sa_bo)->fence = fence_get(fence);
+ (*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
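The fence to dma_fence conversion here is mechanical, but the reference rule it preserves is worth restating; a minimal sketch of the hold-one-reference discipline the sub-allocator follows (helper name invented):

/* A stored fence pointer owns exactly one reference; replacing it
 * releases the old one.  NULL is tolerated on both sides, as by
 * dma_fence_get()/dma_fence_put() themselves. */
static void example_store_fence(struct dma_fence **slot,
				struct dma_fence *fence)
{
	dma_fence_put(*slot);		/* drop the previous holder */
	*slot = dma_fence_get(fence);	/* take a ref for the new one */
}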
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
- struct fence *fence;
+ struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
*
* Test if the fence was issued by us.
*/
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
*
* Extract who originally created the fence.
*/
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
*
* Either keep the existing fence or the new one, depending which one is later.
*/
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+ struct dma_fence *fence)
{
- if (*keep && fence_is_later(*keep, fence))
+ if (*keep && dma_fence_is_later(*keep, fence))
return;
- fence_put(*keep);
- *keep = fence_get(fence);
+ dma_fence_put(*keep);
+ *keep = dma_fence_get(fence);
}
/**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f)
+ struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
- e->fence = fence_get(f);
+ e->fence = dma_fence_get(f);
return 0;
}
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
void *owner)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
+ struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
- if (fence_is_signaled(&s_fence->scheduled))
+ if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
- if (fence_is_signaled(f)) {
+ if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
- fence_put(f);
+ dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
*
* Get and removes the next fence from the sync object not signaled yet.
*/
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- struct fence *f;
+ struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
- if (!fence_is_signaled(f))
+ if (!dma_fence_is_signaled(f))
return f;
- fence_put(f);
+ dma_fence_put(f);
}
return NULL;
}
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
- fence_put(e->fence);
+ dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
- fence_put(sync->last_vm_update);
+ dma_fence_put(sync->last_vm_update);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 000000000000..605be266e07f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct dma_fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+ DECLARE_HASHTABLE(fences, 4);
+ struct dma_fence *last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+ struct amdgpu_sync *sync,
+ struct reservation_object *resv,
+ void *owner);
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
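A hedged sketch of the typical lifecycle of this container around one submission; the caller is hypothetical and error handling is elided:

static int example_sync_usage(struct amdgpu_device *adev,
			      struct reservation_object *resv,
			      struct dma_fence *extra)
{
	struct amdgpu_sync sync;
	struct dma_fence *f;
	int r;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(adev, &sync, resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (!r)
		r = amdgpu_sync_fence(adev, &sync, extra);

	/* drain the unsignaled fences, e.g. to emit waits on them */
	while ((f = amdgpu_sync_get_fence(&sync)) != NULL) {
		/* ... schedule against f ... */
		dma_fence_put(f);
	}

	amdgpu_sync_free(&sync);
	return r;
}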
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@@ -216,7 +216,7 @@ out_lclean:
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..1821c05484d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
- struct amdgpu_mman *mman;
- struct amdgpu_device *adev;
-
- mman = container_of(bdev, struct amdgpu_mman, bdev);
- adev = container_of(mman, struct amdgpu_device, mman);
- return adev;
-}
-
/*
* Global memory.
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
struct amdgpu_device *adev;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ if (adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* allocating address space for the BO.
*/
abo->placements[i].lpfn =
- abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
@@ -260,63 +251,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem,
+ uint64_t *addr)
{
- struct amdgpu_device *adev;
- struct amdgpu_ring *ring;
- uint64_t old_start, new_start;
- struct fence *fence;
int r;
- adev = amdgpu_get_adev(bo->bdev);
- ring = adev->mman.buffer_funcs_ring;
-
- switch (old_mem->mem_type) {
+ switch (mem->mem_type) {
case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, old_mem);
+ r = amdgpu_ttm_bind(bo, mem);
if (r)
return r;
case TTM_PL_VRAM:
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+ *addr = mm_node->start << PAGE_SHIFT;
+ *addr += bo->bdev->man[mem->mem_type].gpu_offset;
break;
default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ DRM_ERROR("Unknown placement %d\n", mem->mem_type);
return -EINVAL;
}
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, new_mem);
- if (r)
- return r;
- case TTM_PL_VRAM:
- new_start = (u64)new_mem->start << PAGE_SHIFT;
- new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- return -EINVAL;
- }
+ return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ struct drm_mm_node *old_mm, *new_mm;
+ uint64_t old_start, old_size, new_start, new_size;
+ unsigned long num_pages;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ old_mm = old_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+ if (r)
+ return r;
+ old_size = old_mm->size;
+
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence, false);
+ new_mm = new_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
if (r)
return r;
+ new_size = new_mm->size;
+
+ num_pages = new_mem->num_pages;
+ while (num_pages) {
+ unsigned long cur_pages = min(old_size, new_size);
+ struct dma_fence *next;
+
+ r = amdgpu_copy_buffer(ring, old_start, new_start,
+ cur_pages * PAGE_SIZE,
+ bo->resv, &next, false);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ num_pages -= cur_pages;
+ if (!num_pages)
+ break;
+
+ old_size -= cur_pages;
+ if (!old_size) {
+ r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+ &old_start);
+ if (r)
+ goto error;
+ old_size = old_mm->size;
+ } else {
+ old_start += cur_pages * PAGE_SIZE;
+ }
+
+ new_size -= cur_pages;
+ if (!new_size) {
+ r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+ &new_start);
+ if (r)
+ goto error;
+
+ new_size = new_mm->size;
+ } else {
+ new_start += cur_pages * PAGE_SIZE;
+ }
+ }
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
- fence_put(fence);
+ dma_fence_put(fence);
+ return r;
+
+error:
+ if (fence)
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
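Because VRAM buffers may now be split across several drm_mm nodes, the rewritten amdgpu_move_blit() walks the old and new node lists in lockstep, copying min(old_size, new_size) pages per step. A reduced stand-alone model of that walk:

#include <stdio.h>

/* Extents stand in for drm_mm nodes; sizes are in pages and the two
 * lists are assumed to cover the same total, as in the kernel code. */
struct extent { unsigned long start, size; };

static void copy_between(const struct extent *src,
			 const struct extent *dst,
			 unsigned long num_pages)
{
	unsigned long s_off = 0, d_off = 0;

	while (num_pages) {
		unsigned long s_left = src->size - s_off;
		unsigned long d_left = dst->size - d_off;
		unsigned long chunk = s_left < d_left ? s_left : d_left;

		if (chunk > num_pages)
			chunk = num_pages;
		printf("copy %lu pages: %lu -> %lu\n", chunk,
		       src->start + s_off, dst->start + d_off);

		num_pages -= chunk;
		s_off += chunk;
		d_off += chunk;
		if (!num_pages)
			break;
		if (s_off == src->size) { ++src; s_off = 0; }
		if (d_off == dst->size) { ++dst; d_off = 0; }
	}
}

int main(void)
{
	const struct extent src[] = { { 0, 4 }, { 64, 4 } };
	const struct extent dst[] = { { 128, 2 }, { 256, 6 } };

	copy_between(src, dst, 8);	/* emits 3 chunks: 2 + 2 + 4 */
	return 0;
}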
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_place placements;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
/* remember the eviction */
if (evict)
@@ -475,7 +518,7 @@ memcpy:
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -607,7 +650,7 @@ release_pages:
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned nents;
int r;
@@ -639,7 +682,7 @@ release_sg:
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
return res;
}
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+ unsigned long num_pages = bo->mem.num_pages;
+ struct drm_mm_node *node = bo->mem.mm_node;
+
+ /* Check each drm MM node individually */
+ while (num_pages) {
+ if (place->fpfn < (node->start + node->size) &&
+ !(place->lpfn && place->lpfn <= node->start))
+ return true;
+
+ num_pages -= node->size;
+ ++node;
+ }
+
+ return false;
+ }
+
+ return ttm_bo_eviction_valuable(bo, place);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
+ .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@@ -1247,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1294,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, NULL, fence);
- job->fence = fence_get(*fence);
+ job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job);
@@ -1315,9 +1384,9 @@ error_free:
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence)
+ struct dma_fence **fence)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_job *job;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
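For reference, the per-node test inside amdgpu_ttm_bo_eviction_valuable() isolated into one predicate; an open-ended placement window is encoded as lpfn == 0, as in the TTM place structs:

#include <stdbool.h>

/* Node [start, start + size) vs. place window [fpfn, lpfn). */
static bool node_overlaps_place(unsigned long start, unsigned long size,
				unsigned long fpfn, unsigned long lpfn)
{
	return fpfn < start + size && !(lpfn && lpfn <= start);
}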
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c..98ee384f0fca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252f3c78..0f0b38191fac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr;
ucode->kaddr = kptr;
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+ return 0;
+
header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0;
}
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+ uint64_t mc_addr, void *kptr)
+{
+ const struct gfx_firmware_header_v1_0 *header = NULL;
+ const struct common_firmware_header *comm_hdr = NULL;
+ uint8_t *src_addr = NULL;
+ uint8_t *dst_addr = NULL;
+
+ if (!ucode->fw)
+ return 0;
+
+ comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ dst_addr = ucode->kaddr +
+ ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+ PAGE_SIZE);
+ src_addr = (uint8_t *)ucode->fw->data +
+ le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+ (le32_to_cpu(header->jt_offset) * 4);
+ memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+ return 0;
+}
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve;
}
- err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
+ if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+ fw_buf_ptr + fw_offset);
+ fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+ }
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
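The MEC handling above gives the jump table its own page-aligned slot directly after the microcode; a small sketch of the resulting slot size, with illustrative macros standing in for the kernel's PAGE_SIZE and ALIGN():

#include <stdint.h>

#define EX_PAGE_SIZE	4096u
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Microcode first, then the jump table at the next page boundary,
 * each rounded up to a full page. */
static uint64_t example_mec_slot(uint32_t ucode_size_bytes,
				 uint32_t jt_size_dw)
{
	uint64_t jt_off = EX_ALIGN(ucode_size_bytes, EX_PAGE_SIZE);

	/* jt_size is counted in dwords in the gfx header, hence << 2 */
	return jt_off + EX_ALIGN((uint64_t)jt_size_dw << 2, EX_PAGE_SIZE);
}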
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4e28fa..a8a4230729f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G,
+ AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..fb270c7e7171 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
- fence_wait(fence, false);
- fence_put(fence);
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
+ parser->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw);
@@ -909,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (!bo->adev->uvd.address_64_bit) {
+ if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
}
@@ -960,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err_free;
@@ -975,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ttm_eu_fence_buffer_objects(&ticket, &head, f);
if (fence)
- *fence = fence_get(f);
+ *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
- fence_put(f);
+ dma_fence_put(f);
return 0;
@@ -993,7 +996,7 @@ err:
crash the vcpu so just try to emit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1042,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1128,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
*/
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -1154,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
- fence_put(fence);
+ dma_fence_put(fence);
error:
return r;
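The IB test depends on dma_fence_wait_timeout()'s three-way return convention; a hedged restatement of the folding used above (helper name invented):

#include <linux/errno.h>

/* dma_fence_wait_timeout(): <0 error, 0 timed out, >0 jiffies left. */
static long example_fold_wait(long r)
{
	if (r == 0)
		return -ETIMEDOUT;	/* fence never signaled */
	if (r < 0)
		return r;		/* the wait itself failed */
	return 0;			/* signaled in time */
}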
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f06..69b66b9e7f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint64_t dummy;
int i, r;
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
amdgpu_job_free(job);
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -476,12 +477,12 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
}
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp;
int i, r, idx = 0;
+ p->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
@@ -788,6 +792,96 @@ out:
}
/**
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ * @ib_idx: indirect buffer to parse
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ int session_idx = -1;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
+ uint32_t tmp, handle = 0;
+ int i, r = 0, idx = 0;
+
+ while (idx < ib->length_dw) {
+ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+ r = -EINVAL;
+ goto out;
+ }
+
+ switch (cmd) {
+ case 0x00000001: /* session */
+ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ session_idx = amdgpu_vce_validate_handle(p, handle,
+ &allocated);
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
+ break;
+
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ break;
+
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
+ break;
+
+ default:
+ break;
+ }
+
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ idx += len / 4;
+ }
+
+ if (allocated & ~created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ amdgpu_ib_free(p->adev, ib, NULL);
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
+ }
+
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
+ return r;
+}
+
+/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
error:
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
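The VM-mode parser added above validates sessions with nothing but three bitmasks; a reduced stand-alone model of the create-path bookkeeping (slot indices fit in 32 bits, as with the 16 VCE handle slots):

#include <stdint.h>

/* 'allocated' marks slots this IB may own on error cleanup,
 * 'created' and 'destroyed' record the commands seen so far. */
struct vce_sessions {
	uint32_t created, destroyed, allocated;
};

static int model_create(struct vce_sessions *s, int idx)
{
	s->created |= 1u << idx;
	if (s->destroyed & (1u << idx)) {
		/* destroy-then-create within one IB: slot is reusable */
		s->destroyed &= ~(1u << idx);
		s->allocated |= 1u << idx;
	} else if (!(s->allocated & (1u << idx))) {
		return -1;	/* handle already in use */
	}
	return 0;
}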
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df..d98041f7508d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c3..e480263387e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
*
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
uint64_t num_evictions;
unsigned i;
+ int r;
/* We only need to validate the page tables
* if they aren't already valid.
*/
num_evictions = atomic64_read(&adev->num_evictions);
if (num_evictions == vm->last_eviction_counter)
- return;
+ return 0;
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- list_add(&entry->tv.head, duplicates);
+ r = validate(param, bo);
+ if (r)
+ return r;
}
+ return 0;
}
/**
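The duplicates-list export is replaced by a callback walk; a hedged sketch of what a caller now provides. The callback body assumes the 4.9-era ttm_bo_validate() signature and is illustrative, not taken from the CS code:

static int example_validate_cb(void *param, struct amdgpu_bo *bo)
{
	/* e.g. move the BO back into an allowed domain */
	return ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
}

static int example_validate_vm(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm)
{
	return amdgpu_vm_validate_pt_bos(adev, vm,
					 example_validate_cb, NULL);
}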
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
}
spin_unlock(&glob->lru_lock);
}
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct fence *updates = sync->last_vm_update;
+ struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
- struct fence **fences;
+ struct dma_fence **fences;
unsigned i;
int r = 0;
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (&idle->list == &adev->vm_manager.ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct fence_array *array;
+ struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
- fence_get(fences[j]);
+ dma_fence_get(fences[j]);
- array = fence_array_create(i, fences, fence_context,
+ array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
- fence_put(fences[j]);
+ dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
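The idle-ID path relies on the dma_fence_array ownership contract: the constructor takes over the fences array and the references in it, which is why the loop above grabs an extra reference per fence first. A hedged sketch:

#include <linux/dma-fence-array.h>

/* Collapse n fences into one.  On success the array owns 'fences'
 * and its references; the caller must not put them individually.
 * signal_on_any = true (the VMID case above) completes when the
 * first member signals; false waits for all of them. */
static struct dma_fence *example_bundle(struct dma_fence **fences,
					unsigned n, u64 context,
					unsigned seqno, bool signal_on_any)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(n, fences, context, seqno,
				       signal_on_any);
	return array ? &array->base : NULL;
}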
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- fence_put(&array->base);
+ dma_fence_put(&array->base);
if (r)
goto error;
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
- struct fence *flushed;
+ struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
continue;
if (id->last_flush->context != fence_context &&
- !fence_is_signaled(id->last_flush))
+ !dma_fence_is_signaled(id->last_flush))
continue;
flushed = id->flushed_updates;
if (updates &&
- (!flushed || fence_is_later(updates, flushed)))
+ (!flushed || dma_fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID. Remember this submission as
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- fence_put(id->first);
- id->first = fence_get(fence);
+ dma_fence_put(id->first);
+ id->first = dma_fence_get(fence);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = NULL;
- fence_put(id->flushed_updates);
- id->flushed_updates = fence_get(updates);
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -341,9 +346,9 @@ error:
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- const struct amdgpu_ip_block_version *ip_block;
+ const struct amdgpu_ip_block *ip_block;
- if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
/* only compute rings */
return false;
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
if (!ip_block)
return false;
- if (ip_block->major <= 7) {
+ if (ip_block->version->major <= 7) {
/* gfx7 has no workaround */
return true;
- } else if (ip_block->major == 8) {
+ } else if (ip_block->version->major == 8) {
if (adev->gfx.mec_fw_version >= 673)
/* gfx8 is fixed in MEC firmware 673 */
return false;
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
- struct fence *fence;
+ struct dma_fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
return r;
mutex_lock(&adev->vm_manager.lock);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
@@ -537,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
unsigned entries;
@@ -578,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
goto error_free;
amdgpu_bo_fence(bo, fence, true);
- fence_put(fence);
+ dma_fence_put(fence);
return 0;
error_free:
@@ -612,32 +617,35 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- bool shadow)
+/*
+ * amdgpu_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
+ struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
- vm->page_directory;
- uint64_t pd_addr;
+ uint64_t pd_addr, shadow_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
- uint64_t last_pde = ~0, last_pt = ~0;
+ uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int r;
- if (!pd)
- return 0;
-
- r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
- if (r)
- return r;
-
- pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
@@ -645,6 +653,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* assume the worst case */
ndw += vm->max_pde_used * 6;
+ pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+ if (shadow) {
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ shadow_addr = amdgpu_bo_gpu_offset(shadow);
+ ndw *= 2;
+ } else {
+ shadow_addr = 0;
+ }
+
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
@@ -655,30 +674,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
- struct amdgpu_bo *shadow = bo->shadow;
+ struct amdgpu_bo *pt_shadow = bo->shadow;
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ r = amdgpu_ttm_bind(&pt_shadow->tbo,
+ &pt_shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
- if (!shadow) {
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
- } else {
- if (vm->page_tables[pt_idx].shadow_addr == pt)
- continue;
- vm->page_tables[pt_idx].shadow_addr = pt;
- }
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+
+ vm->page_tables[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +701,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
+ if (shadow)
+ amdgpu_vm_do_set_ptes(&params,
+ last_shadow,
+ last_pt, count,
+ incr,
+ AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
@@ -693,34 +715,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
count = 1;
last_pde = pde;
+ last_shadow = shadow_addr + pt_idx * 8;
last_pt = pt;
} else {
++count;
}
}
- if (count)
+ if (count) {
+ if (vm->page_directory->shadow)
+ amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
+ }
- if (params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ return 0;
+ }
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
- amdgpu_bo_fence(pd, fence, true);
- fence_put(vm->page_directory_fence);
- vm->page_directory_fence = fence_get(fence);
- fence_put(fence);
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- } else {
- amdgpu_job_free(job);
- }
+ amdgpu_bo_fence(vm->page_directory, fence, true);
+ dma_fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = dma_fence_get(fence);
+ dma_fence_put(fence);
return 0;
@@ -729,29 +761,6 @@ error_free:
return r;
}
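The command-buffer sizing in the function above is worth calling out: when a shadow page directory exists, every PDE write is emitted twice, so the worst-case dword count doubles. A sketch of the sizing logic, using only the constants visible in this hunk:

	ndw = 64;                      /* padding, etc. */
	ndw += vm->max_pde_used * 6;   /* assume the worst case: one update per PDE */
	if (shadow)
		ndw *= 2;              /* each write is duplicated into the shadow */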
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- int r;
-
- r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
- if (r)
- return r;
- return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_ring *ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
@@ -1045,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
- fence_put(*fence);
- *fence = fence_get(f);
+ dma_fence_put(*fence);
+ *fence = dma_fence_get(f);
}
- fence_put(f);
+ dma_fence_put(f);
return 0;
error_free:
@@ -1065,8 +1074,8 @@ error_free:
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
* @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1083,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags, uint64_t addr,
- struct fence **fence)
+ uint32_t flags,
+ struct drm_mm_node *nodes,
+ struct dma_fence **fence)
{
- const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
- uint64_t src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits, and those go here
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
- if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr + (addr >> 12) * 8;
- addr = 0;
+ pfn = mapping->offset >> PAGE_SHIFT;
+ if (nodes) {
+ while (pfn >= nodes->size) {
+ pfn -= nodes->size;
+ ++nodes;
+ }
}
- addr += mapping->offset;
- if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
- start, mapping->it.last,
- flags, addr, fence);
+ do {
+ uint64_t max_entries;
+ uint64_t addr, last;
+
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ max_entries = (nodes->size - pfn) *
+ (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ } else {
+ addr = 0;
+ max_entries = S64_MAX;
+ }
- while (start != mapping->it.last + 1) {
- uint64_t last;
+ if (pages_addr) {
+ if (flags == gtt_flags)
+ src = adev->gart.table_addr +
+ (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+ else
+ max_entries = min(max_entries, 16ull * 1024ull);
+ addr = 0;
+ } else if (flags & AMDGPU_PTE_VALID) {
+ addr += adev->vm_manager.vram_base_offset;
+ }
+ addr += pfn << PAGE_SHIFT;
- last = min((uint64_t)mapping->it.last, start + max_size - 1);
+ last = min((uint64_t)mapping->it.last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
+ pfn += last - start + 1;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+ }
start = last + 1;
- addr += max_size * AMDGPU_GPU_PAGE_SIZE;
- }
+
+ } while (unlikely(start != mapping->it.last + 1));
return 0;
}
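The drm_mm_node walk above replaces the old fixed 64 MiB chunking. A hypothetical helper showing the skip-to-offset step in isolation (name and shape assumed, mirroring the loop in the hunk):

static struct drm_mm_node *skip_to_pfn(struct drm_mm_node *nodes,
				       uint64_t *pfn)
{
	/* consume whole nodes until *pfn falls inside the current one */
	while (*pfn >= nodes->size) {
		*pfn -= nodes->size;
		++nodes;
	}
	return nodes;
}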
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
struct ttm_mem_reg *mem;
- struct fence *exclusive;
- uint64_t addr;
+ struct drm_mm_node *nodes;
+ struct dma_fence *exclusive;
int r;
if (clear) {
mem = NULL;
- addr = 0;
+ nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
mem = &bo_va->bo->tbo.mem;
- addr = (u64)mem->start << PAGE_SHIFT;
- switch (mem->mem_type) {
- case TTM_PL_TT:
+ nodes = mem->mm_node;
+ if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
- break;
-
- case TTM_PL_VRAM:
- addr += adev->vm_manager.vram_base_offset;
- break;
-
- default:
- break;
}
-
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == bo_va->bo->adev) ? flags : 0;
+ adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive,
gtt_flags, pages_addr, vm,
- mapping, flags, addr,
+ mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
return r;
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
- struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
- entry = &vm->page_tables[pt_idx].entry;
- if (entry->robj)
+ if (vm->page_tables[pt_idx].bo)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
}
}
- entry->robj = pt;
- entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
- entry->user_pages = NULL;
+ vm->page_tables[pt_idx].bo = pt;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1547,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
kfree(mapping);
}
- fence_put(bo_va->last_pt_update);
+ dma_fence_put(bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
- struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+ struct amdgpu_bo *pt = vm->page_tables[i].bo;
if (!pt)
continue;
@@ -1709,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
- fence_put(vm->page_directory_fence);
+ dma_fence_put(vm->page_directory_fence);
}
/**
@@ -1733,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
- adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
@@ -1755,8 +1772,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- fence_put(adev->vm_manager.ids[i].first);
+ dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
- fence_put(id->flushed_updates);
+ dma_fence_put(id->flushed_updates);
}
}
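All of the fence_*() to dma_fence_*() conversions in this file follow one refcounting pattern; a small sketch of the cache-the-latest-fence idiom used for page_directory_fence and last_pt_update (helper name assumed):

#include <linux/dma-fence.h>

static void cache_fence(struct dma_fence **slot, struct dma_fence *f)
{
	dma_fence_put(*slot);      /* drop the old reference (NULL-safe) */
	*slot = dma_fence_get(f);  /* take a reference on the new fence */
}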
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 000000000000..adbc2f5e5c7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM 16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID (1 << 0)
+#define AMDGPU_PTE_SYSTEM (1 << 1)
+#define AMDGPU_PTE_SNOOPED (1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+
+#define AMDGPU_PTE_READABLE (1 << 5)
+#define AMDGPU_PTE_WRITEABLE (1 << 6)
+
+#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1f) << 7)
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
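A hypothetical composition of the PTE flag macros above, e.g. a valid, snooped, read/write mapping using the default fragment size:

	uint32_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			 AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);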
+struct amdgpu_vm_pt {
+ struct amdgpu_bo *bo;
+ uint64_t addr;
+};
+
+struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root va;
+
+ /* protecting invalidated */
+ spinlock_t status_lock;
+
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
+ /* BOs cleared in the PT because of a move */
+ struct list_head cleared;
+
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+ /* contains the page directory */
+ struct amdgpu_bo *page_directory;
+ unsigned max_pde_used;
+ struct dma_fence *page_directory_fence;
+ uint64_t last_eviction_counter;
+
+ /* array of page tables, one for each page directory entry */
+ struct amdgpu_vm_pt *page_tables;
+
+ /* for id and flush management per ring */
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
+
+ /* protecting freed */
+ spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+ struct amd_sched_entity entity;
+
+ /* client id */
+ u64 client_id;
+};
+
+struct amdgpu_vm_id {
+ struct list_head list;
+ struct dma_fence *first;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vm_manager {
+ /* Handling of VMIDs */
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
+ uint32_t max_pfn;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+ /* vm pte handling */
+ const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+ struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_rings;
+ atomic_t vm_pte_next_ring;
+ /* client id counter */
+ atomic64_t client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ struct list_head *validated,
+ struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 000000000000..180eed7c8bca
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_vram_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm *mm = &mgr->mm;
+ struct drm_mm_node *nodes;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ unsigned i;
+ int r;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+ amdgpu_vram_page_split == -1) {
+ pages_per_node = ~0ul;
+ num_nodes = 1;
+ } else {
+ pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+ mem->page_alignment);
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
+ nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ pages_left = mem->num_pages;
+
+ spin_lock(&mgr->lock);
+ for (i = 0; i < num_nodes; ++i) {
+ unsigned long pages = min(pages_left, pages_per_node);
+ uint32_t alignment = mem->page_alignment;
+
+ if (pages == pages_per_node)
+ alignment = pages_per_node;
+ else
+ sflags |= DRM_MM_SEARCH_BEST;
+
+ r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+ alignment, 0,
+ place->fpfn, lpfn,
+ sflags, aflags);
+ if (unlikely(r))
+ goto error;
+
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+
+ mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = nodes;
+
+ return 0;
+
+error:
+ while (i--)
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+ kfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+}
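To make the split policy concrete, assume amdgpu_vram_page_split = 1024 and 4 KiB pages: a non-contiguous 1 GiB request then decomposes as follows (illustrative numbers only):

	unsigned long pages_per_node = 1024;           /* amdgpu_vram_page_split */
	unsigned long num_pages = (1UL << 30) >> 12;   /* 1 GiB = 262144 pages */
	unsigned long num_nodes = DIV_ROUND_UP(num_pages, pages_per_node);
	/* -> 256 nodes of 1024 pages each; contiguous BOs keep num_nodes == 1 */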
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+
+ if (!mem->mm_node)
+ return;
+
+ spin_lock(&mgr->lock);
+ while (pages) {
+ pages -= nodes->size;
+ drm_mm_remove_node(nodes);
+ ++nodes;
+ }
+ spin_unlock(&mgr->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+	.init		= amdgpu_vram_mgr_init,
+	.takedown	= amdgpu_vram_mgr_fini,
+	.get_node	= amdgpu_vram_mgr_new,
+	.put_node	= amdgpu_vram_mgr_del,
+	.debug		= amdgpu_vram_mgr_debug
+};
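How this table gets wired up is outside this diff; the assumed hook-up (living in amdgpu_ttm.c, not shown here) would look roughly like:

	man->func = &amdgpu_vram_mgr_func;   /* drive TTM_PL_VRAM via drm_mm */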
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f95e74..8c9bc75a9c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
+#include "atombios_crtc.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561..e9b1964d4e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
- if (pi->uvd_power_gated == gate)
- return;
-
pi->uvd_power_gated = gate;
ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
sclk = ps->performance_levels[0].sclk;
}
+ if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+ sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+ if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+ mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
if (rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;
+ if (amdgpu_ci_is_smc_running(adev)) {
+ DRM_INFO("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
break;
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp;
+ int ret = 0;
if (!gate) {
+ /* turn the clocks on when decoding */
+ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
if (pi->caps_uvd_dpm ||
(adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
WREG32_SMC(ixDPM_TABLE_475, tmp);
+ ret = ci_enable_uvd_dpm(adev, true);
+ } else {
+ ret = ci_enable_uvd_dpm(adev, false);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
- return ci_enable_uvd_dpm(adev, !gate);
+ return ret;
}
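The gating order above is deliberately symmetric: clocks come up before DPM is enabled, and DPM is disabled before clocks are gated. A sketch of the invariant with hypothetical helpers:

	if (!gate) {
		ungate_uvd_clocks();   /* power up first ... */
		enable_uvd_dpm();      /* ... then let DPM manage it */
	} else {
		disable_uvd_dpm();     /* stop DPM first ... */
		gate_uvd_clocks();     /* ... then it is safe to gate */
	}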
static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, true);
} else {
+ ret = ci_enable_vce_dpm(adev, false);
+ if (ret)
+ return ret;
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- if (ret)
- return ret;
-
- ret = ci_enable_vce_dpm(adev, false);
}
}
return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
int ret;
- if (amdgpu_ci_is_smc_running(adev))
- return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
ci_enable_voltage_control(adev);
ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
amdgpu_dpm_print_ps_status(adev, rps);
}
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+ const struct ci_pl *ci_cpl2)
+{
+ return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+ (ci_cpl1->sclk == ci_cpl2->sclk) &&
+ (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+ (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct ci_ps *ci_cps;
+ struct ci_ps *ci_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ ci_cps = ci_get_ps(cps);
+ ci_rps = ci_get_ps(rps);
+
+ if (ci_cps == NULL) {
+ *equal = false;
+ return 0;
+ }
+
+ if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < ci_cps->performance_level_count; i++) {
+ if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+ &(ci_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
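A sketch of how a generic caller might consume the new callback through the amdgpu_dpm_funcs table this patch extends (caller shape assumed):

	bool equal = false;

	if (adev->pm.funcs->check_state_equal)
		adev->pm.funcs->check_state_equal(adev, cps, rps, &equal);
	if (equal)
		return 0;   /* identical state, skip the reprogram */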
+
static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- ci_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+ adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+ adev->pm.dpm.last_state = adev->pm.dpm.state;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
+
}
+
return 0;
}
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
+ adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+ adev->pm.dpm.state = adev->pm.dpm.last_state;
mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.set_sclk_od = ci_dpm_set_sclk_od,
.get_mclk_od = ci_dpm_get_mclk_od,
.set_mclk_od = ci_dpm_set_mclk_od,
+ .check_state_equal = ci_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a93b79..302df85893ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* cik_asic_reset - soft reset GPU
*
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- cik_set_bios_scratch_engine_hung(adev, true);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = cik_gpu_pci_config_reset(adev);
- cik_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_HAWAII:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KAVERI:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ return 0;
+}
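amdgpu_ip_block_add() itself is not part of this hunk; its assumed shape is a simple append onto the device's ordered block list (order still matters, as the deleted tables noted):

int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *v)
{
	adev->ip_blocks[adev->num_ip_blocks++].version = v;  /* assumed layout */
	return 0;
}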
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7a0327..c4989f51ecef 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
#ifndef __CIK_H__
#define __CIK_H__
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f79a864..319b32cdea84 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
if (adev->irq.ih_funcs == NULL)
adev->irq.ih_funcs = &cik_ih_funcs;
}
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375ec244..1d9ddee2868e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
#ifndef __CIK_IH_H__
#define __CIK_IH_H__
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..4c34dbc7a254 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_NOP_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
- 6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
- 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
- .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
- .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
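With the emit sizes now static data in the funcs table, a consumer can reserve worst-case ring space up front instead of calling per-ring hooks; hypothetical usage:

	unsigned ndw = ring->funcs->emit_frame_size +
		       num_ibs * ring->funcs->emit_ib_size;
	r = amdgpu_ring_alloc(ring, ndw);   /* hypothetical sizing call */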
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c677b8..a4a8fe01410b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
#ifndef __CIK_SDMA_H__
#define __CIK_SDMA_H__
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852aea9e..6cbd913fd12e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e143730..352b5fad5a06 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
pi->current_ps = *ps;
pi->current_rps = *rps;
- pi->current_rps.ps_priv = ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
pi->requested_ps = *ps;
pi->requested_rps = *rps;
- pi->requested_rps.ps_priv = ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
+static int cz_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init,
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
.vblank_too_short = NULL,
.powergate_uvd = cz_dpm_powergate_uvd,
.powergate_vce = cz_dpm_powergate_vce,
+ .check_state_equal = cz_check_state_equal,
};
static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
if (NULL == adev->pm.funcs)
adev->pm.funcs = &cz_dpm_funcs;
}
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_dpm_ip_funcs,
+};
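cz_check_state_equal() is a stub: after guarding the NULL result pointer with -EINVAL, it always reports "not equal", so the DPM core will never skip a requested power-state transition on Carrizo. A hypothetical caller-side sketch of the contract this callback fills in; the surrounding names are assumed for illustration, not taken from this diff:

	bool equal = false;

	/* No callback, or callback error: assume a transition is needed. */
	if (!adev->pm.funcs->check_state_equal ||
	    adev->pm.funcs->check_state_equal(adev, cps, rps, &equal))
		equal = false;
	if (equal)
		return;	/* current == requested; skip reprogramming */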
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70b6432..fe7cbb24da7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &cz_ih_funcs;
}
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a2ecb9..14be7753221b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
#ifndef __CZ_IH_H__
#define __CZ_IH_H__
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
#endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c686aa7c..199d3f7235d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v10_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
@@ -3554,7 +3471,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
@@ -3885,3 +3802,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
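The dce_v10_0.c hunks above all apply one transformation: the AMDGPU_HPD_1..AMDGPU_HPD_6 enum values are already zero-based, so the six-way switch that mapped them to an index collapses into a bounds check against mode_info.num_hpd followed by direct hpd_offsets[] indexing. Besides being shorter, the num_hpd bound is per-ASIC, so parts with fewer than six HPD pins now reject out-of-range ids instead of falling through a switch written for six. The idiom in miniature, a sketch using names from the hunks above:

	static bool hpd_sense(struct amdgpu_device *adev, enum amdgpu_hpd_id hpd)
	{
		if (hpd >= adev->mode_info.num_hpd)	/* per-ASIC pin count */
			return false;
		return RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
		       DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK;
	}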
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d293e4..7a0747789f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,9 @@
#ifndef __DCE_V10_0_H__
#define __DCE_V10_0_H__
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
void dce_v10_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f17ad1..ecd000e35981 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v11_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -3611,7 +3528,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
@@ -3941,3 +3858,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65ba2ef..0d878ca3acba 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
void dce_v11_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6cb1399..44547f951d92 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
+};
+
static const uint32_t dig_offsets[] = {
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
@@ -94,15 +104,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- DC_HPD1_INT_CONTROL,
- DC_HPD2_INT_CONTROL,
- DC_HPD3_INT_CONTROL,
- DC_HPD4_INT_CONTROL,
- DC_HPD5_INT_CONTROL,
- DC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
+ connected = true;
return connected;
}
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v6_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(DC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
- DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPDx_EN;
+ WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPDx_EN;
+ WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
- if (!render)
+ if (!render)
WREG32(R_000300_VGA_RENDER_CONTROL,
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ num_crtc = 6;
+ break;
+ case CHIP_OLAND:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v6_0_set_vga_render_state(adev, false);
+
+ /* Disable the CRTCs */
+ for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
+ EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle)
dce_v6_0_set_display_funcs(adev);
dce_v6_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_OLAND:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
default:
- /* FIXME: not supported yet */
return -EINVAL;
}
@@ -2588,42 +2509,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -2796,7 +2698,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2807,12 +2709,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_INFO("IH: HPD%d\n", hpd + 1);
}
@@ -2833,7 +2734,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
.late_init = NULL,
@@ -3174,3 +3075,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a5528105bb6..7b546b596de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c..979aedf4b74d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
+};
+
static const uint32_t dig_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- mmDC_HPD1_INT_CONTROL,
- mmDC_HPD2_INT_CONTROL,
- mmDC_HPD3_INT_CONTROL,
- mmDC_HPD4_INT_CONTROL,
- mmDC_HPD5_INT_CONTROL,
- mmDC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v8_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(mmDC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(mmDC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- WREG32(mmDC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(mmDC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- WREG32(mmDC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(mmDC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- WREG32(mmDC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(mmDC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- WREG32(mmDC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(mmDC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- WREG32(mmDC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
- (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
- DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c3a49b..13b802dd946a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
#ifndef __DCE_V8_0_H__
#define __DCE_V8_0_H__
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
void dce_v8_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f045532..cc85676a68d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
#include "atom.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
@@ -34,11 +37,13 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index);
/**
* dce_virtual_vblank_wait - vblank wait asic callback.
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
/* no DCE */
return;
default:
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ /* Make sure VBLANK interrupts are still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_vblank_on(dev, amdgpu_crtc->crtc_id);
break;
case DRM_MODE_DPMS_STANDBY:
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
-
return true;
}
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
return 0;
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
dce_virtual_set_display_funcs(adev);
dce_virtual_set_irq_funcs(adev);
- adev->mode_info.num_crtc = 1;
adev->mode_info.num_hpd = 1;
adev->mode_info.num_dig = 1;
return 0;
}
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
{
- struct amdgpu_i2c_bus_rec ddc_bus;
- struct amdgpu_router router;
- struct amdgpu_hpd hpd;
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
- /* look up gpio for ddc, hpd */
- ddc_bus.valid = false;
- hpd.hpd = AMDGPU_HPD_NONE;
- /* needed for aux chan transactions */
- ddc_bus.hpd = hpd.hpd;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
- memset(&router, 0, sizeof(router));
- router.ddc_valid = false;
- router.cd_valid = false;
- amdgpu_display_add_connector(adev,
- 0,
- ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
- CONNECTOR_OBJECT_ID_VIRTUAL,
- &hpd,
- &router);
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
- amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
- ATOM_DEVICE_CRT1_SUPPORT,
- 0);
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
- amdgpu_link_encoder_connector(adev->ddev);
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ unsigned i;
+ static const struct mode_size {
+ int w;
+ int h;
+ } common_modes[17] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
+ for (i = 0; i < 17; i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ drm_mode_probed_add(connector, mode);
+ }
- return true;
+ return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
}
+static enum drm_connector_status
+dce_virtual_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+ .get_modes = dce_virtual_get_modes,
+ .mode_valid = dce_virtual_mode_valid,
+ .best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+ .dpms = dce_virtual_dpms,
+ .detect = dce_virtual_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = dce_virtual_set_property,
+ .destroy = dce_virtual_destroy,
+ .force = dce_virtual_force,
+};
+
static int dce_virtual_sw_init(void *handle)
{
int r, i;
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
- /* allocate crtcs */
+ /* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_virtual_crtc_init(adev, i);
if (r)
return r;
+ r = dce_virtual_connector_encoder_init(adev, i);
+ if (r)
+ return r;
}
- dce_virtual_get_connector_info(adev);
- amdgpu_print_display_setup(adev->ddev);
-
drm_kms_helper_poll_init(adev->ddev);
adev->mode_info.mode_config_initialized = true;
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
.name = "dce_virtual",
.early_init = dce_virtual_early_init,
.late_init = NULL,
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return;
}
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
- /* set the active encoder to connector routing */
- amdgpu_encoder_set_active_device(encoder);
-
return true;
}
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
.destroy = dce_virtual_encoder_destroy,
};
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index)
{
- struct drm_device *dev = adev->ddev;
struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
+ struct drm_connector *connector;
+
+ /* add a new encoder */
+ encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+ encoder->possible_crtcs = 1 << index;
+ drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+ if (!connector) {
+ kfree(encoder);
+ return -ENOMEM;
}
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
+ /* add a new connector */
+ drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_connector_register(connector);
- encoder = &amdgpu_encoder->base;
- encoder->possible_crtcs = 0x1;
- amdgpu_encoder->enc_priv = NULL;
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
- DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+ /* link them */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
.page_flip = &dce_virtual_page_flip,
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
- .add_encoder = &dce_virtual_encoder_add,
- .add_connector = &amdgpu_connector_add,
+ .add_encoder = NULL,
+ .add_connector = NULL,
.stop_mc_access = &dce_virtual_stop_mc_access,
.resume_mc_access = &dce_virtual_resume_mc_access,
};
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
adev->mode_info.funcs = &dce_virtual_display_funcs;
}
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
- struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
- struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
- unsigned crtc = 0;
- drm_handle_vblank(adev->ddev, crtc);
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- if (state && !adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
- adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.vblank_timer);
- }
-
- adev->mode_info.vsync_timer_enabled = state;
- DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = 0;
- unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
- dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
- return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
- DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
- return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id)
{
unsigned long flags;
- unsigned crtc_id = 0;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
- crtc_id = 0;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
return 0;
}
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+ struct amdgpu_crtc, vblank_timer);
+ struct drm_device *ddev = amdgpu_crtc->base.dev;
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.crtcs[crtc]->vblank_timer.function =
+ dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+ }
+
+ adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+ return -EINVAL;
+
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
.set = dce_virtual_set_crtc_irq_state,
- .process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
- .set = dce_virtual_set_pageflip_irq_state,
- .process = dce_virtual_pageflip_irq,
+ .process = NULL,
};
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
- adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
}
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+};
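
Context for the dce_virtual.c rework above: the fake crtc/pageflip interrupt sources are dropped and vblanks are instead synthesized by a per-CRTC hrtimer firing every DCE_VIRTUAL_VBLANK_PERIOD (16666666 ns, roughly 60 Hz). A minimal sketch of that timer pattern, reduced from the handler above (everything except the re-arm strategy is simplified away):

    /* Illustrative sketch only: periodic vblank emulation via hrtimer.
     * Like the driver, it re-arms manually with hrtimer_start() and
     * returns HRTIMER_NORESTART, which behaves like returning
     * HRTIMER_RESTART after an hrtimer_forward(). */
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define VBLANK_PERIOD_NS 16666666	/* as DCE_VIRTUAL_VBLANK_PERIOD */

    static enum hrtimer_restart fake_vblank(struct hrtimer *t)
    {
    	/* ... drm_handle_vblank() + pageflip completion go here ... */
    	hrtimer_start(t, ktime_set(0, VBLANK_PERIOD_NS), HRTIMER_MODE_REL);
    	return HRTIMER_NORESTART;
    }
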
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243f6ebc..ed422012c8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
#ifndef __DCE_VIRTUAL_H__
#define __DCE_VIRTUAL_H__
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c09..21c086e02e7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
/* write new base address */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
.late_init = NULL,
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3, /* gfx_v6_ring_emit_cntxcntl */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+};
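
The gfx_v6_0 hunks above show the shape of the ring rework repeated for every engine in this merge: constant per-ring properties move out of the amdgpu_ring_init() argument list and the runtime accessor callbacks into the const amdgpu_ring_funcs table. A hedged sketch of the relevant fields (abbreviated, not the full kernel struct):

    /* Sketch of where the constants ended up; the function pointers
     * (get_rptr, emit_ib, ...) are unchanged and omitted here. */
    struct amdgpu_ring_funcs_sketch {
    	enum amdgpu_ring_type type;	/* was an amdgpu_ring_init() arg  */
    	u32 align_mask;			/* was the 0xf / 0xff init arg    */
    	u32 nop;			/* filler packet, was ring->nop   */
    	unsigned emit_frame_size;	/* was ->get_dma_frame_size(ring) */
    	unsigned emit_ib_size;		/* was ->get_emit_ib_size(ring)   */
    };

Because these values are compile-time constants per ring type, the two accessor callbacks and their boilerplate can simply be deleted, as the removed gfx_v6_0_ring_get_* functions show.
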
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e72b248..ced6fc42f688 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
#endif
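
Interleaved with the ring rework is the mechanical s/fence/dma_fence/ rename this backmerge exists for: struct fence, fence_put(), fence_wait() and fence_wait_timeout() become their dma_fence_* counterparts with identical semantics. All of the IB tests touched here share one wait pattern; a condensed sketch (the helper name is illustrative, not from the driver):

    /* dma_fence_wait_timeout() returns <0 on error, 0 on timeout,
     * and the remaining jiffies (>0) when the fence signaled. */
    static long wait_ib_fence(struct dma_fence *f, long timeout)
    {
    	long r = dma_fence_wait_timeout(f, false /* non-interruptible */, timeout);

    	if (r == 0)
    		return -ETIMEDOUT;
    	return r < 0 ? r : 0;
    }

The caller still owns the reference and must dma_fence_put(f) on every exit path, as the err2/err1 labels above do.
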
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..5b631fd1a879 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask;
- int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+ int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
*/
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.save_restore_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.save_restore_obj);
if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_gfx */
+ WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
+ .read_wave_data = &gfx_v7_0_read_wave_data,
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3, /* gfx_v7_ring_emit_cntxcntl */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
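
gfx_v7_0_read_wave_data() (duplicated verbatim for gfx_v8_0 later in this diff) backs the new read_wave_data hook by sampling per-wave state through the SQ indirect register pair: a selector is written to mmSQ_IND_INDEX and the value is read back from mmSQ_IND_DATA. The selector encoding, as used by wave_read_ind() above:

    uint32_t value;

    WREG32(mmSQ_IND_INDEX,
           (wave & 0xF) |		/* wave id,           bits  3:0  */
           ((simd & 0x3) << 4) |	/* simd id,           bits  5:4  */
           (1 << 13) |			/* read-enable bit, per the driver */
           (address << 16));		/* ixSQ_WAVE_* index, bits 31:16 */
    value = RREG32(mmSQ_IND_DATA);
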
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea147c26..2f5164cc0e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
#ifndef __GFX_V7_0_H__
#define __GFX_V7_0_H__
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a09214..86a7ca5d8511 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1058,6 +1058,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ /* we also need to account for the JT (jump table) */
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+ if (amdgpu_sriov_vf(adev)) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+ ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
+ }
+
if (adev->gfx.mec2_fw) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1127,34 +1140,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_POLARIS10:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x0000002A);
- break;
- case CHIP_POLARIS11:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_FIJI:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- buffer[count++] = cpu_to_le32(0x00000002);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_STONEY:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1273,7 +1260,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -1315,7 +1303,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -1575,7 +1564,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r, i;
u32 tmp;
unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1708,7 +1697,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* wait for the GPU to finish processing the IB */
- r = fence_wait(f, false);
+ r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
@@ -1729,7 +1718,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
return r;
}
@@ -2045,10 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle)
ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
}
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2072,10 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ irq_type);
if (r)
return r;
}
@@ -3679,6 +3664,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
num_rb_pipes);
}
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4331,7 +4331,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- u64 rb_addr, rptr_addr;
+ u64 rb_addr, rptr_addr, wptr_gpu_addr;
int r;
/* Set the write pointer delay */
@@ -4362,6 +4362,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32(mmCP_RB0_CNTL, tmp);
@@ -5438,9 +5441,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+ return RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
+
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v8_0_select_se_sh,
+ .read_wave_data = &gfx_v8_0_read_wave_data,
};
static int gfx_v8_0_early_init(void *handle)
@@ -6120,7 +6155,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6222,7 +6257,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -6240,11 +6275,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
- /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
- if (usepfp)
- amdgpu_ring_insert_nop(ring, 128);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6360,42 +6391,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
- 2 + /* gfx_v8_ring_emit_sb */
- 3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -6541,7 +6536,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
@@ -6562,10 +6557,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3, /* gfx_v8_ring_emit_cntxcntl */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6579,15 +6586,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6599,8 +6614,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6753,3 +6766,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
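
Beyond the renames, two gfx_v8_0 changes above deserve a note. First, the RLC clear-state and CP-table buffers gain AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, presumably because the RLC consumes them as single physically contiguous blocks. Second, gfx_v8_0_get_csb_buffer() stops hard-coding PA_SC_RASTER_CONFIG values per ASIC and instead replays what gfx_v8_0_setup_rb() caches from the hardware. That read-back follows the usual banked-register pattern under the GRBM index mutex; a hedged fragment (se, sh and val are placeholders):

    u32 se = 0, sh = 0, val;

    mutex_lock(&adev->grbm_idx_mutex);
    gfx_v8_0_select_se_sh(adev, se, sh, 0xffffffff);	/* select one SE/SH bank */
    val = RREG32(mmPA_SC_RASTER_CONFIG);
    gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); /* broadcast */
    mutex_unlock(&adev->grbm_idx_mutex);
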
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f829297..788cc3ab584b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
#ifndef __GFX_V8_0_H__
#define __GFX_V8_0_H__
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aaec078..1940d36bc304 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc676cd4..ed2f64dec47a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b964621..3a25f72980c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5d2f7a..ebce2966c1c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
#ifndef __GMC_V7_0_H__
#define __GMC_V7_0_H__
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef140a542..74d7cc3f7e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1436,7 +1436,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
@@ -1477,3 +1477,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a8119d..19b8a8aed204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
#ifndef __GMC_V8_0_H__
#define __GMC_V8_0_H__
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906ce3511..ac21bb7bc0f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &iceland_ih_funcs;
}
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cddfbcb..3235f4277548 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
#ifndef __ICELAND_IH_H__
#define __ICELAND_IH_H__
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
#endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3881a8..b6f2e50636a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -3243,6 +3243,18 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}
+static int kv_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3273,6 +3285,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
+ .check_state_equal = kv_check_state_equal,
};
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3291,3 +3305,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
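
kv_dpm picks up two amdgpu_dpm_funcs hooks here: the shared amdgpu_get_vce_clock_state helper and a check_state_equal that unconditionally reports false, which forces the DPM core to reprogram the power state on every transition. A caller-side sketch of the assumed contract (the core's actual call site is outside this diff, and the wrapper name is an assumption):

    /* Assumed usage: skip hardware reprogramming only when the driver
     * says the current and requested states compare equal. */
    bool equal = false;

    if (amdgpu_dpm_check_state_equal(adev, cur_ps, req_ps, &equal))
    	equal = false;		/* treat errors as "must reprogram" */
    if (!equal) {
    	/* ... program req_ps into the SMC ... */
    }
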
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c7218..e81aa4682760 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v2_4_ring_emit_hdp_flush */
- 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
- 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
- 12 + /* sdma_v2_4_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
- .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5ee10f..28b433729216 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
#ifndef __SDMA_V2_4_H__
#define __SDMA_V2_4_H__
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53..77f146587c60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v3_0_ring_emit_hdp_flush */
- 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
- 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
- 12 + /* sdma_v3_0_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
- .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
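
sdma_v3_0 gets the same treatment, including reading the burst filler packet from ring->funcs->nop. The practical win of the constant emit_frame_size/emit_ib_size fields is that sizing a ring reservation no longer costs an indirect call per submission; a hedged sketch of the arithmetic (the exact job-submission math lives elsewhere in the driver, not in this hunk):

    /* Worst-case dwords for one job: fixed per-frame overhead plus one
     * IB packet per IB.  Illustrative, not the kernel's exact formula. */
    unsigned ndw = ring->funcs->emit_frame_size +
    	       num_ibs * ring->funcs->emit_ib_size;
    int r = amdgpu_ring_alloc(ring, ndw);

    if (r)
    	return r;
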
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698a3054..7aa223d35f1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
#ifndef __SDMA_V3_0_H__
#define __SDMA_V3_0_H__
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c5ecb8..3ed8ad8725b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,6 +39,7 @@
#include "si_dma.h"
#include "dce_v6_0.h"
#include "si.h"
+#include "dce_virtual.h"
static const u32 tahiti_golden_registers[] =
{
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
return r;
}
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
.set_powergating_state = si_common_set_powergating_state,
};
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
-/* {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- */
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
};
int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
case CHIP_OLAND:
- adev->ip_blocks = verde_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- adev->ip_blocks = hainan_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
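
The per-ASIC arrays above are replaced by runtime registration: each IP core exports a single amdgpu_ip_block_version and the ASIC file adds the blocks in order at init time. The helper itself is not part of this excerpt; the following is only a sketch of what the call sites assume it does (append and count, preserving insertion order, since the old arrays were explicitly marked "ORDER MATTERS!"):

/* Sketch only: amdgpu_ip_block_add() as the call sites above assume it
 * behaves; the real helper is defined elsewhere in this series
 * (amdgpu_device.c). */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	/* blocks are initialized later in the order they are added */
	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
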
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b63e0e5..589225080c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
#ifndef __SI_H__
#define __SI_H__
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9..3dd552ae0b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
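
The fence changes in this hunk are purely mechanical: struct fence becomes struct dma_fence and every helper gains the dma_ prefix, with unchanged semantics (refcounting via *_put, wait timeout in jiffies). Condensed, the consumer pattern above reads like this under the new names:

	struct dma_fence *f = NULL;
	long r;

	/* ... submission populates f ... */

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;		/* 0 means the wait timed out */
	else if (r > 0)
		r = 0;			/* fence signaled in time */

	dma_fence_put(f);		/* drop our reference */
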
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 3 + /* si_dma_ring_emit_hdp_flush */
- 3 + /* si_dma_ring_emit_hdp_invalidate */
- 6 + /* si_dma_ring_emit_pipeline_sync */
- 12 + /* si_dma_ring_emit_vm_flush */
- 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
- .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
- .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+};
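
Because .type, .align_mask and .nop now live in amdgpu_ring_funcs, and emit_frame_size/emit_ib_size are constant fields rather than get_* callbacks, amdgpu_ring_init() drops three parameters. Side by side, taken from the si_dma hunk above:

	/* old signature: ring properties passed at every init call */
	r = amdgpu_ring_init(adev, ring, 1024,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
			     &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
			     AMDGPU_RING_TYPE_SDMA);

	/* new signature: nop, align mask and type come from ring->funcs */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_TRAP0);
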
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c78a54b..5ac1b8452fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
#ifndef __SI_DMA_H__
#define __SI_DMA_H__
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca5854b..917213396787 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+ adev->pm.dpm.current_ps = &eg_pi->current_rps;
}
static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+ adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -7320,7 +7322,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -7957,6 +7959,57 @@ static int si_dpm_early_init(void *handle)
return 0;
}
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+ const struct rv7xx_pl *si_cpl2)
+{
+ return ((si_cpl1->mclk == si_cpl2->mclk) &&
+ (si_cpl1->sclk == si_cpl2->sclk) &&
+ (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+ (si_cpl1->vddc == si_cpl2->vddc) &&
+ (si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct si_ps *si_cps;
+ struct si_ps *si_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ si_cps = si_get_ps(cps);
+ si_rps = si_get_ps(rps);
+
+ if (si_cps == NULL) {
+ printk("si_cps is NULL\n");
+ *equal = false;
+ return 0;
+ }
+
+ if (si_cps->performance_level_count != si_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < si_cps->performance_level_count; i++) {
+ if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+ &(si_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
@@ -7991,6 +8044,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+ .check_state_equal = &si_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8010,3 +8065,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
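
si_check_state_equal() gives the DPM core a way to skip redundant power-state switches, which is also why ni_update_current_ps()/ni_update_requested_ps() now publish current_ps and requested_ps. A hedged sketch of a caller follows; the real consumer lives elsewhere in amdgpu's dpm code and reaches the function through the .check_state_equal hook registered above, so the direct call here is a simplification:

/* Hypothetical caller, for illustration only. */
static void example_dpm_set_state(struct amdgpu_device *adev)
{
	struct amdgpu_ps *cps = adev->pm.dpm.current_ps;
	struct amdgpu_ps *rps = adev->pm.dpm.requested_ps;
	bool equal = false;

	if (si_check_state_equal(adev, cps, rps, &equal))
		equal = false;		/* on error, assume a switch is needed */

	if (equal)
		return;			/* identical state: nothing to program */

	/* ... program the requested performance levels ... */
}
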
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4a2360..db0f36846661 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &si_ih_funcs;
}
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a954369c..42e64a53e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
#ifndef __SI_IH_H__
#define __SI_IH_H__
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229bb449..52b71ee58793 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &tonga_ih_funcs;
}
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70fa4a7..499027eee5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
#ifndef __TONGA_IH_H__
#define __TONGA_IH_H__
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8f..8f9c7d55ddda 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,6 +36,9 @@
#include "bif/bif_4_1_d.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v4_2_ring_emit_hdp_flush */
- 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
- 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
if (state == AMD_CG_STATE_GATE)
gate = true;
+ uvd_v5_0_set_bypass_mode(adev, gate);
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+ return 0;
+
uvd_v4_2_enable_mgcg(adev, gate);
return 0;
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd50840..8a0444bb8b95 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V4_2_H__
#define __UVD_V4_2_H__
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e..95303e2d5f92 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v5_0_ring_emit_hdp_flush */
- 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
- 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
+ uvd_v5_0_set_bypass_mode(adev, enable);
+
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49fa5de..2eaaea793ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V5_0_H__
#define __UVD_V5_0_H__
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d75656..a339b5ccb296 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
- 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+ uvd_v6_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (state == AMD_CG_STATE_GATE) {
+ if (enable) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a2352986..d3d48c6428cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
#ifndef __UVD_V6_0_H__
#define __UVD_V6_0_H__
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53..38ed903dd6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vce.irq, 0);
if (r)
return r;
}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
return vce_v2_0_start(adev);
}
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
- .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a01acd..4d15167654a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
#ifndef __VCE_V2_0_H__
#define __VCE_V2_0_H__
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269ec160..5ed2930a8568 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -389,8 +389,7 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
return r;
}
@@ -808,28 +807,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
}
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 6 + /* vce_v3_0_emit_vm_flush */
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
@@ -850,10 +828,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,15 +847,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .emit_frame_size =
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -881,8 +872,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -910,3 +899,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65da81f..08b908c7de0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
#ifndef __VCE_V3_0_H__
#define __VCE_V3_0_H__
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad7126f..25c0a71b257d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -121,8 +121,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- r = RREG32(mmSMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -132,8 +132,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- WREG32(mmSMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ WREG32(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
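
The SMC helpers switch from index/data pair 0 to the newly defined pair 11 (see the smu_7_1_*_d.h hunks later in this diff) while keeping the access pattern itself: select the target register through the index port, then transfer through the data port, all under adev->smc_idx_lock. A read-modify-write built from the two helpers, as a sketch:

/* Illustrative only: set bits in an indirect SMC register. */
static void example_smc_set_bits(struct amdgpu_device *adev, u32 reg, u32 bits)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, reg);	/* select */
	v = RREG32(mmSMC_IND_DATA_11);		/* read */
	WREG32(mmSMC_IND_INDEX_11, reg);	/* reselect */
	WREG32(mmSMC_IND_DATA_11, v | bits);	/* write back */
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
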
@@ -437,12 +437,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
- WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
- WREG32(mmSMC_IND_DATA_0, 0);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+ WREG32(mmSMC_IND_DATA_11, 0);
 /* set index to data for continuous read */
- WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+ dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, false, true},
};
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 reg_offset)
-{
- uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
+{
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
if (reg_offset != asic_register_entry->reg_offset)
continue;
if (!asic_register_entry->untouched)
- *value = asic_register_entry->grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ asic_register_entry->grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
}
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
continue;
if (!vi_allowed_read_registers[i].untouched)
- *value = vi_allowed_read_registers[i].grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ vi_allowed_read_registers[i].grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
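
vi_get_register_value() merges the old indexed-read helper with a cached path: config registers such as GB_ADDR_CONFIG and the tile/macrotile mode arrays are answered from adev->gfx.config without touching the hardware, and only unknown offsets fall through to RREG32(). A usage sketch through vi_read_register(), with a made-up caller name:

/* Hypothetical helper; assumes the offset is in the allowed-register
 * list so vi_read_register() returns 0. Cached offsets never hit the bus. */
static void example_query_register(struct amdgpu_device *adev, u32 reg_offset)
{
	u32 val;

	/* 0xffffffff means "no specific SE/SH", as in the indexed path above */
	if (!vi_read_register(adev, 0xffffffff, 0xffffffff, reg_offset, &val))
		DRM_INFO("reg 0x%x = 0x%08x\n", reg_offset, val);
}
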
@@ -652,18 +729,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* vi_asic_reset - soft reset GPU
*
@@ -677,11 +742,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
- vi_set_bios_scratch_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = vi_gpu_pci_config_reset(adev);
- vi_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -781,734 +846,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
- break;
-
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
@@ -1593,7 +930,7 @@ static int vi_common_early_init(void *handle)
break;
case CHIP_TONGA:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_UVD;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
@@ -1908,7 +1245,7 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init,
.late_init = NULL,
@@ -1925,3 +1262,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.set_powergating_state = vi_common_set_powergating_state,
};
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ /* topaz has no DCE, UVD, VCE */
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ break;
+ case CHIP_FIJI:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_TONGA:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ break;
+ case CHIP_CARRIZO:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ case CHIP_STONEY:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 502094042462..575d7aed5d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
#ifndef __VI_H__
#define __VI_H__
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125bceb0..d1986276dbbd 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,29 @@ enum amd_powergating_state {
AMD_PG_STATE_UNGATE,
};
+struct amd_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+ AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
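struct amd_vce_state pairs the VCE encoder clocks (evclk/ecclk) with the GPU clocks (sclk/mclk) needed to sustain them, and amd_vce_level names up to AMD_MAX_VCE_LEVELS table slots by usage scenario. A hedged consumer sketch, assuming a populated table and the get_vce_clock_state hook added later in this merge (pp_funcs and pp_handle are illustrative names):

    /* Pick the DC low-latency level for resolutions up to 720p. */
    struct amd_vce_state *vs =
        pp_funcs->get_vce_clock_state(pp_handle, AMD_VCE_LEVEL_DC_LL_LOW);
    if (vs)
        pr_info("VCE evclk %u, ecclk %u\n", vs->evclk, vs->ecclk);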
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a58c43..a9ef1562f43b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
#define mmSMU1_SMU_SMC_IND_DATA 0x83
#define mmSMU2_SMU_SMC_IND_DATA 0x85
#define mmSMU3_SMU_SMC_IND_DATA 0x87
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define ixRCU_UC_EVENTS 0xc0000004
#define ixRCU_MISC_CTRL 0xc0000010
#define ixCC_RCU_FUSES 0xc00c0000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 933917479985..22dd4c2b7290 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855cb8df..eca2b851f25f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b6a02a..e4a1697ec1d3 100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
CGS_UCODE_ID_CP_MEC_JT2,
CGS_UCODE_ID_GMCON_RENG,
CGS_UCODE_ID_RLC_G,
+ CGS_UCODE_ID_STORAGE,
CGS_UCODE_ID_MAXIMUM,
};
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
cgs_call_acpi_method call_acpi_method;
/* get system info */
cgs_query_system_info query_system_info;
+ cgs_is_virtualization_enabled_t is_virtualization_enabled;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
resource_base)
+#define cgs_is_virtualization_enabled(cgs_device) \
+ CGS_CALL(is_virtualization_enabled, cgs_device)
#endif /* _CGS_COMMON_H */
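The new cgs_is_virtualization_enabled() entry follows the established CGS pattern: a typedef'd callback in struct cgs_ops plus a CGS_CALL() convenience macro. Its first user is smu7_request_smu_load_fw() later in this merge; illustratively:

    /* e.g. in the SMU firmware loader added below: */
    if (cgs_is_virtualization_enabled(smumgr->device))
        num_extra_entries++;    /* illustrative: one more ucode TOC entry */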
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a68266..0b1f2205c2f1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+ void *input, void *output)
{
int ret = 0;
struct pp_instance *pp_handle;
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
return ret;
}
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_power_state *state;
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
}
+static struct amd_vce_state *
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle) {
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
+ }
+
+ return NULL;
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
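pp_dpm_get_vce_clock_state() bounds-checks idx against hwmgr->num_vce_state_tables and returns NULL for a bad handle or index, so callers must treat NULL as "no such level". An illustrative enumeration loop (handle is hypothetical):

    unsigned int i;
    struct amd_vce_state *vs;

    for (i = 0; (vs = pp_dpm_funcs.get_vce_clock_state(handle, i)); i++)
        pr_info("level %u: sclk %u, mclk %u\n", i, vs->sclk, vs->mclk);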
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 960424913496..4b14f259a147 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
return (struct cz_power_state *)hw_ps;
}
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
void *output, void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
return result;
}
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct cz_power_state);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d289f846..f5e8fda964f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_acpi.h"
+#include "pp_acpi.h"
bool acpi_atcs_functions_supported(void *device, uint32_t index)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d8a450..baf0f3d4c2f0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
/**
* Private Function to get the PowerPlay Table Address.
*/
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
{
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1211,7 +1211,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
}
static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
- struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+ struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
{
const ATOM_Tonga_VCE_State_Record *vce_state_record;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1315,7 +1315,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
- if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
for (j = 0; j < i; j++)
ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebeaf892..a4e9cf429e62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
return 0;
}
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
struct pp_hwmgr *hwmgr)
{
const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
return 0;
}
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db199250..cf2ee93d8475 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- smu7_update_uvd_dpm(hwmgr, false);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+					  AMD_PG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
+ smu7_update_uvd_dpm(hwmgr, false);
}
return 0;
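The reordering above makes gating and ungating mirror images of each other: to gate, UVD clockgating is first released so the power-gating sequence can execute, then power gating is asserted, DPM updated, and the block powered down through the SMU; ungating runs the mirrored sequence. In outline:

    /* gate:   CG ungate -> PG gate   -> update DPM -> powerdown
     * ungate: powerup   -> PG ungate -> CG gate    -> update DPM */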
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c84ad5..073e0bfa22a0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
return (struct smu7_power_state *)hw_ps;
}
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
return 0;
}
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speedCntl = 0;
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
uint32_t link_width;
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
* @param pHwMgr the address of the powerplay hardware manager.
* @return always PP_Result_OK
*/
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->pcie_performance_request = true;
@@ -1161,7 +1161,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
int result = 0;
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct cgs_system_info sys_info = {0};
+ int result;
data->dll_default_on = false;
data->mclk_dpm0_activity_target = 0xa;
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->pcie_lane_performance.min = 16;
data->pcie_lane_power_saving.max = 0;
data->pcie_lane_power_saving.min = 16;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
}
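smu7_init_dpm_defaults() now derives the UVD/VCE power-gating caps from the device rather than assuming them. The CGS query idiom is: zero the struct, fill .size and .info_id, and trust .value only when the call returns 0:

    struct cgs_system_info sys_info = {0};

    sys_info.size = sizeof(sys_info);
    sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
    if (!cgs_query_system_info(hwmgr->device, &sys_info) &&
        (sys_info.value & AMD_PG_SUPPORT_VCE))
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                    PHM_PlatformCaps_VCEPowerGating);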
/**
@@ -1864,7 +1878,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2253,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
int result;
@@ -3672,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
uint32_t num_active_displays = 0;
struct cgs_display_info info = {0};
@@ -3701,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t num_active_displays = 0;
@@ -3751,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
return smu7_program_display_gap(hwmgr);
}
@@ -3775,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *thermal_interrupt_info)
{
return 0;
}
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
@@ -3810,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
(pl1->pcie_lane == pl2->pcie_lane));
}
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *pstate1,
+ const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct smu7_power_state *psa;
const struct smu7_power_state *psb;
@@ -3843,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
return 0;
}
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -3972,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57a378b..eb3e83d7af31 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, int32_t *value);
+	struct amd_vce_state *(*get_vce_clock_state)(void *handle, unsigned idx);
};
struct amd_powerplay {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd1e9d3..e38b999e3235 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -367,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
uint32_t NB;
};
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
- PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
enum PP_TABLE_VERSION {
PP_TABLE_V0 = 0,
PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
void *hardcode_pp_table;
bool need_pp_table_upload;
- struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
uint32_t num_vce_state_tables;
enum amd_dpm_forced_level dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9ac52a..827860fffe78 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
struct pp_hw_power_state hardware;
};
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
- uint32_t evclk;
- uint32_t ecclk;
- uint32_t sclk;
- uint32_t mclk;
-};
-
enum PP_MMProfilingState {
PP_MMProfilingState_NA = 0,
PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2cdab0..8fe8ba9434ff 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
*
*/
-extern bool acpi_atcs_functions_supported(void *device,
- uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
- uint8_t perf_req,
- bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac7ef0d..34523fe6ed6f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
return SMU73_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df855a9..b86e48fb40d1 100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i, result = -1;
uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
{
int result = 0;
uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
return -EINVAL;
}
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889caba420..b579f0c175e6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
return SMU71_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b72324d..006b22071685 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598ab7dae..f38a68747df0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
}
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f42ec9..6df0d6edfdd1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
+ case UCODE_ID_MEC_STORAGE:
+ result = CGS_UCODE_ID_STORAGE;
+ break;
default:
break;
}
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
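PP_ASSERT_WITH_CODE(cond, msg, code), as used throughout this function, prints msg and runs code when cond is false, so each failed TOC population bails out with -EINVAL. Expanded, the virtualization branch added here is roughly:

    if (cgs_is_virtualization_enabled(smumgr->device)) {
        if (smu7_populate_single_firmware_entry(smumgr,
                UCODE_ID_MEC_STORAGE,
                &toc->entry[toc->num_entries++]) != 0) {
            pr_err("Failed to Get Firmware Entry.\n");
            return -EINVAL;
        }
    }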
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2423ae..919be435b49c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
#include <pp_endian.h>
#define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
struct smu7_buffer_entry {
uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d85f48..d08f6f19b454 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x\n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
case SMU_MAX_LEVELS_MVDD:
return SMU72_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac value %x\n", value);
+ printk(KERN_WARNING "can't get the mac value %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
),
TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..5364e6a7ec8f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(2);
+ entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
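These gpu_scheduler.c and sched_fence.c hunks, like the trace header above, are a purely mechanical rename from the core fence API to dma_fence; each symbol maps one-to-one onto its renamed counterpart:

    /* struct fence          -> struct dma_fence
     * struct fence_cb       -> struct dma_fence_cb
     * fence_init/get/put    -> dma_fence_init/get/put
     * fence_add_callback    -> dma_fence_add_callback
     * fence_remove_callback -> dma_fence_remove_callback
     * fence_context_alloc   -> dma_fence_context_alloc
     * fence_default_wait    -> dma_fence_default_wait
     * FENCE_TRACE           -> DMA_FENCE_TRACE
     * <linux/fence.h>       -> <linux/dma-fence.h> */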
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
kfifo_free(&entity->job_queue);
}
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
amd_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
- struct fence * fence = entity->dependency;
+	struct dma_fence *fence = entity->dependency;
struct amd_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
- fence = fence_get(&s_fence->scheduled);
- fence_put(entity->dependency);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
- if (!fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
return true;
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
finish_cb);
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
- fence_put(s_job->s_fence->parent);
+ if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
}
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
- struct fence *fence;
+ struct dma_fence *fence;
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
+ dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->finished);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param)
struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
- struct fence *fence;
+ struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
(!amd_sched_blocked(sched) &&
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..876aa43b57df 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
#define _GPU_SCHEDULER_H_
#include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -50,8 +50,8 @@ struct amd_sched_entity {
atomic_t fence_seq;
uint64_t fence_context;
- struct fence *dependency;
- struct fence_cb cb;
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
};
/**
@@ -66,10 +66,10 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence scheduled;
- struct fence finished;
- struct fence_cb cb;
- struct fence *parent;
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -79,15 +79,15 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb finish_cb;
+ struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *sched_job);
- struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
};
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..c26fa298fe9e 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
- fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
+ dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->scheduled);
+ int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
- FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "was already signaled\n");
}
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->finished);
+ int ret = dma_fence_signal(&fence->finished);
if (!ret)
- FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->finished, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
{
return "amd_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
*/
static void amd_sched_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(fence->parent);
+ dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(&fence->scheduled);
+ dma_fence_put(&fence->scheduled);
}
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_scheduled,
};
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_finished,
};
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418ce6be..6477d1a65266 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358b8f15..9f4739452a25 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+ port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
match);
}
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 1e0e68f608e4..94e46da9a758 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
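The hdlcd, malidp and armada probe paths all switch from component_match_add() to drm_of_component_match_add() and then drop their own OF node reference. Assuming the DRM wrapper pins the node itself (which is what makes the explicit put safe), the shared idiom is:

    drm_of_component_match_add(dev, &match, compare_of, port);
    of_node_put(port);  /* the match now holds its own reference */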
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c90520..7134fdf49210 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_populate = ast_ttm_tt_populate,
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = ast_bo_evict_flags,
.move = NULL,
.verify_access = ast_bo_verify_access,
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9ca06..099a3c688c26 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_populate = ttm_pool_populate,
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bochs_bo_evict_flags,
.move = NULL,
.verify_access = bochs_bo_verify_access,
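The ast and bochs hunks repeat a pattern applied to TTM drivers across this merge: struct ttm_bo_driver gains an eviction_valuable callback, and drivers with no special placement logic point it at the stock helper, which checks whether evicting the buffer would actually help satisfy the requested placement:

    .eviction_valuable = ttm_bo_eviction_valuable,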
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e74fc9f..bd6acc829f97 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -57,6 +57,13 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SIL_SII8620
+ tristate "Silicon Image SII8620 HDMI/MHL bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
config DRM_SII902X
tristate "Silicon Image sii902x RGB/HDMI bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cdf3a3cf765d..97ed1a5fea9a 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
new file mode 100644
index 000000000000..b2c267df7ee7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+ CM_DISCONNECTED,
+ CM_DISCOVERY,
+ CM_MHL1,
+ CM_MHL3,
+ CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+ SINK_NONE,
+ SINK_HDMI,
+ SINK_DVI
+};
+
+enum sii8620_mt_state {
+ MT_STATE_READY,
+ MT_STATE_BUSY,
+ MT_STATE_DONE
+};
+
+struct sii8620 {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct clk *clk_xtal;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_int;
+ struct regulator_bulk_data supplies[2];
+ struct mutex lock; /* context lock, protects fields below */
+ int error;
+ enum sii8620_mode mode;
+ enum sii8620_sink_type sink_type;
+ u8 cbus_status;
+ u8 stat[MHL_DST_SIZE];
+ u8 xstat[MHL_XDS_SIZE];
+ u8 devcap[MHL_DCAP_SIZE];
+ u8 xdevcap[MHL_XDC_SIZE];
+ u8 avif[19];
+ struct edid *edid;
+ unsigned int gen2_write_burst:1;
+ enum sii8620_mt_state mt_state;
+ struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg);
+
+struct sii8620_mt_msg {
+ struct list_head node;
+ u8 reg[4];
+ u8 ret;
+ sii8620_mt_msg_cb send;
+ sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+ 0x39, /* Main System */
+ 0x3d, /* TDM and HSIC */
+ 0x49, /* TMDS Receiver, MHL EDID */
+ 0x4d, /* eMSC, HDCP, HSIC */
+ 0x5d, /* MHL Spec */
+ 0x64, /* MHL CBUS */
+ 0x59, /* Hardware TPI (Transmitter Programming Interface) */
+ 0x61, /* eCBUS-S, eCBUS-D */
+};
+
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data = addr;
+ struct i2c_msg msg[] = {
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = 1,
+ .buf = &data
+ },
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags | I2C_M_RD,
+ .len = len,
+ .buf = buf
+ },
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 2) {
+ dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+ addr, len, ret);
+ ctx->error = ret < 0 ? ret : -EIO;
+ }
+}
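The register helpers encode the I2C page in the high byte of addr: sii8620_i2c_page[addr >> 8] supplies the slave address, while the u8 truncation in 'u8 data = addr' keeps only the register offset within that page. For example:

    /* addr 0x0142 -> register 0x42 on page 1 (slave 0x3d, TDM/HSIC) */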
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+ u8 ret;
+
+ sii8620_read_buf(ctx, addr, &ret, 1);
+ return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+ int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data[2];
+ struct i2c_msg msg = {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = len + 1,
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ if (len > 1) {
+ msg.buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!msg.buf) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+ memcpy(msg.buf + 1, buf, len);
+ } else {
+ msg.buf = data;
+ msg.buf[1] = *buf;
+ }
+
+ msg.buf[0] = addr;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 1) {
+ dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+ addr, len, buf, ret);
+ ctx->error = ret ?: -EIO;
+ }
+
+ if (len > 1)
+ kfree(msg.buf);
+}
+
+#define sii8620_write(ctx, addr, arr...) \
+({\
+ u8 d[] = { arr }; \
+ sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
+
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+ const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+ static const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
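Because every helper returns early once ctx->error is set, call sites can issue whole register sequences without per-write checks and pick up the first failure at the end via sii8620_clear_error(). A hedged usage sketch (the register names are hypothetical):

    sii8620_write_seq_static(ctx,
        REG_FOO, 0x01,  /* hypothetical registers */
        REG_BAR, 0x02
    );
    if (sii8620_clear_error(ctx))
        dev_err(ctx->dev, "init sequence failed\n");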
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+ val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+ sii8620_write(ctx, addr, val);
+}
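sii8620_setbits() is a masked read-modify-write: bits outside mask keep their current value. For instance, setting bit 0 and clearing bit 1 while leaving the rest untouched (register name hypothetical):

    sii8620_setbits(ctx, REG_FOO, 0x03, 0x01);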
+
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg, *n;
+
+ list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+ list_del(&msg->node);
+ kfree(msg);
+ }
+ ctx->mt_state = MT_STATE_READY;
+}
+
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg;
+
+ if (ctx->error)
+ return;
+ if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+ return;
+
+ if (ctx->mt_state == MT_STATE_DONE) {
+ ctx->mt_state = MT_STATE_READY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+ node);
+ if (msg->recv)
+ msg->recv(ctx, msg);
+ list_del(&msg->node);
+ kfree(msg);
+ }
+
+ if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+ return;
+
+ ctx->mt_state = MT_STATE_BUSY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+ if (msg->send)
+ msg->send(ctx, msg);
+}
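sii8620_mt_work() steps a small state machine over the MHL transaction queue: in MT_STATE_READY it sends the head message and marks the queue MT_STATE_BUSY; once the completion path (presumably the interrupt handler, outside this excerpt) sets MT_STATE_DONE, the next invocation runs the head's recv callback, frees it, and sends the following message. In outline:

    /* READY --send head--> BUSY --irq--> DONE --recv, free--> READY */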
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ switch (msg->reg[0]) {
+ case MHL_WRITE_STAT:
+ case MHL_SET_INT:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_WRITE_STAT);
+ break;
+ case MHL_MSC_MSG:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_MSC_MSG);
+ break;
+ default:
+ dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+ msg->reg[0]);
+ }
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+ if (!msg)
+ ctx->error = -ENOMEM;
+ else
+ list_add_tail(&msg->node, &ctx->mt_queue);
+
+ return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = cmd;
+ msg->reg[1] = arg1;
+ msg->reg[2] = arg2;
+ msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+ REG_EDID_CTRL, ctrl,
+ REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+ );
+}
+
+/*
+ * Copy src to dst and leave the changed bits in src. This is the XOR swap
+ * trick applied pairwise: after *src ^= *dst, src holds old ^ new; after
+ * *dst ^= *src, dst holds the new value. E.g. dst = 0b1010, src = 0b0110
+ * gives dst = 0b0110 and src = 0b1100 (the bits that changed).
+ */
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+ while (--count >= 0) {
+ *src ^= *dst;
+ *dst++ ^= *src++;
+ }
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+ static const char * const sink_str[] = {
+ [SINK_NONE] = "NONE",
+ [SINK_HDMI] = "HDMI",
+ [SINK_DVI] = "DVI"
+ };
+
+ u8 dcap[MHL_DCAP_SIZE];
+ char sink_name[20];
+ struct device *dev = ctx->dev;
+
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+ if (ctx->error < 0)
+ return;
+
+ dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+ dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+ dcap[MHL_DCAP_MHL_VERSION] / 16,
+ dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+ dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+ dcap[MHL_DCAP_DEVICE_ID_L]);
+ sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+ if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+ return;
+
+ sii8620_fetch_edid(ctx);
+ if (!ctx->edid) {
+ dev_err(ctx->dev, "Cannot fetch EDID\n");
+ sii8620_mhl_disconnected(ctx);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(ctx->edid))
+ ctx->sink_type = SINK_HDMI;
+ else
+ ctx->sink_type = SINK_DVI;
+
+ drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+	dev_info(dev, "detected sink (type: %s): %s\n",
+		 sink_str[ctx->sink_type], sink_name);
+ sii8620_set_upstream_edid(ctx);
+ sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+ MHL_XDC_SIZE);
+
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+ MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+ sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+ | BIT_INTR9_EDID_ERROR,
+ REG_EDID_CTRL, ctrl,
+ REG_EDID_FIFO_ADDR, 0
+ );
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ sii8620_mr_xdevcap(ctx);
+ else
+ sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+ msg->send = sii8620_mt_read_devcap_send;
+ msg->recv = sii8620_mt_read_devcap_recv;
+}
+
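+/*
+ * Fetch the sink's EDID through the chip's DDC master in FETCH_SIZE chunks.
+ * After the first block the extension count is examined and the buffer is
+ * grown with krealloc() as needed; on CBUS disconnect the partial EDID is
+ * dropped. The result replaces ctx->edid.
+ */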
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+ u8 lm_ddc, ddc_cmd, int3, cbus;
+ int fetched, i;
+ int edid_len = EDID_LENGTH;
+ u8 *edid;
+
+ sii8620_readb(ctx, REG_CBUS_STATUS);
+ lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+ ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, 0,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_HDCP2X_POLL_CS, 0x71,
+ REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+ REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+ );
+
+ for (i = 0; i < 256; ++i) {
+ u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+ if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+ break;
+ sii8620_write(ctx, REG_DDC_STATUS,
+ BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+ }
+
+ sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+#define FETCH_SIZE 16
+ for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ sii8620_write_seq(ctx,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+ REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+ );
+ sii8620_write_seq(ctx,
+ REG_DDC_SEGM, fetched >> 8,
+ REG_DDC_OFFSET, fetched & 0xff,
+ REG_DDC_DIN_CNT1, FETCH_SIZE,
+ REG_DDC_DIN_CNT2, 0,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+ );
+
+ do {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if (int3 & BIT_DDC_CMD_DONE)
+ break;
+
+ if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+ } while (1);
+
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ usleep_range(10, 20);
+
+ sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+ if (fetched + FETCH_SIZE == EDID_LENGTH) {
+ u8 ext = ((struct edid *)edid)->extensions;
+
+ if (ext) {
+ u8 *new_edid;
+
+ edid_len += ext * EDID_LENGTH;
+ new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+ if (!new_edid) {
+ kfree(edid);
+ ctx->error = -ENOMEM;
+ return;
+ }
+ edid = new_edid;
+ }
+ }
+
+ if (fetched + FETCH_SIZE == edid_len)
+ sii8620_write(ctx, REG_INTR3, int3);
+ }
+
+ sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+ kfree(ctx->edid);
+ ctx->edid = (struct edid *)edid;
+}
+
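+/*
+ * Load the EDID fetched from the sink into the on-chip EDID FIFO and mark
+ * it valid, so the chip can serve it to the upstream HDMI source.
+ */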
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+ | BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL3, 0x00,
+ REG_PKT_FILTER_0, 0xFF,
+ REG_PKT_FILTER_1, 0xFF,
+ REG_ALICE0_BW_I2C, 0x06
+ );
+
+ sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+ BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_EDID_FIFO_ADDR, 0,
+ );
+
+ sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+ (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+ REG_INTR9_MASK, 0
+ );
+}
+
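+/*
+ * Program the dividers for the external crystal: pick the first table entry
+ * whose rate is not below the actual xtal rate (the last entry is the
+ * fallback) and warn if the match is not exact.
+ */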
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+ static const struct {
+ unsigned int rate;
+ u8 div;
+ u8 tp1;
+ } rates[] = {
+ { 19200, 0x04, 0x53 },
+ { 20000, 0x04, 0x62 },
+ { 24000, 0x05, 0x75 },
+ { 30000, 0x06, 0x92 },
+ { 38400, 0x0c, 0xbc },
+ };
+ unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+ if (rate <= rates[i].rate)
+ break;
+
+	if (rate != rates[i].rate)
+		dev_err(ctx->dev,
+			"xtal clock rate %lu kHz not supported, using %u kHz instead\n",
+			rate, rates[i].rate);
+
+ sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+ sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+ usleep_range(10000, 20000);
+ return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+ clk_disable_unprepare(ctx->clk_xtal);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
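+/* Pulse the reset GPIO and give the chip time to come up. */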
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(5000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+ );
+}
+
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+ if (ctx->mode != CM_MHL1) {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, 0x0,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ }
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+ u8 uninitialized_var(val);
+
+ sii8620_write_seq_static(ctx,
+ REG_TPI_INTR_EN, 0,
+ REG_HDCP2X_INTR0_MASK, 0,
+ REG_TPI_COPP_DATA2, 0,
+ REG_TPI_INTR_ST0, ~0,
+ );
+
+ switch (ctx->sink_type) {
+ case SINK_DVI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE;
+ break;
+ case SINK_HDMI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE
+ | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+ break;
+ default:
+ return;
+ }
+
+ sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+ | BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+ REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+ | BIT_VID_OVRRD_M1080P_OVRRD,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 0x1,
+ REG_MHLTX_CTL6, 0xa0,
+ REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ );
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL |
+ MHL_DST_LM_PATH_ENABLED);
+
+ sii8620_set_auto_zone(ctx);
+
+ sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+
+ sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+ if (ctx->mode < CM_MHL3)
+ sii8620_stop_video(ctx);
+
+ switch (ctx->sink_type) {
+ case SINK_HDMI:
+ sii8620_start_hdmi(ctx);
+ break;
+ case SINK_DVI:
+ default:
+ break;
+ }
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+ REG_INTR8_MASK, 0
+ );
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+ | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+ | BIT_HPD_CTRL_HPD_HIGH,
+ );
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+ );
+ ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (!ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_CTRL, 0,
+ REG_MDT_RCV_CTRL, 0
+ );
+ ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+ | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+ | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+ | BIT_MDT_XMIT_SM_ERROR,
+ REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+ | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+ | BIT_MDT_RFIFO_DATA_RDY
+ );
+ sii8620_enable_gen2_write_burst(ctx);
+}
+
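+/*
+ * A 1k RGND impedance was sensed: program the analog front end and the
+ * discovery interrupt masks so the MHL handshake can proceed.
+ */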
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+ REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+ | BIT_MHL_EST_INT
+ | BIT_NOT_MHL_EST_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_RGND_READY_INT,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_DP_CTL1, 0xA2,
+ REG_MHL_DP_CTL2, 0x03,
+ REG_MHL_DP_CTL3, 0x35,
+ REG_MHL_DP_CTL5, 0x02,
+ REG_MHL_DP_CTL6, 0x02,
+ REG_MHL_DP_CTL7, 0x03,
+ REG_COC_CTLC, 0xFF,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+ REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+ | BIT_COC_CALIBRATION_DONE,
+ REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+ | BIT_CBUS_CMD_ABORT,
+ REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+ | BIT_CBUS_HPD_CHG
+ | BIT_CBUS_MSC_MR_WRITE_STAT
+ | BIT_CBUS_MSC_MR_MSC_MSG
+ | BIT_CBUS_MSC_MR_WRITE_BURST
+ | BIT_CBUS_MSC_MR_SET_INT
+ | BIT_CBUS_MSC_MT_DONE_NACK
+ );
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+ if (ctx->mode == CM_MHL3)
+ sii8620_write_seq_static(ctx,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+ REG_EMSCINTRMASK1,
+ BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+ );
+ else
+ sii8620_write_seq_static(ctx,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_HDCP2X_INTR0, 0xFF,
+ REG_INTR1, 0xFF,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+ | BIT_SYS_CTRL1_TX_CTRL_HDMI
+ );
+}
+
+#define SII8620_MHL_VERSION 0x32
+#define SII8620_SCRATCHPAD_SIZE 16
+#define SII8620_INT_STAT_SIZE 0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+ static const u8 devcap[MHL_DCAP_SIZE] = {
+ [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+ [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+ [MHL_DCAP_ADOPTER_ID_H] = 0x01,
+ [MHL_DCAP_ADOPTER_ID_L] = 0x41,
+ [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+ | MHL_DCAP_VID_LINK_PPIXEL
+ | MHL_DCAP_VID_LINK_16BPP,
+ [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+ [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+ [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+ [MHL_DCAP_BANDWIDTH] = 0x0f,
+ [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+ | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT,
+ [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+ [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+ };
+ static const u8 xdcap[MHL_XDC_SIZE] = {
+ [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+ | MHL_XDC_ECBUS_S_8BIT,
+ [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+ | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+ [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+ [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+ };
+
+ sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+ sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+ );
+
+ sii8620_peer_specific_init(ctx);
+
+ sii8620_disable_hpd(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_TMDS0_CCTRL1, 0x90,
+ REG_TMDS_CLK_EN, 0x01,
+ REG_TMDS_CH_EN, 0x11,
+ REG_BGR_BIAS, 0x87,
+ REG_ALICE0_ZONE_CTRL, 0xE8,
+ REG_ALICE0_MODE_CTRL, 0x04,
+ );
+ sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+ sii8620_write_seq_static(ctx,
+ REG_TPI_HW_OPT3, 0x76,
+ REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+ REG_TPI_DTD_B2, 79,
+ );
+ sii8620_set_dev_cap(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_TIMEOUT, 100,
+ REG_MDT_XMIT_CTRL, 0x03,
+ REG_MDT_XFIFO_STAT, 0x00,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_CBUS_LINK_CTRL_8, 0x1D,
+ );
+
+ sii8620_start_gen2_write_burst(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_BIST_CTRL, 0x00,
+ REG_COC_CTL1, 0x10,
+ REG_COC_CTL2, 0x18,
+ REG_COC_CTLF, 0x07,
+ REG_COC_CTL11, 0xF8,
+ REG_COC_CTL17, 0x61,
+ REG_COC_CTL18, 0x46,
+ REG_COC_CTL19, 0x15,
+ REG_COC_CTL1A, 0x01,
+ REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+ REG_MHL_COC_CTL4, 0x2D,
+ REG_MHL_COC_CTL5, 0xF9,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ );
+ sii8620_disable_gen2_write_burst(ctx);
+
+ /* currently MHL3 is not supported, so we force version to 0 */
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+ MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+ | MHL_DST_CONN_POW_STAT);
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+ if (ctx->mode == mode)
+ return;
+
+ ctx->mode = mode;
+
+ switch (mode) {
+ case CM_MHL1:
+ sii8620_write_seq_static(ctx,
+ REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN,
+ REG_COC_INTR_MASK, 0
+ );
+ break;
+ case CM_MHL3:
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_COC_CTL0, 0x40,
+ REG_MHL_COC_CTL1, 0x07
+ );
+ break;
+ case CM_DISCONNECTED:
+ break;
+ default:
+ dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+ break;
+ }
+
+ sii8620_set_auto_zone(ctx);
+
+ if (mode != CM_MHL1)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_DP_CTL0, 0xBC,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x39,
+ REG_MHL_DP_CTL2, 0x2A,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x08
+ );
+}
+
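+/*
+ * Return to the disconnected state: stop video, reset CBUS, restore the
+ * analog front end and discovery defaults, clear and mask every interrupt
+ * source, and drop all cached sink state including the EDID.
+ */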
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+ sii8620_disable_gen2_write_burst(ctx);
+ sii8620_stop_video(ctx);
+ msleep(50);
+ sii8620_cbus_reset(ctx);
+ sii8620_set_mode(ctx, CM_DISCONNECTED);
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTL0, 0x40,
+ REG_CBUS3_CNVT, 0x84,
+ REG_COC_CTL14, 0x00,
+ REG_COC_CTL0, 0x40,
+ REG_HRXCTRL3, 0x07,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x3F,
+ REG_MHL_DP_CTL2, 0x2F,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x03
+ );
+ sii8620_disable_hpd(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_COC_CTL1, 0x07,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_DISC_CTRL8, 0x00,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_INT_CTRL, 0x00,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ REG_DISC_CTRL1, 0x25,
+ REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+ REG_MDT_INT_1, 0xff,
+ REG_MDT_INT_1_MASK, 0x00,
+ REG_MDT_INT_0, 0xff,
+ REG_MDT_INT_0_MASK, 0x00,
+ REG_COC_INTR, 0xff,
+ REG_COC_INTR_MASK, 0x00,
+ REG_TRXINTH, 0xff,
+ REG_TRXINTMH, 0x00,
+ REG_CBUS_INT_0, 0xff,
+ REG_CBUS_INT_0_MASK, 0x00,
+ REG_CBUS_INT_1, 0xff,
+ REG_CBUS_INT_1_MASK, 0x00,
+ REG_EMSCINTR, 0xff,
+ REG_EMSCINTRMASK, 0x00,
+ REG_EMSCINTR1, 0xff,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_INTR8, 0xff,
+ REG_INTR8_MASK, 0x00,
+ REG_TPI_INTR_ST0, 0xff,
+ REG_TPI_INTR_EN, 0x00,
+ REG_HDCP2X_INTR0, 0xff,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_INTR9, 0xff,
+ REG_INTR9_MASK, 0x00,
+ REG_INTR3, 0xff,
+ REG_INTR3_MASK, 0x00,
+ REG_INTR5, 0xff,
+ REG_INTR5_MASK, 0x00,
+ REG_INTR2, 0xff,
+ REG_INTR2_MASK, 0x00,
+ );
+ memset(ctx->stat, 0, sizeof(ctx->stat));
+ memset(ctx->xstat, 0, sizeof(ctx->xstat));
+ memset(ctx->devcap, 0, sizeof(ctx->devcap));
+ memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->cbus_status = 0;
+ ctx->sink_type = SINK_NONE;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+ sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+ );
+ sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+ if (stat & VAL_CBUS_MHL_DISCON)
+ sii8620_mhl_disconnected(ctx);
+
+ if (stat & BIT_RGND_READY_INT) {
+ u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+ if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+ sii8620_mhl_discover(ctx);
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_NOMHL_EST
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_NOT_MHL_EST_INT
+ );
+ }
+ }
+ if (stat & BIT_MHL_EST_INT)
+ sii8620_mhl_init(ctx);
+
+ sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+ if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+ dev_dbg(ctx->dev, "HAWB idle\n");
+
+ sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+ sii8620_set_mode(ctx, CM_MHL1);
+ sii8620_peer_specific_init(ctx);
+ sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+ | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+ }
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL
+ | MHL_DST_LM_PATH_ENABLED);
+ sii8620_mt_read_devcap(ctx, false);
+ } else {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL);
+ }
+}
+
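+/*
+ * The peer updated its status registers. sii8620_update_array() leaves the
+ * changed bits behind in the scratch buffers, so the handlers below run
+ * only when DCAP_RDY or PATH_ENABLED actually toggled; each handler then
+ * acts on the new value cached in ctx->stat.
+ */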
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+ u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+ sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+ sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+ sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+ if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ sii8620_status_changed_dcap(ctx);
+
+ if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+ u8 ints[MHL_INT_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+ sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+
+ if (list_empty(&ctx->mt_queue)) {
+ dev_err(dev, "unexpected MSC MT response\n");
+ return NULL;
+ }
+
+ return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+ if (!msg)
+ return;
+
+ msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ u8 buf[2];
+
+ if (!msg)
+ return;
+
+ sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+ switch (buf[0]) {
+ case MHL_MSC_MSG_RAPK:
+ msg->ret = buf[1];
+ ctx->mt_state = MT_STATE_DONE;
+ break;
+ default:
+		dev_err(ctx->dev, "%s: message type %d,%d not supported\n",
+			__func__, buf[0], buf[1]);
+ }
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
+ if (stat & ~BIT_CBUS_HPD_CHG)
+ sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+ if (stat & BIT_CBUS_HPD_CHG) {
+ u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+ sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+ } else {
+ stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ }
+ ctx->cbus_status = cbus_stat;
+ }
+
+ if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+ sii8620_msc_mr_write_stat(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_SET_INT)
+ sii8620_msc_mr_set_int(ctx);
+
+ if (stat & BIT_CBUS_MSC_MT_DONE)
+ sii8620_msc_mt_done(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+ sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+ sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+ sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+ sii8620_write(ctx, REG_INTR9, stat);
+
+ if (stat & BIT_INTR9_DEVCAP_DONE)
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+ REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+ );
+}
+
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+ BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+ sii8620_stop_video(ctx);
+
+ sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+ if (stat & BIT_INTR_SCDT_CHANGE) {
+ u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_scdt_high(ctx);
+ else
+ sii8620_scdt_low(ctx);
+ }
+
+ sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+ u8 vsif[11];
+
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+ VAL_RX_HDMI_CTRL2_DEFVAL |
+ BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+ ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR8)
+ & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+ sii8620_write(ctx, REG_INTR8, stat);
+
+ if (stat & BIT_CEA_NEW_VSI)
+ sii8620_new_vsi(ctx);
+
+ if (stat & BIT_CEA_NEW_AVI)
+ sii8620_new_avi(ctx);
+
+ if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+ sii8620_start_video(ctx);
+}
+
+/* endian agnostic, non-volatile version of test_bit */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+ return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
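+/*
+ * Threaded interrupt handler: a single burst read of the
+ * LEN_FAST_INTR_STAT-byte fast interrupt status block dispatches all
+ * pending sub-interrupts via irq_vec (the BIT_FAST_INTR_STAT_* values are
+ * bit indices into that block). Any I2C error accumulated along the way is
+ * resolved by forcing a disconnect.
+ */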
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+ static const struct {
+ int bit;
+ void (*handler)(struct sii8620 *ctx);
+ } irq_vec[] = {
+ { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+ { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+ { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+ { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+ { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+ { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+ { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+ { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+ };
+ struct sii8620 *ctx = data;
+ u8 stats[LEN_FAST_INTR_STAT];
+ int i, ret;
+
+ mutex_lock(&ctx->lock);
+
+ sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+ for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+ if (sii8620_test_bit(irq_vec[i].bit, stats))
+ irq_vec[i].handler(ctx);
+
+ sii8620_mt_work(ctx);
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+ sii8620_mhl_disconnected(ctx);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
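+/*
+ * Power-up and sanity-check the chip: enable supplies and the xtal clock,
+ * pulse reset, verify I2C access by reading the chip ID, program the xtal
+ * dividers and discovery defaults, then enable the interrupt line.
+ */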
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+ u8 ver[5];
+ int ret;
+
+ ret = sii8620_hw_on(ctx);
+ if (ret) {
+ dev_err(dev, "Error powering on, %d.\n", ret);
+ return;
+ }
+ sii8620_hw_reset(ctx);
+
+ sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+ ver[3], ver[2], ver[4]);
+
+ sii8620_write(ctx, REG_DPD,
+ BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+ sii8620_xtal_set_rate(ctx);
+ sii8620_disconnect(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+ | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+ REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+ );
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii8620, bridge);
+}
+
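+/*
+ * Reject modes the MHL link cannot carry: interlace is not supported and
+ * the pixel clock is capped at 74.25 MHz, or 300 MHz when the sink's DEVCAP
+ * advertises packed-pixel modes. ctx->lock guards against a concurrent
+ * DEVCAP update from the interrupt thread.
+ */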
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ bool ret = false;
+ int max_clock = 74250;
+
+ mutex_lock(&ctx->lock);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ goto out;
+
+ if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+ max_clock = 300000;
+
+ ret = mode->clock <= max_clock;
+
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct sii8620 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->mt_queue);
+
+ ctx->clk_xtal = devm_clk_get(dev, "xtal");
+ if (IS_ERR(ctx->clk_xtal)) {
+ dev_err(dev, "failed to get xtal clock from DT\n");
+ return PTR_ERR(ctx->clk_xtal);
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+					sii8620_irq_thread,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					"sii8620", ctx);
+	if (ret)
+		return ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "cvcc10";
+ ctx->supplies[1].supply = "iovcc18";
+ ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii8620_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii8620_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+ struct sii8620 *ctx = i2c_get_clientdata(client);
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ drm_bridge_remove(&ctx->bridge);
+ sii8620_hw_off(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+ { .compatible = "sil,sii8620" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+ { "sii8620", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+
+static struct i2c_driver sii8620_driver = {
+ .driver = {
+ .name = "sii8620",
+ .of_match_table = of_match_ptr(sii8620_dt_match),
+ },
+ .probe = sii8620_probe,
+ .remove = sii8620_remove,
+ .id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
new file mode 100644
index 000000000000..6ff616a4f6ce
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL 0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH 0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL 0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH 0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV 0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510 0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1 0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD 0x000b
+#define BIT_DPD_PWRON_PLL BIT(7)
+#define BIT_DPD_PDNTX12 BIT(6)
+#define BIT_DPD_PDNRX12 BIT(5)
+#define BIT_DPD_OSC_EN BIT(4)
+#define BIT_DPD_PWRON_HSIC BIT(3)
+#define BIT_DPD_PDIDCK_N BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL 0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL BIT(4)
+#define BIT_DCTL_TRANSCODE BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST 0x000e
+#define BIT_PWD_SRST_COC_DOC_RST BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST BIT(4)
+#define BIT_PWD_SRST_CBUS_RST BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1)
+#define BIT_PWD_SRST_SW_RST BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1 0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL 0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE 0x004a
+#define BIT_VID_MODE_M1080P BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD 0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR 0x0057
+#define REG_PAGE7_ADDR 0x0058
+#define REG_PAGE8_ADDR 0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT 0x005f
+#define LEN_FAST_INTR_STAT 7
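+/* the BIT_FAST_INTR_STAT_* values below are bit indices, not masks */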
+#define BIT_FAST_INTR_STAT_TIMR 8
+#define BIT_FAST_INTR_STAT_INT2 9
+#define BIT_FAST_INTR_STAT_DDC 10
+#define BIT_FAST_INTR_STAT_SCDT 11
+#define BIT_FAST_INTR_STAT_INFR 13
+#define BIT_FAST_INTR_STAT_EDID 14
+#define BIT_FAST_INTR_STAT_HDCP 15
+#define BIT_FAST_INTR_STAT_MSC 16
+#define BIT_FAST_INTR_STAT_MERR 17
+#define BIT_FAST_INTR_STAT_G2WB 18
+#define BIT_FAST_INTR_STAT_G2WB_ERR 19
+#define BIT_FAST_INTR_STAT_DISC 28
+#define BIT_FAST_INTR_STAT_BLOCK 30
+#define BIT_FAST_INTR_STAT_LTRN 31
+#define BIT_FAST_INTR_STAT_HDCP2 32
+#define BIT_FAST_INTR_STAT_TDM 42
+#define BIT_FAST_INTR_STAT_COC 51
+
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1 0x006e
+#define BIT_CTRL1_GPIO_I_8 BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8 BIT(4)
+#define BIT_CTRL1_GPIO_I_7 BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7 BIT(2)
+#define BIT_CTRL1_GPIO_I_6 BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6 BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL 0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP BIT(7)
+#define BIT_INT_CTRL_INTR_OD BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE 0x0070
+#define BIT_INTR_STATE_INTR_STATE BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1 0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2 0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3 0x0073
+#define BIT_DDC_CMD_DONE BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5 0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK 0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK 0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK 0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK 0x0078
+#define BIT_INTR_SCDT_CHANGE BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL 0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1 BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0 BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL 0x007a
+#define BIT_CTRL_GPIO_I_5 BIT(7)
+#define BIT_CTRL_GPIO_OEN_5 BIT(6)
+#define BIT_CTRL_GPIO_I_4 BIT(5)
+#define BIT_CTRL_GPIO_OEN_4 BIT(4)
+#define BIT_CTRL_GPIO_I_3 BIT(3)
+#define BIT_CTRL_GPIO_OEN_3 BIT(2)
+#define BIT_CTRL_GPIO_I_2 BIT(1)
+#define BIT_CTRL_GPIO_OEN_2 BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7 0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8 0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK 0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK 0x007e
+#define BIT_CEA_NEW_VSI BIT(2)
+#define BIT_CEA_NEW_AVI BIT(1)
+
+/* IEEE, default value: 0x10 */
+#define REG_TMDS_CCTRL 0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4 0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL 0x00bb
+#define BIT_RXBIST_VGB_EN BIT(7)
+#define BIT_TXBIST_VGB_EN BIT(6)
+#define BIT_BIST_START_SEL BIT(5)
+#define BIT_BIST_START_BIT BIT(4)
+#define BIT_BIST_ALWAYS_ON BIT(3)
+#define BIT_BIST_TRANS BIT(2)
+#define BIT_BIST_RESET BIT(1)
+#define BIT_BIST_EN BIT(0)
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_TEST_SEL 0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE 0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0 0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1 0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2 0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN 0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC 0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW BIT(2)
+#define BIT_LM_DDC_DDC_GRANT BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL 0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL BIT(6)
+#define BIT_DDC_MANUAL_DSDA BIT(5)
+#define BIT_DDC_MANUAL_DSCL BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR 0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR 0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM 0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET 0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1 0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2 0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS 0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_USE BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD 0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD 0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA 0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT 0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT 0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL 0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR 0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR 0x00fc
+#define REG_PAGE2_ADDR 0x00fd
+#define REG_PAGE3_ADDR 0x00fe
+#define REG_HW_TPI_ADDR 0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST 0x0100
+#define BIT_UTSRST_FC_SRST BIT(5)
+#define BIT_UTSRST_KEEPER_SRST BIT(4)
+#define BIT_UTSRST_HTX_SRST BIT(3)
+#define BIT_UTSRST_TRX_SRST BIT(2)
+#define BIT_UTSRST_TTX_SRST BIT(1)
+#define BIT_UTSRST_HRX_SRST BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3 0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL 0x0111
+#define REG_HRXINTH 0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB 0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS 0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS 0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS 0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL 0x0136
+#define BIT_TTXINTL_TTX_INTR7 BIT(7)
+#define BIT_TTXINTL_TTX_INTR6 BIT(6)
+#define BIT_TTXINTL_TTX_INTR5 BIT(5)
+#define BIT_TTXINTL_TTX_INTR4 BIT(4)
+#define BIT_TTXINTL_TTX_INTR3 BIT(3)
+#define BIT_TTXINTL_TTX_INTR2 BIT(2)
+#define BIT_TTXINTL_TTX_INTR1 BIT(1)
+#define BIT_TTXINTL_TTX_INTR0 BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH 0x0137
+#define BIT_TTXINTH_TTX_INTR15 BIT(7)
+#define BIT_TTXINTH_TTX_INTR14 BIT(6)
+#define BIT_TTXINTH_TTX_INTR13 BIT(5)
+#define BIT_TTXINTH_TTX_INTR12 BIT(4)
+#define BIT_TTXINTH_TTX_INTR11 BIT(3)
+#define BIT_TTXINTH_TTX_INTR10 BIT(2)
+#define BIT_TTXINTH_TTX_INTR9 BIT(1)
+#define BIT_TTXINTH_TTX_INTR8 BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL 0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS 0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS 0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS 0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2 0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL 0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH 0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH 0x0166
+
+/* HSIC TX CRTL, default value: 0x00 */
+#define REG_HTXCTRL 0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL 0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH 0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER 0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC 0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13 0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14 0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15 0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50 0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0 0x01ec
+#define REG_FCINTR1 0x01ed
+#define REG_FCINTR2 0x01ee
+#define REG_FCINTR3 0x01ef
+#define REG_FCINTR4 0x01f0
+#define REG_FCINTR5 0x01f1
+#define REG_FCINTR6 0x01f2
+#define REG_FCINTR7 0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL 0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1 0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN 0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN 0x0212
+#define BIT_TMDS_CH_EN_CH0_EN BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS 0x0215
+#define BIT_BGR_BIAS_BGR_EN BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C 0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL 0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL 0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6 0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL 0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0 0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1 0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3 0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0 0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2 0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3 0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER 0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9 0x02e0
+#define BIT_INTR9_EDID_ERROR BIT(6)
+#define BIT_INTR9_EDID_DONE BIT(5)
+#define BIT_INTR9_DEVCAP_DONE BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK 0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START 0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL 0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR 0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA 0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON 0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA 0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT 0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA 0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW 0x02f3
+#define REG_TX_IP_BIST_INST_HIGH 0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW 0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH 0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW 0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH 0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL 0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ BIT(1)
+#define BIT_GENCTL_EMSC_EN BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT 0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL 0x031a
+#define REG_EMSCRFIFOBCNTH 0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT 0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT 0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR 0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK 0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT 0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT 0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1 0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1 0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL 0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0 0x0331
+#define BIT_MHL_DP_CTL0_DP_OE BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE 0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1 0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2 0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3 0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4 0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5 0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0 0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2 0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0 0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1 0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0 0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1 0x0343
+#define BIT_MHL_COC_CTL1_COC_EN BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3 0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4 0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5 0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0 0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6 0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7 0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1 0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL 0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS 0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_ 0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0 0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK 0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0 0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1 0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL 0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K 0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN 0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT 0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1 0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0 0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS 0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL 0x03e0
+#define BIT_M3_CTRL_H2M_SWRST BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3)
+#define BIT_M3_CTRL_M3AV_EN BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_M3AV_EN \
+ | BIT_M3_CTRL_ENC_TMDS \
+ | BIT_M3_CTRL_MHL3_MASTER_EN)
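/*
 * Editor's sketch (not part of the patch): selecting the HDMI2MHL3 bridge
 * mode with the composite VAL_M3_CTRL_* values above. struct sii8620 and
 * sii8620_write() are assumed to be the companion driver's device context
 * and register-write helper; their exact shape is an assumption here.
 */
static void example_m3_set_mode(struct sii8620 *ctx, bool mhl3)
{
	sii8620_write(ctx, REG_M3_CTRL, mhl3 ?
		      VAL_M3_CTRL_MHL3_VALUE : VAL_M3_CTRL_MHL1_2_VALUE);
}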
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL 0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0)
+
+#define REG_M3_POSTM 0x03e2
+#define MSK_M3_POSTM_RRP_DECODE 0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL 0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN 0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0 0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0 0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0 0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0 0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0 0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0 0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2 0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED 0x01
+#define VAL_TPI_QUAN_RANGE_FULL 0x02
+#define VAL_TPI_FORMAT_RGB 0x00
+#define VAL_TPI_FORMAT_YCBCR444 0x01
+#define VAL_TPI_FORMAT_YCBCR422 0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+ (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT 0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7)
+#define BIT_TPI_INPUT_ENDITHER BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT 0x03
+
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT 0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03
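/*
 * Editor's sketch (not part of the patch): programming a matching input
 * and output colorspace/quantization range with VAL_TPI_FORMAT() above.
 * VAL_TPI_FORMAT(RGB, FULL) expands to 0x00 | (0x02 << 2) == 0x08.
 * sii8620_write() is an assumed helper, as in the previous sketch.
 */
static void example_tpi_rgb_full(struct sii8620 *ctx)
{
	sii8620_write(ctx, REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL));
	sii8620_write(ctx, REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL));
}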
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM 0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC 0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1 0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2 0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN 0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0 0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS 0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1 0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2 0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3 0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL 0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0 0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0 0x0700
+#define REG_COC_STAT_1 0x0701
+#define REG_COC_STAT_2 0x0702
+#define REG_COC_STAT_3 0x0703
+#define REG_COC_STAT_4 0x0704
+#define REG_COC_STAT_5 0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0 0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1 0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2 0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3 0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6 0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7 0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9 0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA 0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB 0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC 0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD 0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE 0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF 0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11 0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14 0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15 0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR 0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK 0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0)
+#define BIT_COC_CALIBRATION_DONE BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0 0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17 0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18 0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19 0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A 0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8 0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9 0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4 0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0 0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6 0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7 0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8 0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9 0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA 0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE 0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK 0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK 0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK 0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK 0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT 0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT 0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL 0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT 0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL 0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
+
+/* MDT Transmit WRITE Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT 0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT 0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT 0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0 0x058c
+#define BIT_MDT_RFIFO_DATA_RDY BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2)
+#define BIT_MDT_XFIFO_EMPTY BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK 0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1 0x058e
+#define BIT_MDT_RCV_TIMEOUT BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1)
+#define BIT_MDT_RCV_SM_ERROR BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK 0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID 0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS 0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0 0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3)
+#define BIT_CBUS_HPD_CHG BIT(2)
+#define BIT_CBUS_MSC_MT_DONE BIT(1)
+#define BIT_CBUS_CNX_CHG BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK 0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1 0x0594
+#define BIT_CBUS_CMD_ABORT BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD BIT(3)
+#define BIT_CBUS_DDC_ABORT BIT(2)
+#define BIT_CBUS_CEC_ABORT BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK 0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT 0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK 0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT 0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK 0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT 0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK 0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0 0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK 0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8 0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT 0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START 0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1)
+#define BIT_MSC_COMMAND_START_PEER BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET 0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0 0x05bc
+#define REG_MSC_MT_RCVD_DATA1 0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL 0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT 0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1 0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT 0x30
+#define MSK_DISC_CTRL1_DISC_CYC 0x0c
+#define BIT_DISC_CTRL1_DISC_EN BIT(0)
+
+#define VAL_PUP_OFF 0
+#define VAL_PUP_20K 1
+#define VAL_PUP_5K 2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4 0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | (pup_idle << 4))
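/*
 * Editor's sketch (not part of the patch): choosing the CBUS pull-ups for
 * the discovery and idle phases with VAL_DISC_CTRL4() and the VAL_PUP_*
 * codes above. Note the macro shifts its second argument without extra
 * parentheses, so pass a plain VAL_PUP_* value rather than an expression.
 * sii8620_write() is an assumed helper.
 */
static void example_disc_pullups(struct sii8620 *ctx)
{
	/* 5k pull-up while discovering, 20k while idle */
	sii8620_write(ctx, REG_DISC_CTRL4,
		      VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K));
}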
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5 0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8 0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9 0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1 0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5)
+#define MSK_DISC_STAT1_DISC_SM 0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2 0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS BIT(5)
+#define BIT_DISC_STAT2_RSEN BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN 0x0c
+#define VAL_DISC_STAT2_DEFAULT 0x00
+#define VAL_DISC_STAT2_MHL1_2 0x04
+#define VAL_DISC_STAT2_MHL3 0x08
+#define VAL_DISC_STAT2_RESERVED 0x0c
+
+#define MSK_DISC_STAT2_RGND 0x03
+#define VAL_RGND_OPEN 0x00
+#define VAL_RGND_2K 0x01
+#define VAL_RGND_1K 0x02
+#define VAL_RGND_SHORT 0x03
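/*
 * Editor's sketch (not part of the patch): decoding the RGND impedance
 * field of REG_DISC_STAT2. In MHL discovery, a ~1k ohm RGND reading is
 * what identifies an attached MHL sink. sii8620_readb() is assumed to be
 * the companion driver's register-read helper.
 */
static bool example_rgnd_is_mhl(struct sii8620 *ctx)
{
	u8 stat = sii8620_readb(ctx, REG_DISC_STAT2);

	return (stat & MSK_DISC_STAT2_RGND) == VAL_RGND_1K;
}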
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0 0x05ed
+#define BIT_RGND_READY_INT BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT BIT(4)
+#define BIT_NOT_MHL_EST_INT BIT(3)
+#define BIT_MHL_EST_INT BIT(2)
+#define BIT_MHL3_EST_INT BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+ | BIT_CBUS_MHL3_DISCON_INT \
+ | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK 0x05ee
+
+#endif /* __SIL_SII8620_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438dd8733..de52b20800e1 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_populate = cirrus_ttm_tt_populate,
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = cirrus_bo_evict_flags,
.move = NULL,
.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f81706387889..c32fb3c1d6f0 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -705,8 +705,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
- } else if (property == config->rotation_property ||
- property == plane->rotation_property) {
+ } else if (property == plane->rotation_property) {
if (!is_power_of_2(val & DRM_ROTATE_MASK))
return -EINVAL;
state->rotation = val;
@@ -766,8 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
- } else if (property == config->rotation_property ||
- property == plane->rotation_property) {
+ } else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
@@ -1465,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
static struct drm_pending_vblank_event *create_vblank_event(
struct drm_device *dev, struct drm_file *file_priv,
- struct fence *fence, uint64_t user_data)
+ struct dma_fence *fence, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
int ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f9362760bfb2..75ad01d595fd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "drm_crtc_internal.h"
@@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes)
*
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Returns zero on success or < 0 if dma_fence_wait() fails.
*/
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
* still interrupt the operation. Instead of blocking until the
* timer expires, make the wait interruptible.
*/
- ret = fence_wait(plane_state->fence, pre_swap);
+ ret = dma_fence_wait(plane_state->fence, pre_swap);
if (ret)
return ret;
- fence_put(plane_state->fence);
+ dma_fence_put(plane_state->fence);
plane_state->fence = NULL;
}
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index e52aece30900..1f2412c7ccfd 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
* rotation and reflection step between the source and destination rectangles.
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
@@ -105,18 +105,12 @@
*/
/**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
* @supported_rotations: bitmask of supported rotations and reflections
*
* This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
*
 * Since a rotation by 180° is the same as reflecting both along the x
* and the y axis the rotation property is somewhat redundant. Drivers can use
@@ -144,24 +138,6 @@
* rotation. After reflection, the rotation is applied to the image sampled from
* the source rectangle, before scaling it to fit the destination rectangle.
*/
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations)
-{
- static const struct drm_prop_enum_list props[] = {
- { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
- { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" },
- { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
- { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
- { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
- { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
- };
-
- return drm_property_create_bitmask(dev, 0, "rotation",
- props, ARRAY_SIZE(props),
- supported_rotations);
-}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
-
int drm_plane_create_rotation_property(struct drm_plane *plane,
unsigned int rotation,
unsigned int supported_rotations)
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 488355bdafb9..e02563966271 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,6 +142,11 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
@@ -193,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
+ DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+ ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
@@ -210,6 +217,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
+ DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+ adaptor_id, ret);
if (ret == 0) {
if (is_lspcon_adaptor(hdmi_id, adaptor_id))
return DRM_DP_DUAL_MODE_LSPCON;
@@ -219,6 +228,15 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
+ /*
+ * If the adaptor ID is neither a proper type 1 ID nor the broken
+ * type 1 ID described above, assume type 1 anyway, but let the
+ * user know that we may have misdetected the type.
+ */
+ if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+ adaptor_id);
+
}
if (is_hdmi_adaptor(hdmi_id))
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 95de47ba1e77..9506933b41cd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
return ret == xfers ? 0 : -1;
}
+static void connector_bad_edid(struct drm_connector *connector,
+ u8 *edid, int num_blocks)
+{
+ int i;
+
+ if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ return;
+
+ dev_warn(connector->dev->dev,
+ "%s: EDID is invalid:\n",
+ connector->name);
+ for (i = 0; i < num_blocks; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
+ char prefix[20];
+
+ if (drm_edid_is_zero(block, EDID_LENGTH))
+ sprintf(prefix, "\t[%02x] ZERO ", i);
+ else if (!drm_edid_block_valid(block, i, false, NULL))
+ sprintf(prefix, "\t[%02x] BAD ", i);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", i);
+
+ print_hex_dump(KERN_WARNING,
+ prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+ }
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1283,7 +1311,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
@@ -1292,7 +1319,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) {
if (get_edid_block(data, edid, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(edid, 0, print_bad_edid,
+ if (drm_edid_block_valid(edid, 0, false,
&connector->edid_corrupt))
break;
if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
@@ -1304,54 +1331,60 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
goto carp;
/* if there's no extensions, we're done */
- if (edid[0x7e] == 0)
+ valid_extensions = edid[0x7e];
+ if (valid_extensions == 0)
return (struct edid *)edid;
- new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
edid = new;
for (j = 1; j <= edid[0x7e]; j++) {
- u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH;
+ u8 *block = edid + j * EDID_LENGTH;
for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, j,
- print_bad_edid, NULL)) {
- valid_extensions++;
+ if (drm_edid_block_valid(block, j, false, NULL))
break;
- }
}
- if (i == 4 && print_bad_edid) {
- dev_warn(connector->dev->dev,
- "%s: Ignoring invalid EDID block %d.\n",
- connector->name, j);
-
- connector->bad_edid_counter++;
- }
+ if (i == 4)
+ valid_extensions--;
}
if (valid_extensions != edid[0x7e]) {
+ u8 *base;
+
+ connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
edid[0x7e] = valid_extensions;
- new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+
+ new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
+
+ base = new;
+ for (i = 0; i <= edid[0x7e]; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
+
+ if (!drm_edid_block_valid(block, i, false, NULL))
+ continue;
+
+ memcpy(base, block, EDID_LENGTH);
+ base += EDID_LENGTH;
+ }
+
+ kfree(edid);
edid = new;
}
return (struct edid *)edid;
carp:
- if (print_bad_edid) {
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- connector->name, j);
- }
- connector->bad_edid_counter++;
-
+ connector_bad_edid(connector, edid, 1);
out:
kfree(edid);
return NULL;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e0d428f9d1cb..83dbae0fabcf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -392,15 +392,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
- if (plane->rotation_property) {
+ if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
plane->rotation_property,
DRM_ROTATE_0);
- } else if (dev->mode_config.rotation_property) {
- drm_mode_plane_set_obj_prop(plane,
- dev->mode_config.rotation_property,
- DRM_ROTATE_0);
- }
}
for (i = 0; i < fb_helper->crtc_count; i++) {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 8bed5f459182..cf993dbf602e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
if (p->fence)
- fence_put(p->fence);
+ dma_fence_put(p->fence);
kfree(p);
}
@@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- fence_signal(e->fence);
- fence_put(e->fence);
+ dma_fence_signal(e->fence);
+ dma_fence_put(e->fence);
}
if (!e->file_priv) {
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index bc98bb94264d..47848ed8ca48 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -6,6 +6,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+static void drm_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
/**
* drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
EXPORT_SYMBOL(drm_of_find_possible_crtcs);
/**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: of_node
+ */
+void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+ of_node_get(node);
+ component_match_add_release(master, matchptr, drm_release_of,
+ compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
+/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, port);
+ drm_of_component_match_add(dev, &match, compare_of, port);
of_node_put(port);
}
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index aa687669e22b..0dee6acbd880 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/of_platform.h>
+#include <drm/drm_of.h>
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!core_node)
break;
- component_match_add(&pdev->dev, &match, compare_of,
- core_node);
+ drm_of_component_match_add(&pdev->dev, &match,
+ compare_of, core_node);
of_node_put(core_node);
}
} else if (dev->platform_data) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 3755ef935af4..7d066a91d778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
}
#ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
const char *type, struct seq_file *m)
{
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
seq_printf(m, "\t%9s: %s %s seq %u\n",
type,
fence->ops->get_driver_name(fence),
@@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f885fed..d2211825e5c8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
*/
#include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work)
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (!gpu->event[i].used)
continue;
- fence_signal(gpu->event[i].fence);
+ dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
/* fence object management */
struct etnaviv_fence {
struct etnaviv_gpu *gpu;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
return container_of(fence, struct etnaviv_fence, base);
}
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
return "etnaviv";
}
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return fence_completed(f->gpu, f->base.seqno);
}
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
.enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
f->gpu = gpu;
- fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
- gpu->fence_context, ++gpu->next_fence);
+ dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
return &f->base;
}
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
/* Wait on any existing exclusive fence which isn't our own */
fence = reservation_object_get_excl(robj);
if (fence && fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(robj));
if (fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&gpu->lock);
list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!fence_is_signaled(cmdbuf->fence))
+ if (!dma_fence_is_signaled(cmdbuf->fence))
break;
list_del(&cmdbuf->node);
- fence_put(cmdbuf->fence);
+ dma_fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned int event, i;
int ret;
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
while ((event = ffs(intr)) != 0) {
- struct fence *fence;
+ struct dma_fence *fence;
event -= 1;
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data)
fence = gpu->event[event].fence;
gpu->event[event].fence = NULL;
- fence_signal(fence);
+ dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
return ret;
gpu->drm = drm;
- gpu->fence_context = fence_context_alloc(1);
+ gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_cmd_list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..8c6b824e9d0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
bool used;
- struct fence *fence;
+ struct dma_fence *fence;
};
struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
/* vram node used if the cmdbuf is mapped through the MMUv2 */
struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
- struct fence *fence;
+ struct dma_fence *fence;
/* target exec state */
u32 exec_state;
/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 90377a609c98..e88fde18c946 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "kirin_drm_drv.h"
@@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
DRM_ERROR("no valid endpoint node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(endpoint);
remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
if (!remote) {
DRM_ERROR("no valid remote node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(remote);
if (!of_device_is_available(remote)) {
DRM_ERROR("not available for remote node\n");
@@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
if (IS_ERR(remote))
return PTR_ERR(remote);
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of, remote);
+ of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9798d400d817..af8683e0dd54 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
mutex_unlock(&priv->audio_mutex);
}
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int
+tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 74ede1f53372..f9af2a00625e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -26,12 +26,12 @@
#include "i915_drv.h"
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return "i915";
}
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
/* Timelines are bound by eviction to a VM. However, since
* we only have a global seqno at the moment, we only have
@@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence)
return "global";
}
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
{
return i915_gem_request_completed(to_request(fence));
}
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
if (i915_fence_signaled(fence))
return false;
@@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence)
return true;
}
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout_jiffies)
{
@@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence,
return timeout_jiffies;
}
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
snprintf(str, size, "%u",
intel_engine_get_seqno(to_request(fence)->engine));
}
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
kmem_cache_free(req->i915->requests, req);
}
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
.get_driver_name = i915_fence_get_driver_name,
.get_timeline_name = i915_fence_get_timeline_name,
.enable_signaling = i915_fence_enable_signaling,
@@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
* it is either still in use, or has been reallocated and reset
- * with fence_init(). This increment is safe for release as we check
- * that the request we have a reference to and matches the active
+ * with dma_fence_init(). This increment is safe for release as we
+ * check that the request we have a reference to and matches the active
* request.
*
* Before we increment the refcount, we chase the request->engine
@@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
goto err;
spin_lock_init(&req->lock);
- fence_init(&req->fence,
- &i915_fence_ops,
- &req->lock,
- engine->fence_context,
- seqno);
+ dma_fence_init(&req->fence,
+ &i915_fence_ops,
+ &req->lock,
+ engine->fence_context,
+ seqno);
i915_sw_fence_init(&req->submit, submit_notify);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7bcc801..bceeaa3a5193 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,7 +25,7 @@
#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "i915_gem.h"
#include "i915_sw_fence.h"
@@ -62,7 +62,7 @@ struct intel_signal_node {
* The requests are reference counted.
*/
struct drm_i915_gem_request {
- struct fence fence;
+ struct dma_fence fence;
spinlock_t lock;
/** On Which ring this request was generated */
@@ -145,9 +145,9 @@ struct drm_i915_gem_request {
struct list_head execlist_link;
};
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool fence_is_i915(struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
@@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req)
}
static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
@@ -183,19 +183,19 @@ to_request(struct fence *fence)
static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
- return to_request(fence_get(&req->fence));
+ return to_request(dma_fence_get(&req->fence));
}
static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
- return to_request(fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&req->fence));
}
static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
- fence_put(&req->fence);
+ dma_fence_put(&req->fence);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+ * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
* when successful. That is, if i915_gem_request_get_rcu()
* returns the request (and so with the reference counted
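For reference, the provider side of the renamed API boils down to the pattern below. This is a minimal sketch, not part of this series; the demo_* names are hypothetical, and the context value would come from dma_fence_context_alloc() as intel_engine_setup_common() does further down.

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *demo_get_driver_name(struct dma_fence *f)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct dma_fence *f)
{
	return "demo-timeline";
}

static bool demo_enable_signaling(struct dma_fence *f)
{
	return true;	/* signaling is always reachable in this sketch */
}

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
	.enable_signaling = demo_enable_signaling,
	.wait = dma_fence_default_wait,
};

/* context: obtained once via dma_fence_context_alloc(1) */
static struct dma_fence *demo_fence_create(u64 context, unsigned int seqno)
{
	struct demo_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &demo_fence_ops, &f->lock, context, seqno);
	return &f->base;
}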
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc585ca2..8185002d7ec8 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,7 +8,7 @@
*/
#include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
@@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
-struct dma_fence_cb {
- struct fence_cb base;
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
struct i915_sw_fence *fence;
- struct fence *dma;
+ struct dma_fence *dma;
struct timer_list timer;
};
static void timer_i915_sw_fence_wake(unsigned long data)
{
- struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
cb->dma = NULL;
i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
{
- struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
del_timer_sync(&cb->timer);
if (cb->timer.function)
i915_sw_fence_commit(cb->fence);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
kfree(cb);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp)
{
- struct dma_fence_cb *cb;
+ struct i915_sw_dma_fence_cb *cb;
int ret;
- if (fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma))
return 0;
cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return fence_wait(dma, false);
+ return dma_fence_wait(dma, false);
}
cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
timer_i915_sw_fence_wake, (unsigned long)cb,
TIMER_IRQSAFE);
if (timeout) {
- cb->dma = fence_get(dma);
+ cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
}
- ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
if (ret == 0) {
ret = 1;
} else {
@@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp)
{
- struct fence *excl;
+ struct dma_fence *excl;
int ret = 0, pending;
if (write) {
- struct fence **shared;
+ struct dma_fence **shared;
unsigned int count, i;
ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
for (i = 0; i < count; i++)
- fence_put(shared[i]);
+ dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
ret |= pending;
}
- fence_put(excl);
+ dma_fence_put(excl);
return ret;
}
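The await path above is the standard dma_fence callback pattern; here is a minimal sketch with hypothetical demo_* names. Note that dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the callback will never run.

#include <linux/dma-fence.h>

struct demo_cb {
	struct dma_fence_cb base;
	void *priv;
};

static void demo_fence_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct demo_cb *dcb = container_of(cb, typeof(*dcb), base);

	/* runs from the fence's signaling context; keep it atomic-safe */
	(void)dcb->priv;
}

static int demo_await(struct dma_fence *fence, struct demo_cb *dcb)
{
	int ret = dma_fence_add_callback(fence, &dcb->base, demo_fence_wake);

	if (ret == -ENOENT)	/* already signaled; no callback coming */
		ret = 0;
	return ret;
}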
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 373141602ca4..cd239e92f67f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
#include <linux/wait.h>
struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..5c912c25f7d3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
__entry->ring = req->engine->id;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
- fence_enable_sw_signaling(&req->fence);
+ dma_fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 23fc1042fed4..56efcc507ea2 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg)
&request->signaling.wait);
local_bh_disable();
- fence_signal(&request->fence);
+ dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
/* Find the next oldest signal. Note that as we have
@@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- /* locked by fence_enable_sw_signaling() */
+ /* locked by dma_fence_enable_sw_signaling() */
assert_spin_locked(&request->lock);
request->signaling.wait.tsk = b->signaler;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 2dc94812bea5..8cceb345aa0f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&engine->execlist_queue);
spin_lock_init(&engine->execlist_lock);
- engine->fence_context = fence_context_alloc(1);
+ engine->fence_context = dma_fence_context_alloc(1);
intel_engine_init_requests(engine);
intel_engine_init_hangcheck(engine);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index db61aa5f32ef..296f541fbe2f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/of_address.h>
@@ -416,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DPI) {
dev_info(dev, "Adding component match for %s\n",
node->full_name);
- component_match_add(dev, &match, compare_of, node);
+ drm_of_component_match_add(dev, &match, compare_of,
+ node);
} else {
struct mtk_ddp_comp *comp;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f2ad24..83272b456329 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_populate = mgag200_ttm_tt_populate,
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = mgag200_bo_evict_flags,
.move = NULL,
.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75dbf40..7250ffc6322f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@ bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e036b4a..07d99bdf7c99 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 951c002b05df..cf50d3ec8d1b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
return;
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base,
- dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
}
/* helper to install properties which are common to planes and crtcs */
@@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
plane_enabled(old_state), plane_enabled(state));
if (plane_enabled(state)) {
+ unsigned int rotation;
+
format = to_mdp_format(msm_framebuffer_format(state->fb));
if (MDP_FORMAT_IS_YUV(format) &&
!pipe_supports_yuv(mdp5_plane->caps)) {
@@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- hflip = !!(state->rotation & DRM_REFLECT_X);
- vflip = !!(state->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
+
if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
dev_err(plane->dev->dev,
@@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ unsigned int rotation;
bool vflip, hflip;
unsigned long flags;
int ret;
@@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
- hflip = !!(pstate->rotation & DRM_REFLECT_X);
- vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
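Both hunks above lean on drm_rotation_simplify() to fold an arbitrary requested rotation into the flips the pipe can do in hardware. The helper below is an illustrative stand-alone version of that decode; demo_* is a made-up name, and DRM_ROTATE_*/DRM_REFLECT_* are the 4.9-era bit names.

#include <drm/drm_blend.h>

static void demo_decode_flips(unsigned int rotation, bool *hflip, bool *vflip)
{
	unsigned int simplified =
		drm_rotation_simplify(rotation,
				      DRM_ROTATE_0 |
				      DRM_REFLECT_X |
				      DRM_REFLECT_Y);

	*hflip = !!(simplified & DRM_REFLECT_X);
	*vflip = !!(simplified & DRM_REFLECT_Y);
}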
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 663f2b6ef091..3c853733c99a 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -18,6 +18,7 @@
#ifdef CONFIG_DEBUG_FS
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_debugfs.h"
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fb5c0b0a7594..84d38eaea585 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_of.h>
+
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
@@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev,
continue;
}
- component_match_add(master_dev, matchptr, compare_of, intf);
-
+ drm_of_component_match_add(master_dev, matchptr, compare_of,
+ intf);
of_node_put(intf);
of_node_put(ep_node);
}
@@ -962,8 +964,8 @@ static int add_display_components(struct device *dev,
put_device(mdp_dev);
/* add the MDP component itself */
- component_match_add(dev, matchptr, compare_of,
- mdp_dev->of_node);
+ drm_of_component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
} else {
/* MDP4 */
mdp_dev = dev;
@@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- component_match_add(dev, matchptr, compare_of, np);
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
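The switch away from bare component_match_add() matters because drm_of_component_match_add() takes its own reference on the device node, so callers like the hunks above can drop theirs immediately. A hedged sketch with a hypothetical demo_* wrapper:

#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/of.h>

static void demo_add_component(struct device *master,
			       struct component_match **match,
			       int (*compare)(struct device *, void *),
			       struct device_node *np)
{
	drm_of_component_match_add(master, match, compare, np);
	of_node_put(np);	/* the helper now holds its own reference */
}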
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f2a806..940bf4992fe2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c95a2e..3f299c537b77 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
fctx->dev = dev;
fctx->name = name;
- fctx->context = fence_context_alloc(1);
+ fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
struct msm_fence {
struct msm_fence_context *fctx;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
return "msm";
}
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return fence_completed(f->fctx, f->base.seqno);
}
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = msm_fence_release,
};
-struct fence *
+struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
f->fctx = fctx;
- fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
- fctx->context, ++fctx->last_fence);
+ dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
return &f->base;
}
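On the consumer side the rename is mechanical; a minimal sketch (hypothetical demo_sync()) of the check-then-wait idiom used throughout msm:

#include <linux/dma-fence.h>

static long demo_sync(struct dma_fence *fence)
{
	if (dma_fence_is_signaled(fence))
		return 0;
	return dma_fence_wait(fence, true);	/* interruptible wait */
}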
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d314b4..56061aa1959d 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..57db7dbbb618 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = reservation_object_get_excl(msm_obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(msm_obj->resv));
if (fence->context != fctx->context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
- if (!fence_is_signaled(fence))
+ if (!dma_fence_is_signaled(fence))
seq_printf(m, "\t%9s: %s %s seq %u\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cfe945e..2cb8551fda70 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -104,7 +104,7 @@ struct msm_gem_submit {
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
- struct fence *fence;
+ struct dma_fence *fence;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
unsigned int nr_cmds;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37a65f3..25e8786fa4ca 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
- fence_put(submit->fence);
+ dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
kfree(submit);
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct fence *in_fence = NULL;
+ struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if (in_fence->context != gpu->fctx->context) {
- ret = fence_wait(in_fence, true);
+ ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (in_fence)
- fence_put(in_fence);
+ dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb09838b5ae..3249707e6834 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu)
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
- if (fence_is_signaled(submit->fence)) {
+ if (dma_fence_is_signaled(submit->fence)) {
retire_submit(gpu, submit);
} else {
break;
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2527bf4ca5d9..fde6e3656636 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -22,6 +22,7 @@ nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
nouveau-y += nouveau_drm.o
nouveau-y += nouveau_hwmon.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
nouveau-y += nouveau_usif.o # userspace <-> nvif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index a47d46dda704..b7a54e605469 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name {
DCB_GPIO_TVDAC1 = 0x2d,
DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
DCB_GPIO_VID1 = 0x05,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index 9cb97477248b..e933d3eede70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,10 +1,16 @@
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_resistor_t {
+ u8 mohm;
+ bool enabled;
+};
+
struct pwr_rail_t {
u8 mode;
u8 extdev_id;
- u8 resistor_mohm;
- u8 rail;
+ u8 resistor_count;
+ struct pwr_rail_resistor_t resistors[3];
+ u16 config;
};
struct nvbios_iccsense {
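Carrying an array of shunt resistors suggests the parser now has to combine parallel shunts into one effective value. A speculative sketch in integer milliohm math follows; demo_* is hypothetical and not the nvkm implementation.

static u32 demo_parallel_mohm(const struct pwr_rail_resistor_t *r, int n)
{
	u32 conductance = 0;	/* scaled by 100000 to stay in integers */
	int i;

	for (i = 0; i < n; i++)
		if (r[i].enabled && r[i].mohm)
			conductance += 100000 / r[i].mohm;
	return conductance ? 100000 / conductance : 0;
}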
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 6633c6db9281..8fa1294c27b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,6 +1,9 @@
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
+ u8 max0;
+ u8 max1;
+ u8 max2;
};
u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
@@ -8,7 +11,7 @@ u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *);
struct nvbios_vmap_entry {
- u8 unk0;
+ u8 mode;
u8 link;
u32 min;
u32 max;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index b0df610cec2b..23f3d1b93ebb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -13,8 +13,9 @@ struct nvbios_volt {
u32 base;
/* GPIO mode */
- u8 vidmask;
- s16 step;
+ bool ranged;
+ u8 vidmask;
+ s16 step;
/* PWM mode */
u32 pwm_freq;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
new file mode 100644
index 000000000000..87f804fc3a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -0,0 +1,24 @@
+#ifndef __NVBIOS_VPSTATE_H__
+#define __NVBIOS_VPSTATE_H__
+struct nvbios_vpstate_header {
+ u32 offset;
+
+ u8 version;
+ u8 hlen;
+ u8 ecount;
+ u8 elen;
+ u8 scount;
+ u8 slen;
+
+ u8 base_id;
+ u8 boost_id;
+ u8 tdp_id;
+};
+struct nvbios_vpstate_entry {
+ u8 pstate;
+ u16 clock_mhz;
+};
+int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *);
+int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *,
+ u8 idx, struct nvbios_vpstate_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index fb54417bc458..e5275f742977 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -6,6 +6,10 @@
struct nvbios_pll;
struct nvkm_pll_vals;
+#define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */
+#define NVKM_CLK_CSTATE_BASE -2 /* pstate base */
+#define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */
+
enum nv_clk_src {
nv_clk_src_crystal,
nv_clk_src_href,
@@ -52,6 +56,7 @@ struct nvkm_cstate {
struct list_head head;
u8 voltage;
u32 domain[nv_clk_src_max];
+ u8 id;
};
struct nvkm_pstate {
@@ -67,7 +72,8 @@ struct nvkm_pstate {
struct nvkm_domain {
enum nv_clk_src name;
u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_VPSTATE 0x02
u8 flags;
const char *mname;
int mdiv;
@@ -93,10 +99,16 @@ struct nvkm_clk {
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
- int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
+ u8 temp;
bool allow_reclock;
+#define NVKM_CLK_BOOST_NONE 0x0
+#define NVKM_CLK_BOOST_BIOS 0x1
+#define NVKM_CLK_BOOST_FULL 0x2
+ u8 boost_mode;
+ u32 base_khz;
+ u32 boost_khz;
/*XXX: die, these are here *only* to support the completely
* bat-shit insane what-was-nouveau_hw.c code
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
-int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b765f4ffcde6..08ef9983c643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -15,12 +15,28 @@ struct nvkm_volt {
u32 max_uv;
u32 min_uv;
+
+ /*
+ * These are fully functional voltage map entries creating a software
+ * ceiling for the voltage. Each can describe a different kind of
+ * curve, so for any given temperature a different one may yield the
+ * lowest of the three values.
+ */
+ u8 max0_id;
+ u8 max1_id;
+ u8 max2_id;
+
+ int speedo;
};
+int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature);
+int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id);
int nvkm_volt_get(struct nvkm_volt *);
-int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+ int condition);
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f5101be806cb..5e2c5685b4dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -232,6 +232,7 @@ nouveau_backlight_init(struct drm_device *dev)
case NV_DEVICE_INFO_V0_TESLA:
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return nv50_backlight_init(connector);
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 0067586eb015..18eb061ccafb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -31,10 +31,8 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
#define ROMPTR(d,x) ({ \
struct nouveau_drm *drm = nouveau_drm((d)); \
ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
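The rewritten macros avoid dereferencing a potentially unaligned pointer into the VBIOS image, which traps on strict-alignment architectures. A small sketch of the safe idiom (demo_rom16() is a made-up helper):

#include <asm/unaligned.h>

static u16 demo_rom16(const u8 *vbios, unsigned int offset)
{
	/* offset may be odd; get_unaligned_le16() is safe everywhere */
	return get_unaligned_le16(vbios + offset);
}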
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472c..e0c0007689e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
- tile->fence = (struct nouveau_fence *)fence_get(fence);
+ tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = reservation_object_get_excl(bo->resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88a015..6adf94789417 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -47,6 +47,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_vga.h"
+#include "nouveau_led.h"
#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+ nouveau_led_init(dev);
if (nouveau_runtime_pm != 0) {
pm_runtime_use_autosuspend(dev->dev);
@@ -510,6 +512,7 @@ nouveau_drm_unload(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
+ nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
+ nouveau_led_suspend(dev);
+
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
nouveau_fbcon_set_suspend(dev, 0);
}
+ nouveau_led_resume(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a0212cd48..c0e2b3207503 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -166,6 +166,9 @@ struct nouveau_drm {
struct nouveau_hwmon *hwmon;
struct nouveau_debugfs *debugfs;
+ /* led management */
+ struct nouveau_led *led;
+
/* display power reference */
bool have_disp_power_ref;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..e9529ee6bc23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
#include <nvif/notify.h>
@@ -38,11 +38,11 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
{
return container_of(fence, struct nouveau_fence, base);
}
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
{
int drop = 0;
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
rcu_assign_pointer(fence->channel, NULL);
- if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
drop = 1;
}
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return drop;
}
static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
struct nouveau_fence_priv *priv = (void*)drm->fence;
if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_fence_work {
struct work_struct work;
- struct fence_cb cb;
+ struct dma_fence_cb cb;
void (*func)(void *);
void *data;
};
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
kfree(work);
}
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
}
void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto err;
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
work->func = func;
work->data = data;
- if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
goto err_free;
return;
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
- fence_init(&fence->base, &nouveau_fence_ops_uevent,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+ &fctx->lock, fctx->context, ++fctx->sequence);
else
- fence_init(&fence->base, &nouveau_fence_ops_legacy,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+ &fctx->lock, fctx->context, ++fctx->sequence);
kref_get(&fctx->fence_ref);
- trace_fence_emit(&fence->base);
+ trace_dma_fence_emit(&fence->base);
ret = fctx->emit(fence);
if (!ret) {
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
struct nouveau_channel *chan;
unsigned long flags;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
struct nouveau_fence *fence = from_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
- ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+ ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
else if (!ret)
@@ -391,7 +391,7 @@ int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct fence *fence;
+ struct dma_fence *fence;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
return ret;
}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
}
return ret;
@@ -456,7 +456,7 @@ void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
- fence_put(&(*pfence)->base);
+ dma_fence_put(&(*pfence)->base);
*pfence = NULL;
}
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
return ret;
}
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
}
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
* result. The drm node should still be there, so we can derive the index from
* the fence context.
*/
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
return ret;
}
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
/*
- * This needs uevents to work correctly, but fence_add_callback relies on
+ * This needs uevents to work correctly, but dma_fence_add_callback relies on
* being able to enable signaling. It will still get signaled eventually,
* just not right away.
*/
if (nouveau_fence_is_signaled(f)) {
list_del(&fence->head);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return false;
}
return true;
}
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
.release = nouveau_fence_release
};
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
ret = nouveau_fence_no_signaling(f);
if (ret)
- set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+ set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
else if (!--fctx->notify_ref)
nvif_notify_put(&fctx->notify);
return ret;
}
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = NULL
};
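nouveau_fence_wait() above depends on the dma_fence_wait_timeout() contract: positive remaining jiffies on success, zero on timeout, negative on error. A minimal sketch of that contract (demo_wait_15s() is hypothetical):

#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static long demo_wait_15s(struct dma_fence *fence)
{
	long ret = dma_fence_wait_timeout(fence, true, 15 * HZ);

	if (ret == 0)
		return -EBUSY;	/* timed out */
	return ret < 0 ? ret : 0;
}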
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7115ad..41f3c019e534 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <nvif/notify.h>
struct nouveau_drm;
struct nouveau_bo;
struct nouveau_fence {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0bd7164bc817..7f083c95f422 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
new file mode 100644
index 000000000000..3e2f1b6cd4df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Martin Peres <martin.peres@free.fr>
+ */
+
+#include <linux/leds.h>
+
+#include "nouveau_led.h"
+#include <nvkm/subdev/gpio.h>
+
+static enum led_brightness
+nouveau_led_get_brightness(struct led_classdev *led)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+ u32 div, duty;
+
+ div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
+ duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
+
+ if (div > 0)
+ return duty * LED_FULL / div;
+ else
+ return 0;
+}
+
+static void
+nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+
+ u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
+ u32 freq = 100; /* this is what nvidia uses and it should be good enough */
+ u32 div, duty;
+
+ div = input_clk / freq;
+ duty = value * div / LED_FULL;
+
+ /* for now, it is safe to poke these registers directly because:
+ * - A: nvidia never drives the logo LED from any PWM controller
+ * other than PDISPLAY.SOR[1].PWM.
+ * - B: nouveau does not touch these registers anywhere else
+ */
+ nvif_wr32(device, 0x61c880, div);
+ nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
+}
+
+
+int
+nouveau_led_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct dcb_gpio_func logo_led;
+ int ret;
+
+ if (!gpio)
+ return 0;
+
+ /* check that there is a GPIO controlling the logo LED */
+ if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
+ return 0;
+
+ drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
+ if (!drm->led)
+ return -ENOMEM;
+ drm->led->dev = dev;
+
+ drm->led->led.name = "nvidia-logo";
+ drm->led->led.max_brightness = 255;
+ drm->led->led.brightness_get = nouveau_led_get_brightness;
+ drm->led->led.brightness_set = nouveau_led_set_brightness;
+
+ ret = led_classdev_register(dev->dev, &drm->led->led);
+ if (ret) {
+ kfree(drm->led);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_led_suspend(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_suspend(&drm->led->led);
+}
+
+void
+nouveau_led_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_resume(&drm->led->led);
+}
+
+void
+nouveau_led_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led) {
+ led_classdev_unregister(&drm->led->led);
+ kfree(drm->led);
+ drm->led = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
new file mode 100644
index 000000000000..187ecdb82002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@free.fr>
+ */
+
+#ifndef __NOUVEAU_LED_H__
+#define __NOUVEAU_LED_H__
+
+#include "nouveau_drv.h"
+
+struct led_classdev;
+
+struct nouveau_led {
+ struct drm_device *dev;
+
+ struct led_classdev led;
+};
+
+static inline struct nouveau_led *
+nouveau_led(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->led;
+}
+
+/* nouveau_led.c */
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int nouveau_led_init(struct drm_device *dev);
+void nouveau_led_suspend(struct drm_device *dev);
+void nouveau_led_resume(struct drm_device *dev);
+void nouveau_led_fini(struct drm_device *dev);
+#else
+static inline int nouveau_led_init(struct drm_device *dev) { return 0; };
+static inline void nouveau_led_suspend(struct drm_device *dev) { };
+static inline void nouveau_led_resume(struct drm_device *dev) { };
+static inline void nouveau_led_fini(struct drm_device *dev) { };
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b82a59..fa8f2375c398 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
priv->base.contexts = 15;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34ff6f4..f99fcf56928a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562a55c5..79bc01111351 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202b7770..8c5295414578 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 127;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d8e6d6..23ef04b4e0b2 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv84_fence_context_del;
priv->base.contexts = fifo->nr;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
/* Use VRAM if there is any; otherwise fall back to system memory */
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 34ecd4a7e0c1..058ff46b5f16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -20,6 +20,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <core/device.h>
+#include <core/firmware.h>
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7218a067a6c5..53d171729353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1357,7 +1357,7 @@ nvc0_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1394,7 +1394,7 @@ nvc1_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1430,7 +1430,7 @@ nvc3_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1466,7 +1466,7 @@ nvc4_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1503,7 +1503,7 @@ nvc8_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1540,7 +1540,7 @@ nvce_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1577,7 +1577,7 @@ nvcf_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1612,6 +1612,7 @@ nvd7_chipset = {
.pci = gf106_pci_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1647,7 +1648,7 @@ nvd9_chipset = {
.pmu = gf119_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..0030cd9543b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
- return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
- pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
- pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
- NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
- (u64)pci_domain_nr(pci_dev->bus) << 32 |
- pci_dev->bus->number << 16 |
- PCI_SLOT(pci_dev->devfn) << 8 |
- PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+ (u64)pci_domain_nr(pci_dev->bus) << 32 |
+ pci_dev->bus->number << 16 |
+ PCI_SLOT(pci_dev->devfn) << 8 |
+ PCI_FUNC(pci_dev->devfn), name,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &pdev->device);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Set a preliminary DMA mask based on the .dma_bits member of the
+ * MMU subdevice. This allows other subdevices to create DMA mappings
+ * in their init() or oneinit() methods, which may be called before the
+ * TTM layer sets the DMA mask definitively.
+ * This is necessary for platforms where the default DMA mask of 32 bits
+ * does not cover any system memory, i.e., when all RAM is > 4 GB.
+ */
+ if (subdev_mask & BIT(NVKM_SUBDEV_MMU))
+ dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+
+ return 0;
}
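
The preliminary-mask pattern above generalizes: any PCI driver that needs DMA mappings before its memory manager settles on a final mask can negotiate one at probe time. A minimal sketch, with a hypothetical 40-bit capability standing in for pdev->device.mmu->dma_bits:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev)
    {
            /* Try the wide mask first, fall back to the 32-bit default. */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)) &&
                dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                    return -ENODEV;

            /* Mappings created from here on honour the negotiated mask. */
            return 0;
    }
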
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 1bb9d661e9b3..4510cb6e10a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -45,14 +45,6 @@ static const struct nvkm_output_func
g94_sor_output_func = {
};
-int
-g94_sor_output_new(struct nvkm_disp *disp, int index,
- struct dcb_output *dcbE, struct nvkm_output **poutp)
-{
- return nvkm_output_new_(&g94_sor_output_func, disp,
- index, dcbE, poutp);
-}
-
/*******************************************************************************
* DisplayPort
******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index f1e15a4d4f64..b4e3c50badc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -187,6 +187,7 @@ nv30_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 300f5ed5de0b..e7ed04b935cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -123,6 +123,7 @@ nv34_gr = {
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 740df0f52c38..5e8abacbacc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -124,6 +124,7 @@ nv35_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 370dcd8ff7b5..6eff637ac301 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index dbcb0ef21587..be57220a2e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/subdev/bios/timing.o
nvkm-y += nvkm/subdev/bios/therm.o
nvkm-y += nvkm/subdev/bios/vmap.o
nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/vpstate.o
nvkm-y += nvkm/subdev/bios/xpio.o
nvkm-y += nvkm/subdev/bios/M0203.o
nvkm-y += nvkm/subdev/bios/M0205.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 084328028af1..aafd5e17b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -23,6 +23,7 @@
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
static u16
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
return -ENOMEM;
for (i = 0; i < cnt; ++i) {
+ struct nvbios_extdev_func extdev;
struct pwr_rail_t *rail = &iccsense->rail[i];
+ u8 res_start = 0;
+ int r;
+
entry = table + hdr + i * len;
switch(ver) {
case 0x10:
rail->mode = nvbios_rd08(bios, entry + 0x1);
rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
- rail->rail = nvbios_rd08(bios, entry + 0x4);
+ res_start = 0x3;
break;
case 0x20:
rail->mode = nvbios_rd08(bios, entry);
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
- rail->rail = nvbios_rd08(bios, entry + 0x6);
+ res_start = 0x5;
+ break;
+ };
+
+ if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
+ continue;
+
+ switch (extdev.type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ rail->resistor_count = 1;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ rail->resistor_count = 3;
+ break;
+ default:
+ rail->resistor_count = 0;
break;
};
+
+ for (r = 0; r < rail->resistor_count; ++r) {
+ rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
+ rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
+ }
+ rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
}
return 0;
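
The parser now records up to three shunt resistors per rail plus the sensor's raw configuration word, instead of a single resistor/rail pair. A sketch of the struct pwr_rail_t layout this implies (the real definition lives in subdev/bios/iccsense.h; field names are inferred from the code above):

    struct pwr_rail_resistor_t {
            u8 mohm;       /* shunt value in milliohm */
            bool enabled;  /* bit 6 of the resistor's second byte clear */
    };

    struct pwr_rail_t {
            u8 mode;
            u8 extdev_id;      /* index into the extdev table */
            u8 resistor_count; /* 1 for INA209/INA219, 3 for INA3221 */
            struct pwr_rail_resistor_t resistors[3];
            u16 config;        /* written verbatim to register 0x00 */
    };
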
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index 2f13db745948..32bd8b1d154f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -61,7 +61,17 @@ nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
+ info->max0 = 0xff;
+ info->max1 = 0xff;
+ info->max2 = 0xff;
+ break;
case 0x20:
+ info->max0 = nvbios_rd08(bios, vmap + 0x7);
+ info->max1 = nvbios_rd08(bios, vmap + 0x8);
+ if (*len >= 0xc)
+ info->max2 = nvbios_rd08(bios, vmap + 0xc);
+ else
+ info->max2 = 0xff;
break;
}
return vmap;
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
break;
case 0x20:
- info->unk0 = nvbios_rd08(bios, vmap + 0x00);
+ info->mode = nvbios_rd08(bios, vmap + 0x00);
info->link = nvbios_rd08(bios, vmap + 0x01);
info->min = nvbios_rd32(bios, vmap + 0x02);
info->max = nvbios_rd32(bios, vmap + 0x06);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 6e0a33648be9..4504822ace51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -75,20 +75,24 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
case 0x12:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x20:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x05);
+ info->ranged = false;
break;
case 0x30:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x40:
info->type = NVBIOS_VOLT_GPIO;
info->base = nvbios_rd32(bios, volt + 0x04);
info->step = nvbios_rd16(bios, volt + 0x08);
info->vidmask = nvbios_rd08(bios, volt + 0x0b);
+ info->ranged = true; /* XXX: find the flag byte */
/*XXX*/
info->min = 0;
info->max = info->base;
@@ -104,9 +108,11 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
info->pwm_range = nvbios_rd32(bios, volt + 0x16);
} else {
- info->type = NVBIOS_VOLT_GPIO;
- info->vidmask = nvbios_rd08(bios, volt + 0x06);
- info->step = nvbios_rd16(bios, volt + 0x16);
+ info->type = NVBIOS_VOLT_GPIO;
+ info->vidmask = nvbios_rd08(bios, volt + 0x06);
+ info->step = nvbios_rd16(bios, volt + 0x16);
+ info->ranged =
+ !!(nvbios_rd08(bios, volt + 0x4) & 0x2);
}
break;
}
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
+ break;
case 0x50:
+ info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
+ info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff;
break;
}
return volt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
new file mode 100644
index 000000000000..f199270163d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vpstate.h>
+
+static u32
+nvbios_vpstate_offset(struct nvkm_bios *b)
+{
+ struct bit_entry bit_P;
+
+ if (!bit_entry(b, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ return nvbios_rd32(b, bit_P.offset + 0x38);
+ }
+
+ return 0x0000;
+}
+
+int
+nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
+{
+ if (!h)
+ return -EINVAL;
+
+ h->offset = nvbios_vpstate_offset(b);
+ if (!h->offset)
+ return -ENODEV;
+
+ h->version = nvbios_rd08(b, h->offset);
+ switch (h->version) {
+ case 0x10:
+ h->hlen = nvbios_rd08(b, h->offset + 0x1);
+ h->elen = nvbios_rd08(b, h->offset + 0x2);
+ h->slen = nvbios_rd08(b, h->offset + 0x3);
+ h->scount = nvbios_rd08(b, h->offset + 0x4);
+ h->ecount = nvbios_rd08(b, h->offset + 0x5);
+
+ h->base_id = nvbios_rd08(b, h->offset + 0x0f);
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
+ u8 idx, struct nvbios_vpstate_entry *e)
+{
+ u32 offset;
+
+ if (!e || !h || idx > h->ecount)
+ return -EINVAL;
+
+ offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
+ e->pstate = nvbios_rd08(b, offset);
+ e->clock_mhz = nvbios_rd16(b, offset + 0x5);
+ return 0;
+}
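
A minimal usage sketch of the new parser, mirroring how nvkm_clk_ctor() consumes it further down in this diff (base_khz/boost_khz are placeholder variables):

    struct nvbios_vpstate_header h;
    struct nvbios_vpstate_entry base, boost;
    u32 base_khz = 0, boost_khz = 0;

    if (bios && !nvbios_vpstate_parse(bios, &h)) {
            if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
                    base_khz = base.clock_mhz * 1000;
            if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
                    boost_khz = boost.clock_mhz * 1000;
    }
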
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 7102c25320fc..fa1c12185e19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -27,6 +27,7 @@
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
+#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
/******************************************************************************
* C-States
*****************************************************************************/
+static bool
+nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
+ u32 max_volt, int temp)
+{
+ const struct nvkm_domain *domain = clk->domains;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
+ int voltage;
+
+ while (domain && domain->name != nv_clk_src_max) {
+ if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
+ u32 freq = cstate->domain[domain->name];
+ switch (clk->boost_mode) {
+ case NVKM_CLK_BOOST_NONE:
+ if (clk->base_khz && freq > clk->base_khz)
+ return false;
+ case NVKM_CLK_BOOST_BIOS:
+ if (clk->boost_khz && freq > clk->boost_khz)
+ return false;
+ }
+ }
+ domain++;
+ }
+
+ if (!volt)
+ return true;
+
+ voltage = nvkm_volt_map(volt, cstate->voltage, temp);
+ if (voltage < 0)
+ return false;
+ return voltage <= min(max_volt, volt->max_uv);
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
+ struct nvkm_cstate *start)
+{
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_volt *volt = device->volt;
+ struct nvkm_cstate *cstate;
+ int max_volt;
+
+ if (!pstate || !start)
+ return NULL;
+
+ if (!volt)
+ return start;
+
+ max_volt = volt->max_uv;
+ if (volt->max0_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max0_id, clk->temp));
+ if (volt->max1_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max1_id, clk->temp));
+ if (volt->max2_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max2_id, clk->temp));
+
+ for (cstate = start; &cstate->head != &pstate->list;
+ cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+ if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
+ break;
+ }
+
+ return cstate;
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
+{
+ struct nvkm_cstate *cstate;
+ if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
+ return list_last_entry(&pstate->list, typeof(*cstate), head);
+ else {
+ list_for_each_entry(cstate, &pstate->list, head) {
+ if (cstate->id == cstatei)
+ return cstate;
+ }
+ }
+ return NULL;
+}
+
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
int ret;
if (!list_empty(&pstate->list)) {
- cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+ cstate = nvkm_cstate_get(clk, pstate, cstatei);
+ cstate = nvkm_cstate_find_best(clk, pstate, cstate);
} else {
cstate = &pstate->base;
}
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, +1);
if (ret && ret != -ENODEV) {
nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, -1);
if (ret && ret != -ENODEV)
nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
@@ -138,6 +224,7 @@ static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
struct nvkm_bios *bios = clk->subdev.device->bios;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
@@ -148,12 +235,16 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
if (!data)
return -ENOENT;
+ if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
+ return -EINVAL;
+
cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (!cstate)
return -ENOMEM;
*cstate = pstate->base;
cstate->voltage = cstepX.voltage;
+ cstate->id = idx;
while (domain && domain->name != nv_clk_src_max) {
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
@@ -175,7 +266,7 @@ static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
struct nvkm_subdev *subdev = &clk->subdev;
- struct nvkm_ram *ram = subdev->device->fb->ram;
+ struct nvkm_fb *fb = subdev->device->fb;
struct nvkm_pci *pci = subdev->device->pci;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
- if (ram && ram->func->calc) {
+ if (fb && fb->ram && fb->ram->func->calc) {
+ struct nvkm_ram *ram = fb->ram;
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = ram->func->calc(ram, khz);
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
ram->func->tidy(ram);
}
- return nvkm_cstate_prog(clk, pstate, 0);
+ return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
static void
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work)
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ clk->astate, clk->temp, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
}
int
-nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
- if (!rel) clk->tstate = req;
- if ( rel) clk->tstate += rel;
- clk->tstate = min(clk->tstate, 0);
- clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nvkm_pstate_calc(clk, true);
+ if (clk->temp == temp)
+ return 0;
+ clk->temp = temp;
+ return nvkm_pstate_calc(clk, false);
}
int
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev)
return clk->func->init(clk);
clk->astate = clk->state_nr - 1;
- clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
+ clk->temp = 90; /* reasonable default value */
nvkm_pstate_calc(clk, true);
return 0;
}
@@ -561,10 +652,22 @@ int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int index, bool allow_reclock, struct nvkm_clk *clk)
{
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_bios *bios = device->bios;
int ret, idx, arglen;
const char *mode;
+ struct nvbios_vpstate_header h;
+
+ nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+
+ if (bios && !nvbios_vpstate_parse(bios, &h)) {
+ struct nvbios_vpstate_entry base, boost;
+ if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
+ clk->boost_khz = boost.clock_mhz * 1000;
+ if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
+ clk->base_khz = base.clock_mhz * 1000;
+ }
- nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
clk->func = func;
INIT_LIST_HEAD(&clk->states);
clk->domains = func->domains;
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
if (mode)
clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
+ clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
+ NVKM_CLK_BOOST_NONE);
return 0;
}
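
nvkm_cstate_find_best() walks the cstate list backwards from the requested entry until it finds one whose clocks and mapped voltage fit, and nvkm_cstate_valid() clamps every domain flagged NVKM_CLK_DOM_FLAG_VPSTATE against the base or boost clock; the NONE case intentionally falls through into the BIOS case, so the base-clocked mode also honours the boost limit. The mode comes from the NvBoost config option (e.g. nouveau.config=NvBoost=1 on the kernel command line, assuming the usual cfgopt mechanism). A sketch of the values implied by the code above — the real definitions live in the nvkm clk header, and the FULL value is an assumption here:

    #define NVKM_CLK_BOOST_NONE 0x0 /* never clock above the base clock */
    #define NVKM_CLK_BOOST_BIOS 0x1 /* allow up to the BIOS boost clock */
    #define NVKM_CLK_BOOST_FULL 0x2 /* no vpstate-based clamping */
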
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 89d5543118cf..7f67f9f5a550 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -457,7 +457,7 @@ gf100_clk = {
{ nv_clk_src_hubk06 , 0x00 },
{ nv_clk_src_hubk01 , 0x01 },
{ nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_rop , 0x04 },
{ nv_clk_src_mem , 0x05, 0, "memory", 1000 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 06bc0d2d6ae1..0b37e3da7feb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -491,7 +491,7 @@ gk104_clk = {
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_mem , 0x03, 0, "memory", 500 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..3841ad6be99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
}
int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
{
- struct nvkm_device *device = fb->subdev.device;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
int ret, size = 0x1000;
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_rd);
+ false, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_wr);
+ false, &fb->base.mmu_wr);
if (ret)
return ret;
+ fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c10_page) {
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
+ return -EFAULT;
+ }
+
return 0;
}
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_fb_ctor(func, device, index, &fb->base);
*pfb = &fb->base;
- fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c10))
- return -EFAULT;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..0595e0722bfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
nvkm_fifo_chan_put(fifo, flags, &chan);
}
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c08_page) {
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static void
nv50_fb_init(struct nvkm_fb *base)
{
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
fb->func = func;
*pfb = &fb->base;
- fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
- fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c08))
- return -EFAULT;
- } else {
- nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
- }
-
return 0;
}
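
Both scratch-page allocations (gf100's 0x100c10 page above and nv50's 0x100c08 page here) move from the constructor to oneinit(). This matches the DMA-mask comment in nvkm_device_pci_new() earlier in this diff: the constructor runs before the preliminary DMA mask is set, so a dma_map_page() there could hand back an address the device cannot reach. A sketch of the lifecycle ordering being relied on (assumed from the code; the real dispatch lives in the nvkm subdev core):

    /* Assumed nvkm subdev lifecycle, earliest first:
     *
     *   ctor()    - called from nvkm_device_pci_new(); the DMA mask is
     *               still the 32-bit default, so no DMA mappings yet
     *   oneinit() - runs once, after the preliminary DMA mask is set;
     *               safe place for long-lived dma_map_page() mappings
     *   init()    - runs on every init/resume
     */
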
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b9ec0ae6723a..b60068b7d8f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -24,6 +24,7 @@ int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
int gk104_ram_init(struct nvkm_ram *ram);
/* RAM type-specific MR calculation routines */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1fa3ade468ae..7904fa41acef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
if ((ram->base.mr[1] & 0x03c) != 0x030) {
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_nsec(fuc, 1000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -1530,6 +1538,12 @@ gk104_ram_func = {
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
+ return gk104_ram_ctor(fb, pram, 0x022554);
+}
+
+int
+gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
+ ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 43d807f6ca71..ac862d1d77bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,18 +23,8 @@
*/
#include "ram.h"
-static const struct nvkm_ram_func
-gm107_ram_func = {
- .init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
-};
-
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
- return -ENOMEM;
-
- return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
+ return gk104_ram_ctor(fb, pram, 0x021c14);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index b7159b338fac..1a4ab825852c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
- usleep_range(20, 30);
+ udelay(20);
nvkm_mask(device, 0x000200, 0x20, 0x20);
nvkm_wr32(device, 0x12004c, 0x4);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 41bd5d0f7692..658355fc9354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
}
static void
-nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0078: 0x0078 128 samples shunt
- * 0x0780: 0x0780 128 samples bus
- * 0x1800: 0x0000 +-40 mV shunt range
- * 0x2000: 0x0000 16V FSR
- */
- u16 value = 0x07ff;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
-nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0031: 0x0000 140 us conversion time shunt
- * 0x01c0: 0x0000 140 us conversion time bus
- * 0x0f00: 0x0f00 1024 samples
- * 0x7000: 0x?000 channels
- */
- u16 value = 0x0e07;
- if (sensor->rail_mask & 0x1)
- value |= 0x1 << 14;
- if (sensor->rail_mask & 0x2)
- value |= 0x1 << 13;
- if (sensor->rail_mask & 0x4)
- value |= 0x1 << 12;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_sensor *sensor)
{
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- case NVBIOS_EXTDEV_INA219:
- nvkm_iccsense_ina209_config(iccsense, sensor);
- break;
- case NVBIOS_EXTDEV_INA3221:
- nvkm_iccsense_ina3221_config(iccsense, sensor);
- break;
- default:
- break;
- }
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
}
int
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
static struct nvkm_iccsense_sensor*
nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
-
struct nvkm_subdev *subdev = &iccsense->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_i2c *i2c = subdev->device->i2c;
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
sensor->type = extdev.type;
sensor->i2c = &i2c_bus->i2c;
sensor->addr = addr;
- sensor->rail_mask = 0x0;
+ sensor->config = 0x0;
return sensor;
}
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
- struct pwr_rail_t *r = &stbl.rail[i];
- struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_t *pwr_rail = &stbl.rail[i];
struct nvkm_iccsense_sensor *sensor;
- int (*read)(struct nvkm_iccsense *,
- struct nvkm_iccsense_rail *);
+ int r;
- if (!r->mode || r->resistor_mohm == 0)
+ if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
continue;
- sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+ sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
if (!sensor)
continue;
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- if (r->rail != 0)
- continue;
- read = nvkm_iccsense_ina209_read;
- break;
- case NVBIOS_EXTDEV_INA219:
- if (r->rail != 0)
+ if (!sensor->config)
+ sensor->config = pwr_rail->config;
+ else if (sensor->config != pwr_rail->config)
+ nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id);
+
+ for (r = 0; r < pwr_rail->resistor_count; ++r) {
+ struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
+
+ if (!res->mohm || !res->enabled)
continue;
- read = nvkm_iccsense_ina219_read;
- break;
- case NVBIOS_EXTDEV_INA3221:
- if (r->rail >= 3)
+
+ switch (sensor->type) {
+ case NVBIOS_EXTDEV_INA209:
+ read = nvkm_iccsense_ina209_read;
+ break;
+ case NVBIOS_EXTDEV_INA219:
+ read = nvkm_iccsense_ina219_read;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ read = nvkm_iccsense_ina3221_read;
+ break;
+ default:
continue;
- read = nvkm_iccsense_ina3221_read;
- break;
- default:
- continue;
+ }
+
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
+
+ rail->read = read;
+ rail->sensor = sensor;
+ rail->idx = r;
+ rail->mohm = res->mohm;
+ nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm);
+ list_add_tail(&rail->head, &iccsense->rails);
}
-
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
- sensor->rail_mask |= 1 << r->rail;
- rail->read = read;
- rail->sensor = sensor;
- rail->idx = r->rail;
- rail->mohm = r->resistor_mohm;
- list_add_tail(&rail->head, &iccsense->rails);
}
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index b72c31d2f908..e90e0f6ed008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor {
enum nvbios_extdev_type type;
struct i2c_adapter *i2c;
u8 addr;
- u8 rail_mask;
+ u16 config;
};
struct nvkm_iccsense_rail {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 45a2f8e784f9..9abfa5e2fe9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -23,8 +23,8 @@
*/
#include "mxms.h"
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
static u8 *
mxms_data(struct nvkm_mxm *mxm)
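
The old macros dereferenced a cast pointer, which is undefined behaviour when the offset is not naturally aligned and faults or returns wrong data on strict-alignment architectures; get_unaligned_le16()/get_unaligned_le32() compile to byte loads where needed and to a plain load on x86. A small illustration (hypothetical helper):

    #include <asm/unaligned.h>

    static u16 rom16_example(const u8 *data, unsigned int offset)
    {
            /* Old form: le16_to_cpu(*(u16 *)&data[offset]) - an
             * unaligned dereference whenever offset is odd. */
            return get_unaligned_le16(&data[offset]); /* safe everywhere */
    }
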
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index c34076223b7b..bcd179ba11d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,6 +1,7 @@
nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gf100.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 1c3d23b0e84a..e8569b04b55d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
+#include <subdev/therm.h>
int
nvkm_volt_get(struct nvkm_volt *volt)
@@ -50,23 +51,35 @@ static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
struct nvkm_subdev *subdev = &volt->subdev;
- int i, ret = -EINVAL;
+ int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
if (volt->func->volt_set)
return volt->func->volt_set(volt, uv);
for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].uv == uv) {
- ret = volt->func->vid_set(volt, volt->vid[i].vid);
- nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+ int err = volt->vid[i].uv - uv;
+ if (err < 0 || err > best_err)
+ continue;
+
+ best_err = err;
+ best = i;
+ if (best_err == 0)
break;
- }
}
+
+ if (best == -1) {
+ nvkm_error(subdev, "couldn't set %iuv\n", uv);
+ return ret;
+ }
+
+ ret = volt->func->vid_set(volt, volt->vid[best].vid);
+ nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
+ volt->vid[best].uv, ret);
return ret;
}
-static int
-nvkm_volt_map(struct nvkm_volt *volt, u8 id)
+int
+nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
@@ -76,7 +89,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
- int ret = nvkm_volt_map(volt, info.link);
+ int ret = nvkm_volt_map_min(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
}
int
-nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
+{
+ struct nvkm_bios *bios = volt->subdev.device->bios;
+ struct nvbios_vmap_entry info;
+ u8 ver, len;
+ u16 vmap;
+
+ vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+ if (vmap) {
+ s64 result;
+
+ if (volt->speedo < 0)
+ return volt->speedo;
+
+ if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
+ result = div64_s64((s64)info.arg[0], 10);
+ result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
+ result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
+ } else if (ver == 0x20) {
+ switch (info.mode) {
+ /* 0x0 handled above! */
+ case 0x1:
+ result = ((s64)info.arg[0] * 15625) >> 18;
+ result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
+ result += ((s64)info.arg[2] * temp * 15625) >> 10;
+ result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
+ result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
+ result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
+ break;
+ case 0x3:
+ result = (info.min + info.max) / 2;
+ break;
+ case 0x2:
+ default:
+ result = info.min;
+ break;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ result = min(max(result, (s64)info.min), (s64)info.max);
+
+ if (info.link != 0xff) {
+ int ret = nvkm_volt_map(volt, info.link, temp);
+ if (ret < 0)
+ return ret;
+ result += ret;
+ }
+ return result;
+ }
+
+ return id ? id * 10000 : -ENODEV;
+}
+
+int
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
+ int condition)
{
int ret;
if (volt->func->set_id)
return volt->func->set_id(volt, id, condition);
- ret = nvkm_volt_map(volt, id);
+ ret = nvkm_volt_map(volt, id, temp);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
+ int min = nvkm_volt_map(volt, min_id, temp);
+ if (min >= 0)
+ ret = max(min, ret);
ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
@@ -112,6 +185,7 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
static void
nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
u8 ver, hdr, cnt, len;
@@ -119,7 +193,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
int i;
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
+ if (data && info.vidmask && info.base && info.step && info.ranged) {
+ nvkm_debug(subdev, "found ranged based VIDs\n");
volt->min_uv = info.min;
volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
info.base += info.step;
}
volt->vid_mask = info.vidmask;
- } else if (data && info.vidmask) {
+ } else if (data && info.vidmask && !info.ranged) {
+ nvkm_debug(subdev, "found entry based VIDs\n");
volt->min_uv = 0xffffffff;
volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
static int
+nvkm_volt_speedo_read(struct nvkm_volt *volt)
+{
+ if (volt->func->speedo_read)
+ return volt->func->speedo_read(volt);
+ return -EINVAL;
+}
+
+static int
nvkm_volt_init(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev)
return 0;
}
+static int
+nvkm_volt_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_volt *volt = nvkm_volt(subdev);
+
+ volt->speedo = nvkm_volt_speedo_read(volt);
+ if (volt->speedo > 0)
+ nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
+
+ if (volt->func->oneinit)
+ return volt->func->oneinit(volt);
+
+ return 0;
+}
+
static void *
nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func
nvkm_volt = {
.dtor = nvkm_volt_dtor,
.init = nvkm_volt_init,
+ .oneinit = nvkm_volt_oneinit,
};
void
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
/* Assuming the non-bios device should build the voltage table later */
if (bios) {
+ u8 ver, hdr, cnt, len;
+ struct nvbios_vmap vmap;
+
nvkm_volt_parse_bios(bios, volt);
nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
volt->min_uv, volt->max_uv);
+
+ if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
+ volt->max0_id = vmap.max0;
+ volt->max1_id = vmap.max1;
+ volt->max2_id = vmap.max2;
+ } else {
+ volt->max0_id = 0xff;
+ volt->max1_id = 0xff;
+ volt->max2_id = 0xff;
+ }
}
if (volt->vid_nr) {
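
Reading the shifts out of the new nvkm_volt_map(), the two polynomial forms evaluate, in microvolts with S the speedo value and T the temperature in °C, to:

$$V^{(0x10)}(S) = \frac{a_0 + a_1 S}{10} + \frac{a_2 S^2}{10^5}$$

$$V^{(0x20,\,\mathrm{mode}\,1)}(S,T) = \frac{15625}{2^{18}}\left(a_0 + a_1 S + 2^{8} a_2 T + a_3 S T + 2^{-12} a_4 S^2 + a_5 T^2\right)$$

Since 15625/2^18 = 10^6/2^24, the version-0x20 coefficients are fixed-point volts with 24 fractional bits (a_2 effectively 16 bits, a_4 36). The result is clamped to the entry's [min, max], and a linked entry's voltage is added recursively via info.link.
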
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
new file mode 100644
index 000000000000..d9ed6925ca64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf100_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+
+ if (!fuse)
+ return -EINVAL;
+
+ return nvkm_fuse_read(fuse, 0x1cc);
+}
+
+int
+gf100_volt_oneinit(struct nvkm_volt *volt)
+{
+ struct nvkm_subdev *subdev = &volt->subdev;
+ if (volt->speedo <= 0)
+ nvkm_error(subdev, "couldn't find speedo value, volting not "
+ "possible\n");
+ return 0;
+}
+
+static const struct nvkm_volt_func
+gf100_volt = {
+ .oneinit = gf100_volt_oneinit,
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
+ .speedo_read = gf100_volt_speedo_read,
+};
+
+int
+gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct nvkm_volt *volt;
+ int ret;
+
+ ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ *pvolt = volt;
+ if (ret)
+ return ret;
+
+ return nvkm_voltgpio_init(volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 420bd84d8483..b2c5d1166a13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -27,6 +27,7 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/volt.h>
+#include <subdev/fuse.h>
#define gk104_volt(p) container_of((p), struct gk104_volt, base)
struct gk104_volt {
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
return 0;
}
+static int
+gk104_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+ int ret;
+
+ if (!fuse)
+ return -EINVAL;
+
+ nvkm_wr32(device, 0x122634, 0x0);
+ ret = nvkm_fuse_read(fuse, 0x3a8);
+ nvkm_wr32(device, 0x122634, 0x41);
+ return ret;
+}
+
static const struct nvkm_volt_func
gk104_volt_pwm = {
+ .oneinit = gf100_volt_oneinit,
.volt_get = gk104_volt_get,
.volt_set = gk104_volt_set,
+ .speedo_read = gk104_volt_speedo_read,
}, gk104_volt_gpio = {
+ .oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
+ .speedo_read = gk104_volt_speedo_read,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index d5140d991161..354bafe4b4e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
int index, struct nvkm_volt **);
struct nvkm_volt_func {
+ int (*oneinit)(struct nvkm_volt *);
int (*volt_get)(struct nvkm_volt *);
int (*volt_set)(struct nvkm_volt *, u32 uv);
int (*vid_get)(struct nvkm_volt *);
int (*vid_set)(struct nvkm_volt *, u8 vid);
int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+ int (*speedo_read)(struct nvkm_volt *);
};
int nvkm_voltgpio_init(struct nvkm_volt *);
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8);
int nvkm_voltpwm_init(struct nvkm_volt *volt);
int nvkm_voltpwm_get(struct nvkm_volt *volt);
int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
+
+int gf100_volt_oneinit(struct nvkm_volt *);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad99de7..84995ebc6ffc 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
* Definitions taken from spice-protocol, plus kernel driver specific bits.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
* spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
- struct fence base;
+ struct dma_fence base;
int id;
int type;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f050cf3e..50b4e522f05f 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
*/
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
{
return "qxl";
}
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
return "release";
}
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
{
/* fences are always automatically signaled, so just pretend we did this.. */
return true;
}
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
retry:
sc++;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ retry:
if (!qxl_queue_garbage_collect(qdev, true))
break;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
}
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
+ DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+ "after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
@@ -115,7 +116,7 @@ signaled:
return end - cur;
}
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
.enable_signaling = qxl_nop_signaling,
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
- fence_signal(&release->base);
- fence_put(&release->base);
+ dma_fence_signal(&release->base);
+ dma_fence_put(&release->base);
} else {
qxl_release_free_list(release);
kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
* Since we never really allocated a context and we don't want to conflict,
* set the highest bits. This will break if we really allow exporting of dma-bufs.
*/
- fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
- release->id | 0xf0000000, release->base.seqno);
- trace_fence_emit(&release->base);
+ dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+ release->id | 0xf0000000, release->base.seqno);
+ trace_dma_fence_emit(&release->base);
driver = bdev->driver;
glob = bo->glob;
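
The qxl and radeon hunks in this region are mechanical fallout of the fence → dma_fence rename; only the names change, not the semantics. A minimal sketch of the renamed API for a hypothetical driver (4.9-era signatures):

    #include <linux/dma-fence.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_fence_lock);

    static const char *example_get_driver_name(struct dma_fence *f)
    {
            return "example";
    }

    static const char *example_get_timeline_name(struct dma_fence *f)
    {
            return "example-timeline";
    }

    static bool example_enable_signaling(struct dma_fence *f)
    {
            return true; /* the IRQ path will call dma_fence_signal() */
    }

    static const struct dma_fence_ops example_fence_ops = {
            .get_driver_name = example_get_driver_name,
            .get_timeline_name = example_get_timeline_name,
            .enable_signaling = example_enable_signaling,
            .wait = dma_fence_default_wait,
    };

    static struct dma_fence *example_fence_emit(u64 context, unsigned int seqno)
    {
            struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (f)
                    dma_fence_init(f, &example_fence_ops, &example_fence_lock,
                                   context, seqno);
            return f;
    }

A real driver allocates its context once with dma_fence_context_alloc() (as radeon does below) and completes fences from its IRQ path with dma_fence_signal(), dropping references with dma_fence_put().
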
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82db948b..11761330a6b8 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
.invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758f4e33..fa4f8f008e4d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d3915408..f8b05090232a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de35bd3..595a19736458 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad916b0..44e0c5ed6418 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct dma_fence base;
struct radeon_device *rdev;
uint64_t seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct fence *fence;
+ struct dma_fence *fence;
bool async;
};
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
*/
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec73021b..4134759a6823 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
#include "atom.h"
#include "atom-bits.h"
+#include "radeon_asic.h"
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396dae0a9..c1135feb93c1 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3c..0be8d5cd7826 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev,
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
- rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+ rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1651,7 +1651,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call evicts the GART page table itself, which
+ * has to be done using the CPU.
+ */
radeon_bo_evict_vram(rdev);
radeon_agp_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb568c15..e7409e8a9f87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
}
} else
- r = fence_wait(work->fence, false);
+ r = dma_fence_wait(work->fence, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- fence_put(work->fence);
+ dma_fence_put(work->fence);
work->fence = NULL;
}
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -617,7 +617,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+ dma_fence_put(work->fence);
kfree(work);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea29c06..6d1237d6e1b8 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct
+drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
.hotplug = radeon_dp_mst_hotplug,
};
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct
+radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
.commit = radeon_mst_encoder_commit,
};
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075acde9c..ef09f0a63754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
(*fence)->is_vm_update = false;
- fence_init(&(*fence)->base, &radeon_fence_ops,
- &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+ dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+ &rdev->fence_queue.lock,
+ rdev->fence_context + ring,
+ seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = fence_signal_locked(&fence->base);
+ int ret = dma_fence_signal_locked(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
- FENCE_TRACE(&fence->base, "pending\n");
+ DMA_FENCE_TRACE(&fence->base, "pending\n");
return 0;
}
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
fence->fence_wake.private = NULL;
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
- fence_get(f);
+ dma_fence_get(f);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
- ret = fence_signal(&fence->base);
+ ret = dma_fence_signal(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
return true;
}
return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
* exclusive_lock is not held in that case.
*/
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
- return fence_wait(&fence->base, intr);
+ return dma_fence_wait(&fence->base, intr);
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = fence_signal(&fence->base);
+ r_sig = dma_fence_signal(&fence->base);
if (!r_sig)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return r;
}
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- fence_put(&tmp->base);
+ dma_fence_put(&tmp->base);
}
}
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
#endif
}
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
return "radeon";
}
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
struct radeon_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct radeon_wait_cb *wait =
container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
wake_up_process(wait->task);
}
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
cb.task = current;
- if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+ if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
return t;
while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
+ dma_fence_remove_callback(f, &cb.base);
return t;
}
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
.get_driver_name = radeon_fence_get_driver_name,
.get_timeline_name = radeon_fence_get_timeline_name,
.enable_signaling = radeon_fence_enable_signaling,
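The radeon conversion above exercises every core entry point of the renamed API. For orientation, a self-contained software fence built from just those entry points might look like the sketch below, modelled on the vgem conversion later in this diff; every example_* name is hypothetical:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *example_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *f)
{
	return "unbound";
}

static bool example_enable_signaling(struct dma_fence *f)
{
	/* nothing to arm; the producer calls dma_fence_signal() directly */
	return true;
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
	.enable_signaling = example_enable_signaling,
	.wait = dma_fence_default_wait,
};

static struct dma_fence *example_fence_create(void)
{
	struct example_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	spin_lock_init(&fence->lock);
	/* one context carrying a single seqno, as the vgem hunks below do */
	dma_fence_init(&fence->base, &example_fence_ops, &fence->lock,
		       dma_fence_context_alloc(1), 1);
	return &fence->base;
}

Consumers then use dma_fence_get()/dma_fence_wait()/dma_fence_put(), and the producer completes the fence with dma_fence_signal(), exactly the calls being renamed throughout this patch.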
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba2efaa..222a1fa41d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..326ad068c15a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
+ /* allow new DPM state to be picked */
+ radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ dpm_state = rdev->pm.dpm.ac_power ?
+ POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
restart_search:
/* Pick the best power state based on current conditions */
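Read together, the two radeon_pm.c changes mean an AC/DC transition now re-runs DPM state selection, and the fallback for the unimplemented "balanced" state follows the power source. A sketch of the resulting flow, written as if it sat inside radeon_pm.c next to the code above (example_acpi_event is hypothetical; the helper and fields are the ones named in the hunks):

static void example_acpi_event(struct radeon_device *rdev, bool ac_power)
{
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.ac_power = ac_power;
	mutex_unlock(&rdev->pm.mutex);

	/* allow a new DPM state to be picked; with no balanced states
	 * implemented this resolves to PERFORMANCE on AC and BATTERY
	 * on battery, per the hunk above */
	radeon_pm_compute_clocks_dpm(rdev);
}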
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1de4ff..be5d7a38d3aa 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
bool shared)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
flist = reservation_object_get_list(resv);
if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
if (r)
break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e21662..0cf03ccbf0a7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7bdee55..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
+ struct dma_fence *f;
void *ptr;
int i, r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8821c4..143280dc0851 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev)
}
}
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
unsigned i;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe837e61..6fe161192bb4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
@@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
}
of_node_put(iommu);
- component_match_add(dev, &match, compare_of, port->parent);
+ drm_of_component_match_add(dev, &match, compare_of,
+ port->parent);
of_node_put(port);
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 7087499969bc..6aead2013b62 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
#include "sti_crtc.h"
#include "sti_drv.h"
@@ -424,8 +425,8 @@ static int sti_platform_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
+ drm_of_component_match_add(dev, &match, compare_of,
+ child_np);
child_np = of_get_next_available_child(node, child_np);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 0da9862ad8ed..b3c4ad605e81 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %s\n",
of_node_full_name(node));
- component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, compare_of, node);
count++;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 68e895021005..06a4c584f3cb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <drm/drm_of.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev,
dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
if (match)
- component_match_add(dev, match, dev_match_of, node);
+ drm_of_component_match_add(dev, match, dev_match_of,
+ node);
of_node_put(node);
count++;
}
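Four drivers in this diff (rockchip, sti, sun4i, tilcdc) swap bare component_match_add() calls for the new drm_of_component_match_add() wrapper, which also takes its own reference on the device node for the lifetime of the match. A sketch of the usual OF-graph probe loop with the new helper; example_add_endpoints and this trivial compare_of are illustrative, not taken from any one driver:

#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/of_graph.h>

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static void example_add_endpoints(struct device *dev,
				  struct component_match **match,
				  struct device_node *port)
{
	struct device_node *ep, *remote;

	for_each_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;

		/* the wrapper keeps its own reference on remote, so the
		 * local one can be dropped immediately */
		drm_of_component_match_add(dev, match, compare_of, remote);
		of_node_put(remote);
	}
}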
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..f6ff579e8918 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i;
fobj = reservation_object_get_list(bo->resv);
fence = reservation_object_get_excl(bo->resv);
if (fence && !fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(bo->resv));
if (!fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
}
}
@@ -717,6 +717,20 @@ out:
return ret;
}
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ (place->lpfn && place->lpfn <= bo->mem.start))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret) {
- if (place && (place->fpfn || place->lpfn)) {
- /* Don't evict this BO if it's outside of the
- * requested placement range
- */
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
- (place->lpfn && place->lpfn <= bo->mem.start)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
- }
- }
+ if (ret)
+ continue;
- break;
+ if (place && !bdev->driver->eviction_valuable(bo, place)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
}
+
+ break;
}
if (ret) {
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (unlikely(ret))
return ret;
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = fence;
}
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
/*
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
- ret = fence_wait(fence, false);
- fence_put(fence);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (ret) {
if (allow_errors) {
return ret;
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
- fence_put(man->move);
+ dma_fence_put(man->move);
man->use_type = false;
man->has_type = false;
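The new eviction_valuable hook lifts the placement-range test out of ttm_mem_evict_first() so a driver can veto evictions for its own reasons while reusing the exported default. A sketch of a driver-side override; the example_* names and the pin test are hypothetical, and TTM header paths vary a little across kernel versions:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* stand-in for a real driver-specific pin check */
static bool example_bo_is_pinned(struct ttm_buffer_object *bo)
{
	return false;
}

static bool example_eviction_valuable(struct ttm_buffer_object *bo,
				      const struct ttm_place *place)
{
	/* hypothetical policy: keep pinned VRAM objects resident ... */
	if (bo->mem.mem_type == TTM_PL_VRAM && example_bo_is_pinned(bo))
		return false;

	/* ... otherwise reuse the range check this patch moves out of
	 * ttm_mem_evict_first() and exports as the default */
	return ttm_bo_eviction_valuable(bo, place);
}

static struct ttm_bo_driver example_bo_driver = {
	/* remaining callbacks elided */
	.eviction_valuable = example_eviction_valuable,
};

The radeon, virtio and vmwgfx hunks in this diff simply wire the exported default into their ttm_bo_driver tables rather than overriding it.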
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e21655c57..d0459b392e5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ struct dma_fence *fence,
bool evict,
struct ttm_mem_reg *new_mem)
{
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
*/
spin_lock(&from->move_lock);
- if (!from->move || fence_is_later(fence, from->move)) {
- fence_put(from->move);
- from->move = fence_get(fence);
+ if (!from->move || dma_fence_is_later(fence, from->move)) {
+ dma_fence_put(from->move);
+ from->move = dma_fence_get(fence);
}
spin_unlock(&from->move_lock);
ttm_bo_free_old_node(bo);
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
} else {
/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5e5167..4748aedc933a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* Quick non-stalling check for idle.
*/
- if (fence_is_signaled(bo->moving))
+ if (dma_fence_is_signaled(bo->moving))
goto out_clear;
/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) fence_wait(bo->moving, true);
+ (void) dma_fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = fence_wait(bo->moving, true);
+ ret = dma_fence_wait(bo->moving, true);
if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
}
out_clear:
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = NULL;
out_unlock:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b35dc6..d35bc491e8de 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, struct fence *fence)
+ struct list_head *list,
+ struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1ffa1f9..488909a21ed8 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
- struct fence base;
+ struct dma_fence base;
struct spinlock lock;
struct timer_list timer;
};
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
{
return "vgem";
}
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
{
return false;
}
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
- snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+ snprintf(str, size, "%u",
+ dma_fence_is_signaled(fence) ? fence->seqno : 0);
}
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.enable_signaling = vgem_fence_enable_signaling,
.signaled = vgem_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
{
struct vgem_fence *fence = (struct vgem_fence *)data;
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
}
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
- unsigned int flags)
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
{
struct vgem_fence *fence;
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
return NULL;
spin_lock_init(&fence->lock);
- fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
- fence_context_alloc(1), 1);
+ dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ dma_fence_context_alloc(1), 1);
setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct reservation_object *resv;
struct drm_gem_object *obj;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
err_fence:
if (ret) {
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
}
err:
drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
- struct fence *fence;
+ struct dma_fence *fence;
int ret = 0;
if (arg->flags)
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
ret = -ETIMEDOUT;
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
return ret;
}
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile)
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
- fence_signal(p);
- fence_put(p);
+ dma_fence_signal(p);
+ dma_fence_put(p);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080d63d1..ec1ebdcfe80b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver {
};
struct virtio_gpu_fence {
- struct fence f;
+ struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa8a4c7..23353521f903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
{
return true;
}
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
return false;
}
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", fence->seq);
}
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
.enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
(*fence)->drv = drv;
(*fence)->seq = ++drv->sync_seq;
- fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- fence_get(&(*fence)->f);
+ dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+ drv->context, (*fence)->seq);
+ dma_fence_get(&(*fence)->f);
list_add_tail(&(*fence)->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
if (last_seq < fence->seq)
continue;
- fence_signal_locked(&fence->f);
+ dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b4c4f0..61f3a963af95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
drm_free_large(buflist);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
return 0;
out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return ret;
}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return 0;
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fbae0fb..1235519853f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
- vgdev->fence_drv.context = fence_context_alloc(1);
+ vgdev->fence_drv.context = dma_fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f6f28a..cb75f0663ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac5f95d..4a1de9f81193 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
.invalidate_caches = &virtio_gpu_invalidate_caches,
.init_mem_type = &virtio_gpu_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
.move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c931..c894a48a74a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
fence->destroy(fence);
}
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
return "vmwgfx";
}
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
return "svga";
}
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
}
struct vmwgfx_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
static void __vmw_fences_update(struct vmw_fence_manager *fman);
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
while (ret > 0) {
__vmw_fences_update(fman);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
if (intr)
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
- fman->ctx = fence_context_alloc(1);
+ fman->ctx = dma_fence_context_alloc(1);
return fman;
}
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
unsigned long irq_flags;
int ret = 0;
- fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
- fman->ctx, seqno);
+ dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
u32 goal_seqno;
u32 *fifo_mem;
- if (fence_is_signaled_locked(&fence->base))
+ if (dma_fence_is_signaled_locked(&fence->base))
return false;
fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
vmw_fences_update(fman);
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+ long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
if (likely(ret > 0))
return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
}
BUG_ON(!list_empty(&fence->head));
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
spin_lock_irq(&fman->lock);
}
spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
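radeon_fence.c earlier in this diff and vmwgfx here implement the same custom .wait shape: attach a dma_fence_cb that wakes the sleeping task, then sleep in a schedule_timeout() loop until the signaled bit appears. A condensed sketch under the renamed API; all example_* names are illustrative:

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>

struct example_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void example_wait_cb_func(struct dma_fence *fence,
				 struct dma_fence_cb *cb)
{
	struct example_wait_cb *wait =
		container_of(cb, struct example_wait_cb, base);

	wake_up_process(wait->task);
}

/* minimal custom .wait: park until the callback fires or time runs out */
static signed long example_fence_wait(struct dma_fence *f, bool intr,
				      signed long timeout)
{
	struct example_wait_cb cb = { .task = current };
	signed long t = timeout;

	/* a nonzero return means the fence was already signaled */
	if (dma_fence_add_callback(f, &cb.base, example_wait_cb_func))
		return t;

	while (t > 0) {
		set_current_state(intr ? TASK_INTERRUPTIBLE :
					 TASK_UNINTERRUPTIBLE);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;
		if (intr && signal_pending(current)) {
			t = -ERESTARTSYS;
			break;
		}
		t = schedule_timeout(t);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(f, &cb.base);
	return t;
}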
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
#ifndef _VMWGFX_FENCE_H_
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -52,7 +52,7 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
*fence_p = NULL;
if (fence)
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
}
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1a85fb2d4dc6..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}