Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 228
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 92
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 18
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
57 files changed, 850 insertions, 514 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 502b94fb116a..b31d121a876b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
+ int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
- size = amdgpu_bo_size(p->uf_entry.robj);
- if (size != PAGE_SIZE || (data->offset + 8) > size)
- return -EINVAL;
-
- *offset = data->offset;
-
drm_gem_object_put_unlocked(gobj);
+ size = amdgpu_bo_size(p->uf_entry.robj);
+ if (size != PAGE_SIZE || (data->offset + 8) > size) {
+ r = -EINVAL;
+ goto error_unref;
+ }
+
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
- amdgpu_bo_unref(&p->uf_entry.robj);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unref;
}
+ *offset = data->offset;
+
return 0;
+
+error_unref:
+ amdgpu_bo_unref(&p->uf_entry.robj);
+ return r;
}
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1012,13 +1019,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r)
return r;
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
- if (!parser->ctx->preamble_presented) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
- parser->ctx->preamble_presented = true;
- }
- }
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ parser->job->preamble_status |=
+ AMDGPU_PREAMBLE_IB_PRESENT;
if (parser->ring && parser->ring != ring)
return -EINVAL;
@@ -1207,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
int r;
+ job = p->job;
+ p->job = NULL;
+
+ r = drm_sched_job_init(&job->base, entity, p->filp);
+ if (r)
+ goto error_unlock;
+
+ /* No memory allocation is allowed while holding the mn lock */
amdgpu_mn_lock(p->mn);
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
+ r = -ERESTARTSYS;
+ goto error_abort;
}
}
- job = p->job;
- p->job = NULL;
-
- r = drm_sched_job_init(&job->base, entity, p->filp);
- if (r) {
- amdgpu_job_free(job);
- amdgpu_mn_unlock(p->mn);
- return r;
- }
-
job->owner = p->filp;
p->fence = dma_fence_get(&job->base.s_fence->finished);
@@ -1241,6 +1242,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_cs_post_dependencies(p);
+ if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ !p->ctx->preamble_presented) {
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ p->ctx->preamble_presented = true;
+ }
+
cs->out.handle = seq;
job->uf_sequence = seq;
@@ -1258,6 +1265,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_mn_unlock(p->mn);
return 0;
+
+error_abort:
+ dma_fence_put(&job->base.s_fence->finished);
+ job->base.s_fence = NULL;
+ amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+ amdgpu_job_free(job);
+ return r;
}
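
[Editor's note] The hunks above reorder amdgpu_cs_submit() so that drm_sched_job_init(), which can allocate memory, runs before amdgpu_mn_lock() is taken, as the new comment demands. A minimal standalone sketch of that ordering discipline, with hypothetical helper names standing in for the driver calls:

	static int job_init(void) { return 0; }           /* stand-in: may allocate */
	static void job_free(void) { }
	static void lock_mn(void) { }                     /* no allocations while held */
	static void unlock_mn(void) { }
	static int validate_userptrs(void) { return 0; }

	static int submit(void)
	{
		int r;

		r = job_init();          /* allocating work happens unlocked */
		if (r)
			return r;        /* nothing else to unwind yet */

		lock_mn();
		r = validate_userptrs();
		if (r)
			goto error_abort;

		unlock_mn();
		return 0;

	error_abort:
		unlock_mn();             /* unwind in reverse order of setup */
		job_free();
		return r;
	}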
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ab5ccbc14ac..39bf2ce548c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
- AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 5518e623fed2..51b5e977ca88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
+ need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+ (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
skip_preamble = ring->current_ctx == fence_ctx;
- need_ctx_switch = ring->current_ctx != fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8f98629fbe59..7b4e657a95c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- mutex_lock(&adev->pm.mutex);
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.ac_power = true;
- else
- adev->pm.ac_power = false;
- mutex_unlock(&adev->pm.mutex);
-
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ece0ac703e27..b17771dd5ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* is validated on next vm use to avoid fault.
* */
list_move_tail(&base->vm_status, &vm->evicted);
+ base->moved = true;
}
/**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
uint64_t addr;
int r;
- addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
+ addr = amdgpu_bo_gpu_offset(bo);
if (ats_entries) {
uint64_t ats_value;
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
* @fragment_size_default: Default PTE fragment size
* @max_level: max VMPT level
* @max_bits: max address space size in bits
*
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits)
{
+ unsigned int max_size = 1 << (max_bits - 30);
+ unsigned int vm_size;
uint64_t tmp;
/* adjust vm size first */
if (amdgpu_vm_size != -1) {
- unsigned max_size = 1 << (max_bits - 30);
-
vm_size = amdgpu_vm_size;
if (vm_size > max_size) {
dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
amdgpu_vm_size, max_size);
vm_size = max_size;
}
+ } else {
+ struct sysinfo si;
+ unsigned int phys_ram_gb;
+
+ /* Optimal VM size depends on the amount of physical
+ * RAM available. Underlying requirements and
+ * assumptions:
+ *
+ * - Need to map system memory and VRAM from all GPUs
+ * - VRAM from other GPUs not known here
+ * - Assume VRAM <= system memory
+ * - On GFX8 and older, VM space can be segmented for
+ * different MTYPEs
+ * - Need to allow room for fragmentation, guard pages etc.
+ *
+ * This adds up to a rough guess of system memory x3.
+ * Round up to power of two to maximize the available
+ * VM size with the given page table size.
+ */
+ si_meminfo(&si);
+ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+ (1 << 30) - 1) >> 30;
+ vm_size = roundup_pow_of_two(
+ min(max(phys_ram_gb * 3, min_vm_size), max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
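
[Editor's note] As a worked example of the sizing rule in the comment above (all numbers are made up): with 16 GB of system RAM, a hypothetical min_vm_size of 64 GB, and a 48-bit address space, the heuristic picks 64 GB. A standalone check, not kernel code:

	#include <stdio.h>

	static unsigned int roundup_pow_of_two_u32(unsigned int x)
	{
		unsigned int v = 1;
		while (v < x)
			v <<= 1;
		return v;
	}

	int main(void)
	{
		unsigned int min_vm_size = 64;                 /* GB, example value */
		unsigned int max_bits = 48;
		unsigned int max_size = 1u << (max_bits - 30); /* 262144 GB */
		unsigned int phys_ram_gb = 16;                 /* assume 16 GB of RAM */
		unsigned int want = phys_ram_gb * 3;           /* 48 GB */

		if (want < min_vm_size)
			want = min_vm_size;                    /* clamp up to 64 GB */
		if (want > max_size)
			want = max_size;
		printf("vm_size = %u GB\n", roundup_pow_of_two_u32(want)); /* 64 */
		return 0;
	}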
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 67a15d439ac0..9fa9df0c5e7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5cd45210113f..5a9534a82d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
default:
break;
}
-
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 75317f283c69..ad151fefa41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
amdgpu_gart_table_vram_unpin(adev);
}
-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
u32 status, u32 addr, u32 mc_client)
{
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v6_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 36dc367c4b45..f8d8a3a73e42 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
}
/**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
-/**
* gmc_v7_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
kfree(adev->gmc.vm_fault_info);
- gmc_v7_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 70fc97b59b4f..9333109b210d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
}
/**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
-/**
* gmc_v8_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
kfree(adev->gmc.vm_fault_info);
- gmc_v8_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 399a5db27649..72f8018fa2a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
return 0;
}
-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v9_0_gart_fini(adev);
/*
* TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
*/
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3f57f6463dc8..cb79a93c2eb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
return ret;
}
- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
if (adev->irq.installed &&
amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
/* powerup blocks */
kv_dpm_powergate_acp(adev, false);
kv_dpm_powergate_samu(adev, false);
- kv_dpm_powergate_vce(adev, false);
- kv_dpm_powergate_uvd(adev, false);
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ if (pi->caps_uvd_pg) /* power on the UVD block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
kv_enable_smc_cac(adev, false);
kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
int ret;
if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
- kv_dpm_powergate_vce(adev, false);
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
kv_enable_vce_dpm(adev, true);
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
kv_enable_vce_dpm(adev, false);
- kv_dpm_powergate_vce(adev, true);
}
return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
}
}
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->vce_power_gated == gate)
- return;
+ int ret;
pi->vce_power_gated = gate;
- if (!pi->caps_vce_pg)
- return;
-
- if (gate)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
- else
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ if (gate) {
+ /* stop the VCE block */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ kv_enable_vce_dpm(adev, false);
+ if (pi->caps_vce_pg) /* power off the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+ } else {
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ kv_enable_vce_dpm(adev, true);
+ /* re-init the VCE block */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ }
}
+
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
case AMD_IP_BLOCK_TYPE_UVD:
kv_dpm_powergate_uvd(handle, gate);
break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ kv_dpm_powergate_vce(handle, gate);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e7ca4623cfb9..7c3b634d8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index db327b412562..1de96995e690 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
- ni_update_current_ps(adev, boot_ps);
return 0;
}
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index fbe878ae1e8c..4ba0003a9d32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
{
struct dc_context *ctx = pp->ctx;
struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct pp_display_clock_request clock = {0};
- if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return;
- amdgpu_dpm_display_configuration_changed(adev);
+ clock.clock_type = amd_pp_dcf_clock;
+ clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+ clock.clock_type = amd_pp_f_clock;
+ clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
}
void pp_rv_set_wm_ranges(struct pp_smu *pp,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 567867915d32..37eaf72ace54 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* fail-safe mode
*/
if (dc_is_hdmi_signal(link->connector_signal) ||
- dc_is_dvi_signal(link->connector_signal))
+ dc_is_dvi_signal(link->connector_signal)) {
+ if (prev_sink != NULL)
+ dc_sink_release(prev_sink);
+
return false;
+ }
default:
break;
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6e3f56684f4e..51ed99a37803 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
unsigned int tiling_mode = 0;
unsigned int stride = 0;
- switch (info->drm_format_mod << 10) {
- case PLANE_CTL_TILED_LINEAR:
+ switch (info->drm_format_mod) {
+ case DRM_FORMAT_MOD_LINEAR:
tiling_mode = I915_TILING_NONE;
break;
- case PLANE_CTL_TILED_X:
+ case I915_FORMAT_MOD_X_TILED:
tiling_mode = I915_TILING_X;
stride = info->stride;
break;
- case PLANE_CTL_TILED_Y:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
tiling_mode = I915_TILING_Y;
stride = info->stride;
break;
default:
- gvt_dbg_core("not supported tiling mode\n");
+ gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+ info->drm_format_mod);
}
obj->tiling_and_stride = tiling_mode | stride;
} else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->height = p.height;
info->stride = p.stride;
info->drm_format = p.drm_format;
- info->drm_format_mod = p.tiled;
+
+ switch (p.tiled) {
+ case PLANE_CTL_TILED_LINEAR:
+ info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+ break;
+ case PLANE_CTL_TILED_X:
+ info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+ }
+
info->size = (((p.stride * p.height * p.bpp) / 8) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
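
[Editor's note] The size computation in this hunk rounds the framebuffer byte size up to whole pages before converting to a page count. A standalone arithmetic check with made-up plane dimensions:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long page_size = 4096, page_shift = 12; /* 4K pages assumed */
		unsigned long stride = 1000, height = 601, bpp = 32;   /* example plane */
		unsigned long bytes = stride * height * bpp / 8;       /* 2404000 */
		unsigned long pages = (bytes + (page_size - 1)) >> page_shift;

		printf("%lu bytes -> %lu pages\n", bytes, pages);      /* 587 pages */
		return 0;
	}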
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index face664be3e8..481896fb712a 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
- plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
- _PLANE_CTL_TILED_SHIFT;
+ plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
return -EINVAL;
}
- plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+ plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
(IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) ?
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index cb055f3c81a2..60c155085029 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -101,7 +101,7 @@ struct intel_gvt;
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
- u8 tiled; /* X-tiled */
+ u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */
u8 bpp; /* bits per pixel */
u32 hw_format; /* format field in the PRI_CTL register */
u32 drm_format; /* format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 7a58ca555197..72afa518edd9 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+ vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+ return 0;
+}
+
static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
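
[Editor's note] gen9_dbuf_ctl_mmio_write() above emulates a request/acknowledge register: the guest's DBUF_POWER_REQUEST write is immediately reflected in DBUF_POWER_STATE, so a guest polling for the acknowledgement makes progress. The same pattern in isolation, with hypothetical names:

	#include <stdint.h>

	static void reqack_mmio_write(uint32_t *vreg, uint32_t request_bit,
				      uint32_t state_bit, uint32_t val)
	{
		*vreg = val;                     /* latch the guest's write */
		if (*vreg & request_bit)
			*vreg |= state_bit;      /* ack power-up immediately */
		else
			*vreg &= ~state_bit;     /* ack power-down */
	}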
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
u32 v = *(u32 *)p_data;
u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
- vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
- vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
- vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+ switch (offset) {
+ case _PHY_CTL_FAMILY_EDP:
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+ break;
+ case _PHY_CTL_FAMILY_DDI:
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+ break;
+ }
vgpu_vreg(vgpu, offset) = v;
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
skl_power_well_ctl_write);
+ MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
- NULL, NULL);
+ NULL, NULL);
+ MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..c7afee37b2b8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
- if (kthread)
+ if (kthread) {
+ if (!mmget_not_zero(kvm->mm))
+ return -EFAULT;
use_mm(kvm->mm);
+ }
idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len);
srcu_read_unlock(&kvm->srcu, idx);
- if (kthread)
+ if (kthread) {
unuse_mm(kvm->mm);
+ mmput(kvm->mm);
+ }
return ret;
}
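
[Editor's note] The kvmgt fix above pins the target mm before adopting it, so a guest process that is exiting cannot free the mm out from under the kthread. A sketch of the pattern using the same kernel APIs, where do_guest_io() is a hypothetical placeholder for the kvm_read_guest()/kvm_write_guest() calls:

	/* Sketch only: assumes kthread context, as in kvmgt_rw_gpa() above. */
	static int kthread_access_guest(struct mm_struct *mm)
	{
		int ret;

		if (!mmget_not_zero(mm))   /* refuse if the mm is already exiting */
			return -EFAULT;
		use_mm(mm);                /* temporarily adopt the address space */

		ret = do_guest_io();       /* hypothetical payload */

		unuse_mm(mm);
		mmput(mm);                 /* drop the pin taken above */
		return ret;
	}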
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2c..e872f4847fbe 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* performance for batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..b0d3a43ccd03 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
#define DEVICE_TYPE_EFP3 0x20
#define DEVICE_TYPE_EFP4 0x10
-#define DEV_SIZE 38
-
struct opregion_header {
u8 signature[16];
u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
u16 size; /* data size */
} __packed;
+/* To support Windows guests with opregion, hardcode the emulated
+ * bdb header version as '186'; the corresponding child_device_config
+ * length should then be '33', not '38'.
+ */
struct efp_child_device_config {
u16 handle;
u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
u8 mipi_bridge_type; /* 171 */
u16 device_class_ext;
u8 dvo_function;
- u8 dp_usb_type_c:1; /* 195 */
- u8 skip6:7;
- u8 dp_usb_type_c_2x_gpio_index; /* 195 */
- u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
- u8 iboost_dp:4; /* 196 */
- u8 iboost_hdmi:4; /* 196 */
} __packed;
struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
v->header.bdb_offset = offsetof(struct vbt, bdb_header);
strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
- v->bdb_header.version = 186; /* child_dev_size = 38 */
+ v->bdb_header.version = 186; /* child_dev_size = 33 */
v->bdb_header.header_size = sizeof(v->bdb_header);
v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
/* child device */
num_child = 4; /* each port has one child */
+ v->general_definitions.child_dev_size =
+ sizeof(struct efp_child_device_config);
v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
/* size will include child devices */
v->general_definitions_header.size =
- sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
- v->general_definitions.child_dev_size = DEV_SIZE;
+ sizeof(struct bdb_general_definitions) +
+ num_child * v->general_definitions.child_dev_size;
/* portA */
v->child0.handle = DEVICE_TYPE_EFP1;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 09d7bb72b4ff..c32e7d5e8629 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
return false;
}
+/* Give a newly started vGPU higher priority for its first 2 seconds */
+#define GVT_SCHED_VGPU_PRI_TIME 2
+
struct vgpu_sched_data {
struct list_head lru_list;
struct intel_vgpu *vgpu;
bool active;
-
+ bool pri_sched;
+ ktime_t pri_time;
ktime_t sched_in_time;
ktime_t sched_time;
ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
+ if (vgpu_data->pri_sched) {
+ if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+ vgpu = vgpu_data->vgpu;
+ break;
+ } else
+ vgpu_data->pri_sched = false;
+ }
+
/* Return the vGPU only if it has time slice left */
if (vgpu_data->left_ts > 0) {
vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
+
/* no active vgpu or has already had a target */
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
vgpu = find_busy_vgpu(sched_data);
if (vgpu) {
scheduler->next_vgpu = vgpu;
-
- /* Move the last used vGPU to the tail of lru_list */
vgpu_data = vgpu->sched_data;
- list_del_init(&vgpu_data->lru_list);
- list_add_tail(&vgpu_data->lru_list,
- &sched_data->lru_runq_head);
+ if (!vgpu_data->pri_sched) {
+ /* Move the last used vGPU to the tail of lru_list */
+ list_del_init(&vgpu_data->lru_list);
+ list_add_tail(&vgpu_data->lru_list,
+ &sched_data->lru_runq_head);
+ }
} else {
scheduler->next_vgpu = gvt->idle_vgpu;
}
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ ktime_t now;
if (!list_empty(&vgpu_data->lru_list))
return;
- list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+ now = ktime_get();
+ vgpu_data->pri_time = ktime_add(now,
+ ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+ vgpu_data->pri_sched = true;
+
+ list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
if (!hrtimer_active(&sched_data->timer))
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
&vgpu->gvt->scheduler;
int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (!vgpu_data->active)
return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
+ intel_runtime_pm_get(dev_priv);
spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
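
[Editor's note] Taken together, the sched_policy.c hunks give a freshly started vGPU an unconditional 2-second scheduling window (pri_sched/pri_time) before it rejoins normal time-slice accounting. The decision logic, condensed into one hedged helper built from the same fields and ktime calls used above:

	static bool vgpu_in_priority_window(struct vgpu_sched_data *vgpu_data)
	{
		if (!vgpu_data->pri_sched)
			return false;
		if (ktime_before(ktime_get(), vgpu_data->pri_time))
			return true;               /* still inside the start window */
		vgpu_data->pri_sched = false;      /* window expired: demote to LRU */
		return false;
	}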
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 08ec7446282e..9e63cd47b60f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 11d834f94220..98358b4b36de 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
list_add_tail(&vma->obj_link, &obj->vma_list);
}
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
if (vma->obj)
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
GEM_BUG_ON(i915_gem_active_isset(&iter->base));
kfree(iter);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b725835b47ef..769f3f586661 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
- return;
-
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8761513f3532..c9af34861d9e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
- intel_ddi_enable_pipe_clock(crtc_state);
+ if (!is_mst)
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
- intel_ddi_disable_pipe_clock(old_crtc_state);
-
- /*
- * Power down sink before disabling the port, otherwise we end
- * up getting interrupts from the sink on detecting link loss.
- */
- if (!is_mst)
+ if (!is_mst) {
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
intel_disable_ddi_buf(encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ed3fa1c8a983..d2951096bca0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
int dst_x = plane_state->base.dst.x1;
+ int dst_w = drm_rect_width(&plane_state->base.dst);
int pipe_src_w = crtc_state->pipe_src_w;
int max_width = skl_max_plane_width(fb, 0, rotation);
int max_height = 4096;
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
* screen may cause FIFO underflow and display corruption.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+ (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- dst_x + w < 4 ? "end" : "start",
- dst_x + w < 4 ? dst_x + w : dst_x,
+ dst_x + dst_w < 4 ? "end" : "start",
+ dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
4, pipe_src_w - 4);
return -ERANGE;
}
@@ -5078,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->pcu_lock);
- /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+ /*
+ * Wait for PCODE to finish disabling IPS. The BSpec specified
+ * 42ms timeout value leads to occasional timeouts so use 100ms
+ * instead.
+ */
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
- 42))
+ 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cd0f649b57a5..1193202766a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
*/
status = connector_status_disconnected;
goto out;
+ } else {
+ /*
+ * If display is now connected check links status,
+ * there has been known issues of link loss triggering
+ * long pulse.
+ *
+ * Some sinks (eg. ASUS PB287Q) seem to perform some
+ * weird HPD ping pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to intermittent HPD dip.
+ */
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp_retrain_link(encoder, ctx);
}
/*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
return ret;
}
- status = intel_dp_long_pulse(intel_dp->attached_connector);
+ status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
}
intel_dp->detect_done = false;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7e3e01607643..4ecd65375603 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
I915_WRITE(DP_TP_STATUS(port), temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+ intel_ddi_enable_pipe_clock(pipe_config);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a9076402dcb0..192972a7d287 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
ret = i2c_transfer(adapter, &msg, 1);
if (ret == 1)
- return 0;
- return ret >= 0 ? -EIO : ret;
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ kfree(write_buf);
+ return ret;
}
static
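
[Editor's note] The intel_hdmi.c hunk above converts two early returns into a single exit so that write_buf (allocated earlier in the function, outside this hunk) is freed on every path. The shape of the fix as a standalone sketch, with a stub transfer in place of i2c_transfer() and plain constants in place of the errno values:

	#include <stdlib.h>

	static int do_transfer(void *buf) { (void)buf; return 1; } /* stub: 1 == msg sent */

	static int write_with_cleanup(size_t len)
	{
		void *write_buf = malloc(len);
		int ret;

		if (!write_buf)
			return -1;            /* would be -ENOMEM in the driver */

		ret = do_transfer(write_buf);
		if (ret == 1)
			ret = 0;              /* full transfer: success */
		else if (ret >= 0)
			ret = -2;             /* partial transfer: would be -EIO */

		free(write_buf);              /* single exit: freed on all paths */
		return ret;
	}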
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5dae16ccd9f1..3e085c5f2b81 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
if (current_mode != mode)
DRM_ERROR("LSPCON mode hasn't settled\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c2f10d899329..443dfaefd7a6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -181,8 +181,9 @@ struct intel_overlay {
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
/* register access */
- u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers __iomem *regs;
+ u32 flip_addr;
/* flip handling */
struct i915_gem_active last_flip;
};
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct overlay_registers __iomem *regs;
-
- if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
- regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
- else
- regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
- overlay->flip_addr,
- PAGE_SIZE);
-
- return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers __iomem *regs)
-{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
- io_mapping_unmap(regs);
-}
-
static void intel_overlay_submit_request(struct intel_overlay *overlay,
struct i915_request *rq,
i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
- int ret, tmp_width;
- struct overlay_registers __iomem *regs;
- bool scale_changed = false;
+ struct overlay_registers __iomem *regs = overlay->regs;
struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
+ bool scale_changed = false;
struct i915_vma *vma;
+ int ret, tmp_width;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (!overlay->active) {
u32 oconfig;
- regs = intel_overlay_map_regs(overlay);
- if (!regs) {
- ret = -ENOMEM;
- goto out_unpin;
- }
+
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
- intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
- regs = intel_overlay_map_regs(overlay);
- if (!regs) {
- ret = -ENOMEM;
- goto out_unpin;
- }
-
iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
iowrite32(overlay_cmd_reg(params), &regs->OCMD);
- intel_overlay_unmap_regs(overlay, regs);
-
ret = intel_overlay_continue(overlay, vma, scale_changed);
if (ret)
goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct overlay_registers __iomem *regs;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- regs = intel_overlay_map_regs(overlay);
- iowrite32(0, &regs->OCMD);
- intel_overlay_unmap_regs(overlay, regs);
+ iowrite32(0, &overlay->regs->OCMD);
return intel_overlay_off(overlay);
}
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
- struct overlay_registers __iomem *regs;
int ret;
overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay->contrast = attrs->contrast;
overlay->saturation = attrs->saturation;
- regs = intel_overlay_map_regs(overlay);
- if (!regs) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- update_reg_attrs(overlay, regs);
-
- intel_overlay_unmap_regs(overlay, regs);
+ update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
return ret;
}
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+ if (obj == NULL)
+ obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put_bo;
+ }
+
+ if (use_phys)
+ overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ else
+ overlay->flip_addr = i915_ggtt_offset(vma);
+ overlay->regs = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(overlay->regs)) {
+ err = PTR_ERR(overlay->regs);
+ goto err_put_bo;
+ }
+
+ overlay->reg_bo = obj;
+ return 0;
+
+err_put_bo:
+ i915_gem_object_put(obj);
+ return err;
+}
+
void intel_setup_overlay(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
- struct drm_i915_gem_object *reg_bo;
- struct overlay_registers __iomem *regs;
- struct i915_vma *vma = NULL;
int ret;
if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
if (!overlay)
return;
- mutex_lock(&dev_priv->drm.struct_mutex);
- if (WARN_ON(dev_priv->overlay))
- goto out_free;
-
overlay->i915 = dev_priv;
- reg_bo = NULL;
- if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
- reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
- if (reg_bo == NULL)
- reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
- if (IS_ERR(reg_bo))
- goto out_free;
- overlay->reg_bo = reg_bo;
-
- if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
- ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to attach phys overlay regs\n");
- goto out_free_bo;
- }
- overlay->flip_addr = reg_bo->phys_handle->busaddr;
- } else {
- vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
- 0, PAGE_SIZE, PIN_MAPPABLE);
- if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin overlay register bo\n");
- ret = PTR_ERR(vma);
- goto out_free_bo;
- }
- overlay->flip_addr = i915_ggtt_offset(vma);
-
- ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
- if (ret) {
- DRM_ERROR("failed to move overlay register bo into the GTT\n");
- goto out_unpin_bo;
- }
- }
-
- /* init all values */
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
init_request_active(&overlay->last_flip, NULL);
- regs = intel_overlay_map_regs(overlay);
- if (!regs)
- goto out_unpin_bo;
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+ if (ret)
+ goto out_free;
+
+ ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+ if (ret)
+ goto out_reg_bo;
- memset_io(regs, 0, sizeof(struct overlay_registers));
- update_polyphase_filter(regs);
- update_reg_attrs(overlay, regs);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_overlay_unmap_regs(overlay, regs);
+ memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(overlay->regs);
+ update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- DRM_INFO("initialized overlay support\n");
+ DRM_INFO("Initialized overlay support.\n");
return;
-out_unpin_bo:
- if (vma)
- i915_vma_unpin(vma);
-out_free_bo:
- i915_gem_object_put(reg_bo);
+out_reg_bo:
+ i915_gem_object_put(overlay->reg_bo);
out_free:
mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
- return;
}
void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->overlay)
+ struct intel_overlay *overlay;
+
+ overlay = fetch_and_zero(&dev_priv->overlay);
+ if (!overlay)
return;
- /* The bo's should be free'd by the generic code already.
+ /*
+ * The bo's should be free'd by the generic code already.
* Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already */
- WARN_ON(dev_priv->overlay->active);
+ * hardware should be off already.
+ */
+ WARN_ON(overlay->active);
+
+ i915_gem_object_put(overlay->reg_bo);
- i915_gem_object_put(dev_priv->overlay->reg_bo);
- kfree(dev_priv->overlay);
+ kfree(overlay);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
u32 isr;
};
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct overlay_registers __iomem *regs;
-
- if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
- /* Cast to make sparse happy, but it's wc memory anyway, so
- * equivalent to the wc io mapping on X86. */
- regs = (struct overlay_registers __iomem *)
- overlay->reg_bo->phys_handle->vaddr;
- else
- regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
- overlay->flip_addr);
-
- return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
- struct overlay_registers __iomem *regs)
-{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
- io_mapping_unmap_atomic(regs);
-}
-
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
- struct overlay_registers __iomem *regs;
if (!overlay || !overlay->active)
return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
error->isr = I915_READ(ISR);
error->base = overlay->flip_addr;
- regs = intel_overlay_map_regs_atomic(overlay);
- if (!regs)
- goto err;
-
- memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay, regs);
+ memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
return error;
-
-err:
- kfree(error);
- return NULL;
}
void
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 978782a77629..28d191192945 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
writel(0x0, comp->regs + DISP_REG_OVL_RST);
}
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 4;
+}
+
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
+	/* The MEM_MODE_INPUT_FORMAT_XXX value returned by this switch
+	 * is defined in the MediaTek hardware data sheet; the letter
+	 * order in XXX bears no relation to the data arrangement in
+	 * memory.
+	 */
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
.layer_config = mtk_ovl_layer_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 585943c81e1f..b0a5cffe345a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -31,14 +31,31 @@
#define RDMA_REG_UPDATE_INT BIT(0)
#define DISP_REG_RDMA_GLOBAL_CON 0x0010
#define RDMA_ENGINE_EN BIT(0)
+#define RDMA_MODE_MEMORY BIT(1)
#define DISP_REG_RDMA_SIZE_CON_0 0x0014
+#define RDMA_MATRIX_ENABLE BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
#define DISP_REG_RDMA_SIZE_CON_1 0x0018
#define DISP_REG_RDMA_TARGET_LINE 0x001c
+#define DISP_RDMA_MEM_CON 0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
+#define MEM_MODE_INPUT_SWAP BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH 0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
#define DISP_REG_RDMA_FIFO_CON 0x0040
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR 0x0f00
+
+#define RDMA_MEM_GMC 0x40402020
struct mtk_disp_rdma_data {
unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+ unsigned int fmt)
+{
+	/* The MEM_MODE_INPUT_FORMAT_XXX value returned by this switch
+	 * is defined in the MediaTek hardware data sheet; the letter
+	 * order in XXX bears no relation to the data arrangement in
+	 * memory.
+	 */
+ switch (fmt) {
+ default:
+ case DRM_FORMAT_RGB565:
+ return MEM_MODE_INPUT_FORMAT_RGB565;
+ case DRM_FORMAT_BGR565:
+ return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGB888:
+ return MEM_MODE_INPUT_FORMAT_RGB888;
+ case DRM_FORMAT_BGR888:
+ return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_UYVY:
+ return MEM_MODE_INPUT_FORMAT_UYVY;
+ case DRM_FORMAT_YUYV:
+ return MEM_MODE_INPUT_FORMAT_YUYV;
+ }
+}
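To make the naming caveat above concrete, a hypothetical call with values taken from the switch:

/* The fourcc suffix and the hardware field suffix disagree on byte
 * order, so trust the switch, not the names:
 */
unsigned int con = rdma_fmt_convert(rdma, DRM_FORMAT_XRGB8888);
/* con == MEM_MODE_INPUT_FORMAT_RGBA8888, i.e. (0x002 << 4) */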
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_plane_pending_state *pending = &state->pending;
+ unsigned int addr = pending->addr;
+ unsigned int pitch = pending->pitch & 0xffff;
+ unsigned int fmt = pending->format;
+ unsigned int con;
+
+ con = rdma_fmt_convert(rdma, fmt);
+ writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+ if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL,
+ RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ } else {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, 0);
+ }
+
+ writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+ writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+ writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+ rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+ RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
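rdma_update_bits(), used above, is defined near the top of this file outside the hunk; it is a read-modify-write register helper along these lines (a sketch, not necessarily the exact upstream body):

static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
			     unsigned int mask, unsigned int val)
{
	unsigned int tmp = readl(comp->regs + reg);

	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, comp->regs + reg);
}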
+
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
.config = mtk_rdma_config,
.start = mtk_rdma_start,
.stop = mtk_rdma_stop,
.enable_vblank = mtk_rdma_enable_vblank,
.disable_vblank = mtk_rdma_disable_vblank,
+ .layer_nr = mtk_rdma_layer_nr,
+ .layer_config = mtk_rdma_layer_config,
};
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 2d6aa150a9ff..0b976dfd04df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
bool pending_needs_vblank;
struct drm_pending_vblank_event *event;
- struct drm_plane planes[OVL_LAYER_NR];
+ struct drm_plane *planes;
+ unsigned int layer_nr;
bool pending_planes;
void __iomem *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+ mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
return 0;
}
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_disable_vblank(ovl);
+ mtk_ddp_comp_disable_vblank(comp);
}
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
}
/* Initially configure all planes */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
/*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
* queue update module registers on vblank.
*/
if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
+ mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
state->pending_vrefresh, 0);
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
}
if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ mtk_ddp_comp_layer_config(comp, i, plane_state);
plane_state->pending.config = false;
}
}
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = mtk_smi_larb_get(ovl->larb_dev);
+ ret = mtk_smi_larb_get(comp->larb_dev);
if (ret) {
DRM_ERROR("Failed to get larb: %d\n", ret);
return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
return;
}
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
return;
/* Set all pending plane state to disabled */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
mtk_crtc->enabled = false;
}
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -516,7 +517,7 @@ err_cleanup_crtc:
return ret;
}
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+ mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+	mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
+					sizeof(struct drm_plane),
+					GFP_KERNEL);
+	if (!mtk_crtc->planes)
+		return -ENOMEM;
+
+ for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- &mtk_crtc->planes[1], pipe);
+ mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+ NULL, pipe);
if (ret < 0)
goto unprepare;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_plane.h"
-#define OVL_LAYER_NR 4
#define MTK_LUT_SIZE 512
#define MTK_MAX_BPC 10
#define MTK_MIN_BPC 3
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
#define RDMA0_SOUT_DSI2 0x4
#define RDMA0_SOUT_DSI3 0x5
#define RDMA1_SOUT_DPI0 0x2
@@ -122,6 +124,8 @@
#define DPI0_SEL_IN_RDMA2 0x3
#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
#define DSI1_SEL_IN_RDMA1 0x1
#define DSI1_SEL_IN_RDMA2 0x4
#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->layer_nr)
+ return comp->funcs->layer_nr(comp);
+
+ return 0;
+}
+
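A component without ->layer_nr therefore contributes no planes. For the paths touched by this series that works out as follows (illustrative, per the callbacks added above):

unsigned int nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
/* path[0] == OVL  -> nr == 4: primary, cursor and two overlay planes
 * path[0] == RDMA -> nr == 1: primary only, so mtk_drm_crtc_create()
 *                    passes a NULL cursor plane to mtk_drm_crtc_init()
 */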
static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
unsigned int idx)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
struct mtk_drm_private *private = dev_get_drvdata(dev);
drm_dev_unregister(private->drm);
- drm_dev_unref(private->drm);
+ drm_dev_put(private->drm);
private->drm = NULL;
}
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_kms_helper_poll_disable(drm);
-
- private->suspend_state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(private->suspend_state)) {
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(private->suspend_state);
- }
-
+ ret = drm_mode_config_helper_suspend(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
- return 0;
+
+ return ret;
}
static int mtk_drm_sys_resume(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_atomic_helper_resume(drm, private->suspend_state);
- drm_kms_helper_poll_enable(drm);
-
+ ret = drm_mode_config_helper_resume(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
- return 0;
+
+ return ret;
}
#endif
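drm_mode_config_helper_suspend()/_resume() bundle exactly the poll-disable and atomic suspend/restore steps deleted above, stashing the atomic state in dev->mode_config.suspend_state, so private->suspend_state is no longer needed. The PM hookup itself is unchanged; assuming the driver's existing wiring, it stays along these lines:

static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops,
			 mtk_drm_sys_suspend, mtk_drm_sys_resume);

with &mtk_drm_pm_ops referenced from the platform driver's .driver.pm field.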
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8412119bd940..5691dfa1db6f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
int ret;
if (dpcd >= 0x12) {
- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+ /* Even if we're enabling MST, start with disabling the
+ * branching unit to clear any sink-side MST topology state
+		 * that wasn't set by us.
+ */
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
if (ret < 0)
return ret;
- dpcd &= ~DP_MST_EN;
- if (state)
- dpcd |= DP_MST_EN;
-
- ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
- if (ret < 0)
- return ret;
+ if (state) {
+ /* Now, start initializing */
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
+ DP_MST_EN);
+ if (ret < 0)
+ return ret;
+ }
}
return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
- int ret, state = 0;
+ struct drm_dp_aux *aux;
+ int ret;
+ bool old_state, new_state;
+ u8 mstm_ctrl;
if (!mstm)
return 0;
- if (dpcd[0] >= 0x12) {
- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+ mutex_lock(&mstm->mgr.lock);
+
+ old_state = mstm->mgr.mst_state;
+ new_state = old_state;
+ aux = mstm->mgr.aux;
+
+ if (old_state) {
+ /* Just check that the MST hub is still as we expect it */
+ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
+ if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
+ DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
+ new_state = false;
+ }
+ } else if (dpcd[0] >= 0x12) {
+ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
if (ret < 0)
- return ret;
+ goto probe_error;
if (!(dpcd[1] & DP_MST_CAP))
dpcd[0] = 0x11;
else
- state = allow;
+ new_state = allow;
+ }
+
+ if (new_state == old_state) {
+ mutex_unlock(&mstm->mgr.lock);
+ return new_state;
}
- ret = nv50_mstm_enable(mstm, dpcd[0], state);
+ ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
if (ret)
- return ret;
+ goto probe_error;
+
+ mutex_unlock(&mstm->mgr.lock);
- ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
if (ret)
return nv50_mstm_enable(mstm, dpcd[0], 0);
- return mstm->mgr.mst_state;
+ return new_state;
+
+probe_error:
+ mutex_unlock(&mstm->mgr.lock);
+ return ret;
}
static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 51932c72334e..247f72cc4d10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
- struct nouveau_encoder *nv_encoder = NULL;
+ struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
- int i, panel = -ENODEV;
-
- /* eDP panels need powering on by us (if the VBIOS doesn't default it
- * to on) before doing any AUX channel transactions. LVDS panel power
- * is handled by the SOR itself, and not required for LVDS DDC.
- */
- if (nv_connector->type == DCB_CONNECTOR_eDP) {
- panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
- if (panel == 0) {
- nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
- msleep(300);
- }
- }
+ int i, ret;
+ bool switcheroo_ddc = false;
drm_connector_for_each_possible_encoder(connector, encoder, i) {
nv_encoder = nouveau_encoder(encoder);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- int ret = nouveau_dp_detect(nv_encoder);
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_DP:
+ ret = nouveau_dp_detect(nv_encoder);
if (ret == NOUVEAU_DP_MST)
return NULL;
- if (ret == NOUVEAU_DP_SST)
- break;
- } else
- if ((vga_switcheroo_handler_flags() &
- VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
- nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
- nv_encoder->i2c) {
- int ret;
- vga_switcheroo_lock_ddc(dev->pdev);
- ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
- vga_switcheroo_unlock_ddc(dev->pdev);
- if (ret)
+ else if (ret == NOUVEAU_DP_SST)
+ found = nv_encoder;
+
+ break;
+ case DCB_OUTPUT_LVDS:
+ switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
+ VGA_SWITCHEROO_CAN_SWITCH_DDC);
+ /* fall-through */
+ default:
+ if (!nv_encoder->i2c)
break;
- } else
- if (nv_encoder->i2c) {
+
+ if (switcheroo_ddc)
+ vga_switcheroo_lock_ddc(dev->pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
- break;
+ found = nv_encoder;
+ if (switcheroo_ddc)
+ vga_switcheroo_unlock_ddc(dev->pdev);
+
+ break;
}
+ if (found)
+ break;
}
- /* eDP panel not detected, restore panel power GPIO to previous
- * state to avoid confusing the SOR for other output types.
- */
- if (!nv_encoder && panel == 0)
- nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
-
- return nv_encoder;
+ return found;
}
static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- /* Outputs are only polled while runtime active, so acquiring a
- * runtime PM ref here is unnecessary (and would deadlock upon
- * runtime suspend because it waits for polling to finish).
+ /* Outputs are only polled while runtime active, so resuming the
+ * device here is unnecessary (and would deadlock upon runtime suspend
+	 * because it waits for polling to finish). We do, however, want to
+ * prevent the autosuspend timer from elapsing during this operation
+ * if possible.
*/
- if (!drm_kms_helper_is_poll_worker()) {
- ret = pm_runtime_get_sync(connector->dev->dev);
+ if (drm_kms_helper_is_poll_worker()) {
+ pm_runtime_get_noresume(dev->dev);
+ } else {
+ ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
@@ -638,10 +628,8 @@ detect_analog:
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
- }
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
+ int ret;
+
+ ret = pm_runtime_get(drm->dev->dev);
+ if (ret == 0) {
+ /* We can't block here if there's a pending PM request
+ * running, as we'll deadlock nouveau_display_fini() when it
+ * calls nvif_put() on our nvif_notify struct. So, simply
+		 * defer the hotplug event until the device finishes resuming.
+ */
+ NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
+ name);
+ schedule_work(&drm->hpd_work);
+
+ pm_runtime_put_noidle(drm->dev->dev);
+ return NVIF_NOTIFY_KEEP;
+ } else if (ret != 1 && ret != -EACCES) {
+ NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
+ name, ret);
+ return NVIF_NOTIFY_DROP;
+ }
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
drm_helper_hpd_irq_event(connector->dev);
}
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
}
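This hunk, the ACPI notifier in nouveau_display.c and the fbcon poll handler below all decode pm_runtime_get() the same way: 1 means the device was already active, 0 means an asynchronous resume was queued, and -EACCES means runtime PM is disabled (so the device is up anyway). A hypothetical helper capturing the pattern; nouveau itself open-codes it in each caller:

static bool rpm_get_or_defer(struct device *dev, struct work_struct *work)
{
	int ret = pm_runtime_get(dev);	/* asynchronous resume request */

	if (ret == 1 || ret == -EACCES)
		return true;		/* usable now; caller must put */

	if (ret == 0) {			/* resume in flight: defer event */
		schedule_work(work);
		pm_runtime_put_noidle(dev);
	}				/* else: real RPM error, drop event */

	return false;
}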
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 139368b31916..540c0cbbfcee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
};
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
{
struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
struct acpi_bus_event *info = data;
+ int ret;
if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
- /*
- * This may be the only indication we receive of a
- * connector hotplug on a runtime suspended GPU,
- * schedule hpd_work to check.
- */
- schedule_work(&drm->hpd_work);
+ ret = pm_runtime_get(drm->dev->dev);
+ if (ret == 1 || ret == -EACCES) {
+ /* If the GPU is already awake, or in a state
+ * where we can't wake it up, it can handle
+				 * its own hotplug events.
+ */
+ pm_runtime_put_autosuspend(drm->dev->dev);
+ } else if (ret == 0) {
+ /* This may be the only indication we receive
+ * of a connector hotplug on a runtime
+ * suspended GPU, schedule hpd_work to check.
+ */
+ NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+ schedule_work(&drm->hpd_work);
+ pm_runtime_put_noidle(drm->dev->dev);
+ } else {
+ NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+ ret);
+ }
/* acpi-video should not generate keypresses for this */
return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
+ /* enable connector detection and polling for connectors without HPD
+ * support
+ */
+ drm_kms_helper_poll_enable(dev);
+
/* enable hotplug interrupts */
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
}
void
-nouveau_display_fini(struct drm_device *dev, bool suspend)
+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
}
drm_connector_list_iter_end(&conn_iter);
+ if (!runtime)
+ cancel_work_sync(&drm->hpd_work);
+
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
}
}
- nouveau_display_fini(dev, true);
+ nouveau_display_fini(dev, true, runtime);
return 0;
}
- nouveau_display_fini(dev, true);
+ nouveau_display_fini(dev, true, runtime);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 54aa7c3fa42d..ff92b54ce448 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev, bool suspend);
+void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c7ec86d6c3c9..74d2283f2c28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
mutex_unlock(&drm->master.lock);
}
if (ret) {
- NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
- NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->device.object, mmus);
if (ret < 0) {
- NV_ERROR(drm, "No supported MMU class\n");
+ NV_PRINTK(err, cli, "No supported MMU class\n");
goto done;
}
ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
if (ret) {
- NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, vmms);
if (ret < 0) {
- NV_ERROR(drm, "No supported VMM class\n");
+ NV_PRINTK(err, cli, "No supported VMM class\n");
goto done;
}
ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
if (ret) {
- NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, mems);
if (ret < 0) {
- NV_ERROR(drm, "No supported MEM class\n");
+ NV_PRINTK(err, cli, "No supported MEM class\n");
goto done;
}
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
- } else {
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
}
+
return 0;
fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev, false);
+ nouveau_display_fini(dev, false, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}
- drm_kms_helper_poll_disable(drm_dev);
nouveau_switcheroo_optimus_dsm();
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 844498c4267c..0f64c0a1d4b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
console_unlock();
if (state == FBINFO_STATE_RUNNING) {
+ nouveau_fbcon_hotplug_resume(drm->fbcon);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
}
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
schedule_work(&drm->fbcon_work);
}
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ int ret;
+
+ if (!fbcon)
+ return;
+
+ mutex_lock(&fbcon->hotplug_lock);
+
+ ret = pm_runtime_get(dev->dev);
+ if (ret == 1 || ret == -EACCES) {
+ drm_fb_helper_hotplug_event(&fbcon->helper);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ } else if (ret == 0) {
+ /* If the GPU was already in the process of suspending before
+ * this event happened, then we can't block here as we'll
+ * deadlock the runtime pmops since they wait for us to
+		 * finish. So, just defer this event until we runtime
+		 * resume again; it will be handled by fbcon_work.
+ */
+ NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+ fbcon->hotplug_waiting = true;
+ pm_runtime_put_noidle(drm->dev->dev);
+ } else {
+ DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+ struct nouveau_drm *drm;
+
+ if (!fbcon)
+ return;
+ drm = nouveau_drm(fbcon->helper.dev);
+
+ mutex_lock(&fbcon->hotplug_lock);
+ if (fbcon->hotplug_waiting) {
+ fbcon->hotplug_waiting = false;
+
+ NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+ drm_fb_helper_hotplug_event(&fbcon->helper);
+ }
+ mutex_unlock(&fbcon->hotplug_lock);
+}
+
int
nouveau_fbcon_init(struct drm_device *dev)
{
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm->fbcon = fbcon;
INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+ mutex_init(&fbcon->hotplug_lock);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index a6f192ea3fa6..db9d52047ef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
+
+ struct mutex hotplug_lock;
+ bool hotplug_waiting;
};
void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
void nouveau_fbcon_accel_restore(struct drm_device *dev);
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
extern int nouveau_nofbaccel;
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 3da5a4305aa4..8f1ce4833230 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
pr_err("VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pmops_resume(&pdev->dev);
- drm_kms_helper_poll_enable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- drm_kms_helper_poll_disable(dev);
nouveau_switcheroo_optimus_dsm();
nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 32fa94a9773f..cbd33e87b799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
struct nvkm_outp *outp, *outt, *pair;
struct nvkm_conn *conn;
struct nvkm_head *head;
+ struct nvkm_ior *ior;
struct nvbios_connE connE;
struct dcb_output dcbE;
u8 hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
return ret;
}
+ /* Enforce identity-mapped SOR assignment for panels, which have
+	 * certain bits (e.g. backlight controls) wired to a specific SOR.
+ */
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+ outp->conn->info.type == DCB_CONNECTOR_eDP) {
+ ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+ if (!WARN_ON(!ior))
+ ior->identity = true;
+ outp->identity = true;
+ }
+ }
+
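outp->info.or is a DCB bitmask of the ORs an output may drive; a panel is expected to name exactly one SOR, which ffs() converts to an index (hypothetical value for illustration):

/* info.or == 0x4 (bit 2 set) -> ffs(0x4) == 3 -> SOR index 2 */
ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);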
i = 0;
list_for_each_entry(head, &disp->head, head)
i = max(i, head->id + 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 7c5bed29ffef..5f301e632599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -28,6 +28,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
}
static void
-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
- /* Prevent link from being retrained if sink sends an IRQ. */
- atomic_set(&dp->lt.done, 0);
- ior->dp.nr = 0;
-
/* Execute DisableLT script from DP Info Table. */
nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
);
}
+static void
+nvkm_dp_release(struct nvkm_outp *outp)
+{
+ struct nvkm_dp *dp = nvkm_dp(outp);
+
+ /* Prevent link from being retrained if sink sends an IRQ. */
+ atomic_set(&dp->lt.done, 0);
+ dp->outp.ior->dp.nr = 0;
+}
+
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
@@ -491,7 +498,7 @@ done:
return ret;
}
-static void
+static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
sizeof(dp->dpcd)))
- return;
+ return true;
}
if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
}
atomic_set(&dp->lt.done, 0);
+ return false;
}
static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
+ struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
struct nvkm_dp *dp = nvkm_dp(outp);
+
nvkm_notify_put(&dp->outp.conn->hpd);
- nvkm_dp_enable(dp, true);
+
+ /* eDP panels need powering on by us (if the VBIOS doesn't default it
+ * to on) before doing any AUX channel transactions. LVDS panel power
+ * is handled by the SOR itself, and not required for LVDS DDC.
+ */
+ if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
+ int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ if (power == 0)
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+
+ /* We delay here unconditionally, even if already powered,
+	 * because some laptop panels have a significant resume
+	 * delay before the panel begins responding.
+	 *
+	 * This is likely a bit of a hack, but we have no better idea
+	 * for handling this at the moment.
+ */
+ msleep(300);
+
+ /* If the eDP panel can't be detected, we need to restore
+ * the panel power GPIO to avoid breaking another output.
+ */
+ if (!nvkm_dp_enable(dp, true) && power == 0)
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
+ } else {
+ nvkm_dp_enable(dp, true);
+ }
+
nvkm_notify_get(&dp->hpd);
}
@@ -576,6 +613,7 @@ nvkm_dp_func = {
.fini = nvkm_dp_fini,
.acquire = nvkm_dp_acquire,
.release = nvkm_dp_release,
+ .disable = nvkm_dp_disable,
};
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index e0b4e0c5704e..19911211a12a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -16,6 +16,7 @@ struct nvkm_ior {
char name[8];
struct list_head head;
+ bool identity;
struct nvkm_ior_state {
struct nvkm_outp *outp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index f89c7b977aa5..def005dd5fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
nv50_disp_super_ied_off(head, ior, 2);
/* If we're shutting down the OR's only active head, execute
- * the output path's release function.
+ * the output path's disable function.
*/
if (ior->arm.head == (1 << head->id)) {
- if ((outp = ior->arm.outp) && outp->func->release)
- outp->func->release(outp, ior);
+ if ((outp = ior->arm.outp) && outp->func->disable)
+ outp->func->disable(outp, ior);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index be9e7f8c3b23..c62030c96fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
if (ior) {
outp->acquired &= ~user;
if (!outp->acquired) {
+ if (outp->func->release && outp->ior)
+ outp->func->release(outp);
outp->ior->asy.outp = NULL;
outp->ior = NULL;
}
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
if (proto == UNKNOWN)
return -ENOSYS;
+ /* Deal with panels requiring identity-mapped SOR assignment. */
+ if (outp->identity) {
+ ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
+ if (WARN_ON(!ior))
+ return -ENOSPC;
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->asy.outp && ior->arm.outp == outp)
+ if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&
+ if (!ior->identity &&
+ !ior->asy.outp && ior->type == type && !ior->arm.outp &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
* but will be released during the next modeset.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->asy.outp && ior->type == type &&
+ if (!ior->identity && !ior->asy.outp && ior->type == type &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
outp->index = index;
outp->info = *dcbE;
outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
- outp->or = ffs(outp->info.or) - 1;
OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
"edid %x bus %d head %x",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index ea84d7d5741a..6c8aa5cfed9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -13,10 +13,10 @@ struct nvkm_outp {
struct dcb_output info;
struct nvkm_i2c_bus *i2c;
- int or;
struct list_head head;
struct nvkm_conn *conn;
+ bool identity;
/* Assembly state. */
#define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
void (*init)(struct nvkm_outp *);
void (*fini)(struct nvkm_outp *);
int (*acquire)(struct nvkm_outp *);
- void (*release)(struct nvkm_outp *, struct nvkm_ior *);
+ void (*release)(struct nvkm_outp *);
+ void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
};
#define OUTP_MSG(o,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index b80618e35491..17235e940ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pmuR pmu;
- if (!nvbios_pmuRm(bios, type, &pmu)) {
- nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
+ if (!nvbios_pmuRm(bios, type, &pmu))
return -EINVAL;
- }
if (!post)
return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -EINVAL;
}
+ /* Upload DEVINIT application from VBIOS onto PMU. */
ret = pmu_load(init, 0x04, post, &exec, &args);
- if (ret)
+ if (ret) {
+ nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
return ret;
+ }
- /* upload first chunk of init data */
+ /* Upload tables required by opcodes in boot scripts. */
if (post) {
- // devinit tables
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
pmu_data(init, pmu, img, len);
}
- /* upload second chunk of init data */
+ /* Upload boot scripts. */
if (post) {
- // devinit boot scripts
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
pmu_data(init, pmu, img, len);
}
- /* execute init tables */
+ /* Execute DEVINIT. */
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -ETIMEDOUT;
}
- /* load and execute some other ucode image (bios therm?) */
- return pmu_load(init, 0x01, post, NULL, NULL);
+ /* Optional: Execute PRE_OS application on PMU, which should at
+ * least take care of fans until a full PMU has been loaded.
+ */
+ pmu_load(init, 0x01, post, NULL, NULL);
+ return 0;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index de269eb482dd..7459def78d50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
- if (vmm->func->part && inst) {
+ if (inst && vmm->func->part) {
mutex_lock(&vmm->mutex);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);