Diffstat (limited to 'drivers')
148 files changed, 1602 insertions(+), 1575 deletions(-)
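The first hunks below add reservation_object_copy_fences(), which clones another reservation object's shared and exclusive fences under the seqcount write side. A minimal caller-side sketch follows; the clone_fences() helper is hypothetical, and a real caller locking two objects would pass a ww_acquire_ctx instead of NULL for deadlock avoidance (the amdgpu_prime.c hunk shows the single-lock case):

/* Hypothetical helper: duplicate all fences of src into dst, e.g. when
 * cloning a BO.  Both reservation objects must be held, as the new
 * kernel-doc requires; NULL acquire contexts keep the sketch short. */
static int clone_fences(struct reservation_object *dst,
			struct reservation_object *src)
{
	int r;

	ww_mutex_lock(&dst->lock, NULL);
	ww_mutex_lock(&src->lock, NULL);
	r = reservation_object_copy_fences(dst, src);
	ww_mutex_unlock(&src->lock);
	ww_mutex_unlock(&dst->lock);
	return r;
}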
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 393817e849ed..dec3a815455d 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -195,8 +195,7 @@ done: if (old) kfree_rcu(old, rcu); - if (old_fence) - dma_fence_put(old_fence); + dma_fence_put(old_fence); } /** @@ -258,12 +257,71 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, dma_fence_put(rcu_dereference_protected(old->shared[i], reservation_object_held(obj))); - if (old_fence) - dma_fence_put(old_fence); + dma_fence_put(old_fence); } EXPORT_SYMBOL(reservation_object_add_excl_fence); /** +* reservation_object_copy_fences - Copy all fences from src to dst. +* @dst: the destination reservation object +* @src: the source reservation object +* +* Copy all fences from src to dst. Both src->lock as well as dst-lock must be +* held. +*/ +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src) +{ + struct reservation_object_list *src_list, *dst_list; + struct dma_fence *old, *new; + size_t size; + unsigned i; + + src_list = reservation_object_get_list(src); + + if (src_list) { + size = offsetof(typeof(*src_list), + shared[src_list->shared_count]); + dst_list = kmalloc(size, GFP_KERNEL); + if (!dst_list) + return -ENOMEM; + + dst_list->shared_count = src_list->shared_count; + dst_list->shared_max = src_list->shared_count; + for (i = 0; i < src_list->shared_count; ++i) + dst_list->shared[i] = + dma_fence_get(src_list->shared[i]); + } else { + dst_list = NULL; + } + + kfree(dst->staged); + dst->staged = NULL; + + src_list = reservation_object_get_list(dst); + + old = reservation_object_get_excl(dst); + new = reservation_object_get_excl(src); + + dma_fence_get(new); + + preempt_disable(); + write_seqcount_begin(&dst->seq); + /* write_seqcount_begin provides the necessary memory barrier */ + RCU_INIT_POINTER(dst->fence_excl, new); + RCU_INIT_POINTER(dst->fence, dst_list); + write_seqcount_end(&dst->seq); + preempt_enable(); + + if (src_list) + kfree_rcu(src_list, rcu); + dma_fence_put(old); + + return 0; +} +EXPORT_SYMBOL(reservation_object_copy_fences); + +/** * reservation_object_get_fences_rcu - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object @@ -373,12 +431,25 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, long ret = timeout ? 
timeout : 1; retry: - fence = NULL; shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); - if (wait_all) { + fence = rcu_dereference(obj->fence_excl); + if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + if (!dma_fence_get_rcu(fence)) + goto unlock_retry; + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + fence = NULL; + } + + } else { + fence = NULL; + } + + if (!fence && wait_all) { struct reservation_object_list *fobj = rcu_dereference(obj->fence); @@ -405,22 +476,6 @@ retry: } } - if (!shared_count) { - struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); - - if (fence_excl && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &fence_excl->flags)) { - if (!dma_fence_get_rcu(fence_excl)) - goto unlock_retry; - - if (dma_fence_is_signaled(fence_excl)) - dma_fence_put(fence_excl); - else - fence = fence_excl; - } - } - rcu_read_unlock(); if (fence) { if (read_seqcount_retry(&obj->seq, seq)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 51d1364cf185..a5427cf4b19d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -373,78 +373,10 @@ struct amdgpu_clock { }; /* - * BO. + * GEM. */ -struct amdgpu_bo_list_entry { - struct amdgpu_bo *robj; - struct ttm_validate_buffer tv; - struct amdgpu_bo_va *bo_va; - uint32_t priority; - struct page **user_pages; - int user_invalidated; -}; - -struct amdgpu_bo_va_mapping { - struct list_head list; - struct rb_node rb; - uint64_t start; - uint64_t last; - uint64_t __subtree_last; - uint64_t offset; - uint64_t flags; -}; - -/* bo virtual addresses in a specific vm */ -struct amdgpu_bo_va { - /* protected by bo being reserved */ - struct list_head bo_list; - struct dma_fence *last_pt_update; - unsigned ref_count; - - /* protected by vm mutex and spinlock */ - struct list_head vm_status; - - /* mappings for this bo_va */ - struct list_head invalids; - struct list_head valids; - - /* constant after initialization */ - struct amdgpu_vm *vm; - struct amdgpu_bo *bo; -}; #define AMDGPU_GEM_DOMAIN_MAX 0x3 - -struct amdgpu_bo { - /* Protected by tbo.reserved */ - u32 prefered_domains; - u32 allowed_domains; - struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; - struct ttm_placement placement; - struct ttm_buffer_object tbo; - struct ttm_bo_kmap_obj kmap; - u64 flags; - unsigned pin_count; - void *kptr; - u64 tiling_flags; - u64 metadata_flags; - void *metadata; - u32 metadata_size; - unsigned prime_shared_count; - /* list of all virtual address to which this bo - * is associated to - */ - struct list_head va; - /* Constant after initialization */ - struct drm_gem_object gem_base; - struct amdgpu_bo *parent; - struct amdgpu_bo *shadow; - - struct ttm_bo_kmap_obj dma_buf_vmap; - struct amdgpu_mn *mn; - struct list_head mn_list; - struct list_head shadow_list; -}; #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) void amdgpu_gem_object_free(struct drm_gem_object *obj); @@ -678,15 +610,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT /* overlap the doorbell assignment with VCN as they are mutually exclusive * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD */ - AMDGPU_DOORBELL64_RING0_1 = 0xF8, - AMDGPU_DOORBELL64_RING2_3 = 0xF9, - AMDGPU_DOORBELL64_RING4_5 = 0xFA, - AMDGPU_DOORBELL64_RING6_7 = 0xFB, + AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, + AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, + AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, + AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, - 
AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC, - AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD, - AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE, - AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF, + AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, + AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, + AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, + AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, AMDGPU_DOORBELL64_INVALID = 0xFFFF @@ -825,6 +757,14 @@ struct amdgpu_fpriv { /* * residency list */ +struct amdgpu_bo_list_entry { + struct amdgpu_bo *robj; + struct ttm_validate_buffer tv; + struct amdgpu_bo_va *bo_va; + uint32_t priority; + struct page **user_pages; + int user_invalidated; +}; struct amdgpu_bo_list { struct mutex lock; @@ -1191,10 +1131,6 @@ struct amdgpu_wb { int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb); -int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb); -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb); -void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb); void amdgpu_get_pcie_info(struct amdgpu_device *adev); @@ -1488,7 +1424,7 @@ struct amdgpu_device { bool is_atom_fw; uint8_t *bios; uint32_t bios_size; - struct amdgpu_bo *stollen_vga_memory; + struct amdgpu_bo *stolen_vga_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 06879d1dcabd..a52795d9b458 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -285,19 +285,20 @@ static int acp_hw_init(void *handle) return 0; else if (r) return r; + if (adev->asic_type != CHIP_STONEY) { + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); + if (adev->acp.acp_genpd == NULL) + return -ENOMEM; - adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); - if (adev->acp.acp_genpd == NULL) - return -ENOMEM; - - adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; - adev->acp.acp_genpd->gpd.power_off = acp_poweroff; - adev->acp.acp_genpd->gpd.power_on = acp_poweron; + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; + adev->acp.acp_genpd->gpd.power_on = acp_poweron; - adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; - pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + } adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, GFP_KERNEL); @@ -319,14 +320,29 @@ static int acp_hw_init(void *handle) return -ENOMEM; } - i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + } i2s_pdata[0].cap = DWC_I2S_PLAY; i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1 | + DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1; + } - i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | - 
DW_I2S_QUIRK_COMP_PARAM1; i2s_pdata[1].cap = DWC_I2S_RECORD; i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; @@ -373,12 +389,14 @@ static int acp_hw_init(void *handle) if (r) return r; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); - if (r) { - dev_err(dev, "Failed to add dev to genpd\n"); - return r; + if (adev->asic_type != CHIP_STONEY) { + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); + if (r) { + dev_err(dev, "Failed to add dev to genpd\n"); + return r; + } } } @@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ - if (!adev->acp.acp_genpd) + if (!adev->acp.acp_cell) return 0; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); - /* If removal fails, dont giveup and try rest */ - if (ret) - dev_err(dev, "remove dev from genpd failed\n"); + if (adev->acp.acp_genpd) { + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); + /* If removal fails, dont giveup and try rest */ + if (ret) + dev_err(dev, "remove dev from genpd failed\n"); + } + kfree(adev->acp.acp_genpd); } mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); - kfree(adev->acp.acp_genpd); kfree(adev->acp.acp_cell); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index ef79551b4cb7..57afad79f55d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -30,10 +30,10 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "amdgpu.h" +#include "amdgpu_pm.h" #include "amd_acpi.h" #include "atom.h" -extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); /* Call the ATIF method */ /** @@ -289,7 +289,7 @@ out: * handles it. 
* Returns NOTIFY code */ -int amdgpu_atif_handler(struct amdgpu_device *adev, +static int amdgpu_atif_handler(struct amdgpu_device *adev, struct acpi_bus_event *event) { struct amdgpu_atif *atif = &adev->atif; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 37971d9402e3..c7bcf5207d79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -27,7 +27,6 @@ #include "amdgpu_gfx.h" #include <linux/module.h> -const struct kfd2kgd_calls *kfd2kgd; const struct kgd2kfd_calls *kgd2kfd; bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); @@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void) return ret; } -bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) +void amdgpu_amdkfd_fini(void) +{ + if (kgd2kfd) { + kgd2kfd->exit(); + symbol_put(kgd2kfd_init); + } +} + +void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { + const struct kfd2kgd_calls *kfd2kgd; + + if (!kgd2kfd) + return; + switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: @@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; default: - return false; + dev_info(adev->dev, "kfd not supported on this ASIC\n"); + return; } - return true; -} - -void amdgpu_amdkfd_fini(void) -{ - if (kgd2kfd) { - kgd2kfd->exit(); - symbol_put(kgd2kfd_init); - } -} - -void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) -{ - if (kgd2kfd) - adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, + adev->pdev, kfd2kgd); } void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) @@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, return -ENOMEM; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo); + AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0, + &(*mem)->bo); if (r) { dev_err(adev->dev, "failed to allocate BO for amdkfd (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 73f83a10ae14..b8802a561cbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -39,8 +39,6 @@ struct kgd_mem { int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); -bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev); - void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 2fb299afc12b..63ec1e1bb6aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, n = AMDGPU_BENCHMARK_ITERATIONS; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, - NULL, &sobj); + NULL, 0, &sobj); if (r) { goto out_cleanup; } @@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, goto out_cleanup; } r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, - NULL, &dobj); + NULL, 0, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 
d324e1c24028..59089e027f4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, } bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm) { @@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, entry->tv.bo = &entry->robj->tbo; entry->tv.shared = !entry->robj->prime_shared_count; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) gws_obj = entry->robj; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; total_size += amdgpu_bo_size(entry->robj); @@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; union drm_amdgpu_bo_list *args = data; uint32_t handle = args->in.list_handle; - const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr; + const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr); struct drm_amdgpu_bo_list_entry *info; struct amdgpu_bo_list *list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a99e0bca6812..fd435a96481c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, true, domain, flags, NULL, &placement, NULL, - &obj); + 0, &obj); if (ret) { DRM_ERROR("(%d) bo create failed\n", ret); return ret; @@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h r = amdgpu_bo_reserve(obj, true); if (unlikely(r != 0)) return r; - r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains, + r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains, min_offset, max_offset, mcaddr); amdgpu_bo_unreserve(obj); return r; @@ -659,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); if (CGS_UCODE_ID_CP_MEC == type) - info->image_size = (header->jt_offset) << 2; + info->image_size = le32_to_cpu(header->jt_offset) << 2; info->fw_version = amdgpu_get_firmware_version(cgs_device, type); info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 33789510e663..c05479ec825a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, *offset = data->offset; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { amdgpu_bo_unref(&p->uf_entry.robj); @@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } /* get chunks */ - chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks); + chunk_array_user = u64_to_user_ptr(cs->in.chunks); if (copy_from_user(chunk_array, 
chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; @@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)(uintptr_t)chunk_array[i]; + chunk_ptr = u64_to_user_ptr(chunk_array[i]); if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { ret = -EFAULT; @@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)(uintptr_t)user_chunk.chunk_data; + cdata = u64_to_user_ptr(user_chunk.chunk_data); p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); if (p->chunks[i].kdata == NULL) { @@ -348,11 +348,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, * that. */ if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) - domain = bo->prefered_domains; + domain = bo->preferred_domains; else domain = bo->allowed_domains; } else { - domain = bo->prefered_domains; + domain = bo->preferred_domains; } } else { domain = bo->allowed_domains; @@ -1437,7 +1437,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, if (fences == NULL) return -ENOMEM; - fences_user = (void __user *)(uintptr_t)(wait->in.fences); + fences_user = u64_to_user_ptr(wait->in.fences); if (copy_from_user(fences, fences_user, sizeof(struct drm_amdgpu_fence) * fence_count)) { r = -EFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6279956e92a4..a6f6cb0f2e02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -336,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, - PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->vram_scratch.robj); - if (r) { - return r; - } - } - - r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->vram_scratch.robj, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->vram_scratch.robj); - return r; - } - r = amdgpu_bo_kmap(adev->vram_scratch.robj, - (void **)&adev->vram_scratch.ptr); - if (r) - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - - return r; + return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->vram_scratch.robj, + &adev->vram_scratch.gpu_addr, + (void **)&adev->vram_scratch.ptr); } static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - return; - } - r = amdgpu_bo_reserve(adev->vram_scratch.robj, true); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->vram_scratch.robj); - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - } - amdgpu_bo_unref(&adev->vram_scratch.robj); + amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); } /** @@ -539,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), + /* AMDGPU_MAX_WB * 
sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->wb.wb_obj, &adev->wb.gpu_addr, (void **)&adev->wb.wb); @@ -570,47 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) { unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); - if (offset < adev->wb.num_wb) { - __set_bit(offset, adev->wb.used); - *wb = offset; - return 0; - } else { - return -EINVAL; - } -} -/** - * amdgpu_wb_get_64bit - Allocate a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Allocate a wb slot for use by the driver (all asics). - * Returns 0 on success or -EINVAL on failure. - */ -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) -{ - unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, - adev->wb.num_wb, 0, 2, 7, 0); - if ((offset + 1) < adev->wb.num_wb) { + if (offset < adev->wb.num_wb) { __set_bit(offset, adev->wb.used); - __set_bit(offset + 1, adev->wb.used); - *wb = offset; - return 0; - } else { - return -EINVAL; - } -} - -int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb) -{ - int i = 0; - unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, - adev->wb.num_wb, 0, 8, 63, 0); - if ((offset + 7) < adev->wb.num_wb) { - for (i = 0; i < 8; i++) - __set_bit(offset + i, adev->wb.used); - *wb = offset; + *wb = offset * 8; /* convert to dw offset */ return 0; } else { return -EINVAL; @@ -632,39 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) } /** - * amdgpu_wb_free_64bit - Free a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Free a wb slot allocated for use by the driver (all asics) - */ -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb) -{ - if ((wb + 1) < adev->wb.num_wb) { - __clear_bit(wb, adev->wb.used); - __clear_bit(wb + 1, adev->wb.used); - } -} - -/** - * amdgpu_wb_free_256bit - Free a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Free a wb slot allocated for use by the driver (all asics) - */ -void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb) -{ - int i = 0; - - if ((wb + 7) < adev->wb.num_wb) - for (i = 0; i < 8; i++) - __clear_bit(wb + i, adev->wb.used); -} - -/** * amdgpu_vram_location - try to find VRAM location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations @@ -1948,7 +1844,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, - AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_UVD, + AMD_IP_BLOCK_TYPE_VCE }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index cdf2ab20166a..6ad243293a78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); + drm_gem_object_put_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } @@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { - 
drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 0a8ee2411180..9afa9c097e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) amdgpu_bo_unpin(abo); amdgpu_bo_unreserve(abo); } - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, @@ -250,7 +250,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; info->fix.smem_start = adev->mc.aper_base + tmp; info->fix.smem_len = amdgpu_bo_size(abo); - info->screen_base = abo->kptr; + info->screen_base = amdgpu_bo_kptr(abo); info->screen_size = amdgpu_bo_size(abo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); @@ -280,7 +280,7 @@ out: } if (fb && ret) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 5cc4987cd887..94c1e2e8e34c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -144,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->gart.robj); + NULL, NULL, 0, &adev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 917ac5e074a0..81127ffcefb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -59,7 +59,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, retry: r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, - flags, NULL, NULL, &robj); + flags, NULL, NULL, 0, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { @@ -91,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, handle) { WARN_ONCE(1, "And also active allocations!\n"); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } idr_destroy(&file->object_idr); spin_unlock(&file->table_lock); @@ -263,7 +263,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) return r; @@ -306,7 +306,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, return r; bo = gem_to_amdgpu_bo(gobj); - bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); if (r) @@ -341,7 +341,7 @@ int amdgpu_gem_userptr_ioctl(struct 
drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) return r; @@ -355,7 +355,7 @@ unlock_mmap_sem: up_read(¤t->mm->mmap_sem); release_object: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -374,11 +374,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, robj = gem_to_amdgpu_bo(gobj); if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return -EPERM; } *offset_p = amdgpu_bo_mmap_offset(robj); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return 0; } @@ -448,7 +448,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } else r = ret; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -491,7 +491,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, unreserve: amdgpu_bo_unreserve(robj); out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -664,7 +664,7 @@ error_backoff: ttm_eu_backoff_reservation(&ticket, &list); error_unref: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -689,11 +689,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, switch (args->op) { case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { struct drm_amdgpu_gem_create_in info; - void __user *out = (void __user *)(uintptr_t)args->value; + void __user *out = u64_to_user_ptr(args->value); info.bo_size = robj->gem_base.size; info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; - info.domains = robj->prefered_domains; + info.domains = robj->preferred_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); if (copy_to_user(out, &info, sizeof(info))) @@ -711,10 +711,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, amdgpu_bo_unreserve(robj); break; } - robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | + robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU); - robj->allowed_domains = robj->prefered_domains; + robj->allowed_domains = robj->preferred_domains; if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; @@ -726,7 +726,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, } out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -754,7 +754,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 09f833255ba1..c908f972283c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - amdgpu_amdkfd_load_interface(adev); amdgpu_amdkfd_device_probe(adev); amdgpu_amdkfd_device_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 3ec43cf9ad78..6e72fe7901ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -220,7 +220,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, } /** - * amdgpu_bo_create_kernel - create BO for kernel use + * amdgpu_bo_create_reserved - create reserved BO for kernel use * * @adev: amdgpu device object * @size: size for the new BO @@ -230,24 +230,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, * @gpu_addr: GPU addr of the pinned BO * @cpu_addr: optional CPU address mapping * - * Allocates and pins a BO for kernel internal use. + * Allocates and pins a BO for kernel internal use, and returns it still + * reserved. * * Returns 0 on success, negative error code otherwise. */ -int amdgpu_bo_create_kernel(struct amdgpu_device *adev, - unsigned long size, int align, - u32 domain, struct amdgpu_bo **bo_ptr, - u64 *gpu_addr, void **cpu_addr) +int amdgpu_bo_create_reserved(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) { + bool free = false; int r; - r = amdgpu_bo_create(adev, size, align, true, domain, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, bo_ptr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); - return r; + if (!*bo_ptr) { + r = amdgpu_bo_create(adev, size, align, true, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, + NULL, NULL, 0, bo_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", + r); + return r; + } + free = true; } r = amdgpu_bo_reserve(*bo_ptr, false); @@ -270,20 +276,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, } } - amdgpu_bo_unreserve(*bo_ptr); - return 0; error_unreserve: amdgpu_bo_unreserve(*bo_ptr); error_free: - amdgpu_bo_unref(bo_ptr); + if (free) + amdgpu_bo_unref(bo_ptr); return r; } /** + * amdgpu_bo_create_kernel - create BO for kernel use + * + * @adev: amdgpu device object + * @size: size for the new BO + * @align: alignment for the new BO + * @domain: where to place it + * @bo_ptr: resulting BO + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: optional CPU address mapping + * + * Allocates and pins a BO for kernel internal use. + * + * Returns 0 on success, negative error code otherwise. 
+ */ +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) +{ + int r; + + r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr, + gpu_addr, cpu_addr); + + if (r) + return r; + + amdgpu_bo_unreserve(*bo_ptr); + + return 0; +} + +/** * amdgpu_bo_free_kernel - free BO for kernel use * * @bo: amdgpu BO to free @@ -318,6 +356,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr) { struct amdgpu_bo *bo; @@ -352,13 +391,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, } INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU | AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA); - bo->allowed_domains = bo->prefered_domains; + bo->allowed_domains = bo->preferred_domains; if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; @@ -418,7 +457,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { struct dma_fence *fence; - r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); + r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); if (unlikely(r)) goto fail_unreserve; @@ -470,6 +509,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &placement, bo->tbo.resv, + 0, &bo->shadow); if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); @@ -481,11 +521,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, return r; } +/* init_value will only take effect when flags contains + * AMDGPU_GEM_CREATE_VRAM_CLEARED. 
+ */ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr) { struct ttm_placement placement = {0}; @@ -500,7 +544,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, flags, sg, &placement, - resv, bo_ptr); + resv, init_value, bo_ptr); if (r) return r; @@ -562,7 +606,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo) if (bo->pin_count) return 0; - domain = bo->prefered_domains; + domain = bo->preferred_domains; retry: amdgpu_ttm_placement_from_domain(bo, domain); @@ -609,16 +653,16 @@ err: int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) { - bool is_iomem; + void *kptr; long r; if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; - if (bo->kptr) { - if (ptr) { - *ptr = bo->kptr; - } + kptr = amdgpu_bo_kptr(bo); + if (kptr) { + if (ptr) + *ptr = kptr; return 0; } @@ -631,19 +675,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) if (r) return r; - bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); if (ptr) - *ptr = bo->kptr; + *ptr = amdgpu_bo_kptr(bo); return 0; } +void *amdgpu_bo_kptr(struct amdgpu_bo *bo) +{ + bool is_iomem; + + return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); +} + void amdgpu_bo_kunmap(struct amdgpu_bo *bo) { - if (bo->kptr == NULL) - return; - bo->kptr = NULL; - ttm_bo_kunmap(&bo->kmap); + if (bo->kmap.bo) + ttm_bo_kunmap(&bo->kmap); } struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 833b172a2c2a..9b7b4fcb047b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -33,6 +33,67 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX +struct amdgpu_bo_va_mapping { + struct list_head list; + struct rb_node rb; + uint64_t start; + uint64_t last; + uint64_t __subtree_last; + uint64_t offset; + uint64_t flags; +}; + +/* bo virtual addresses in a specific vm */ +struct amdgpu_bo_va { + /* protected by bo being reserved */ + struct list_head bo_list; + struct dma_fence *last_pt_update; + unsigned ref_count; + + /* protected by vm mutex and spinlock */ + struct list_head vm_status; + + /* mappings for this bo_va */ + struct list_head invalids; + struct list_head valids; + + /* constant after initialization */ + struct amdgpu_vm *vm; + struct amdgpu_bo *bo; +}; + + +struct amdgpu_bo { + /* Protected by tbo.reserved */ + u32 preferred_domains; + u32 allowed_domains; + struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + struct ttm_placement placement; + struct ttm_buffer_object tbo; + struct ttm_bo_kmap_obj kmap; + u64 flags; + unsigned pin_count; + u64 tiling_flags; + u64 metadata_flags; + void *metadata; + u32 metadata_size; + unsigned prime_shared_count; + /* list of all virtual address to which this bo is associated to */ + struct list_head va; + /* Constant after initialization */ + struct drm_gem_object gem_base; + struct amdgpu_bo *parent; + struct amdgpu_bo *shadow; + + struct ttm_bo_kmap_obj dma_buf_vmap; + struct amdgpu_mn *mn; + + union { + struct list_head mn_list; + struct list_head shadow_list; + }; +}; + /** * amdgpu_mem_type_to_domain - return domain corresponding to mem_type * @mem_type: ttm memory type @@ -132,6 +193,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct 
reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, @@ -139,7 +201,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_reserved(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, @@ -147,6 +214,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); +void *amdgpu_bo_kptr(struct amdgpu_bo *bo); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); void amdgpu_bo_unref(struct amdgpu_bo **bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index c19c4d138751..f21a7716b90e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -30,6 +30,7 @@ struct cg_flag_name const char *name; }; +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); void amdgpu_pm_print_power_states(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 6bdc866570ab..5b3f92891f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ww_mutex_lock(&resv->lock, NULL); ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, - AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 15b7149d1204..6c5646b48d1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -184,47 +184,22 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - if (ring->funcs->support_64bit_ptrs) { - r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } - - r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } - - } else { - r = amdgpu_wb_get(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } - - r = amdgpu_wb_get(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } - + r = amdgpu_wb_get(adev, &ring->rptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); + return r; } - if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) { - r = amdgpu_wb_get_256Bit(adev, &ring->fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_wb_get(adev, &ring->wptr_offs); + if (r) { + 
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); + return r; + } - } else { - r = amdgpu_wb_get(adev, &ring->fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_wb_get(adev, &ring->fence_offs); + if (r) { + dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); + return r; } r = amdgpu_wb_get(adev, &ring->cond_exe_offs); @@ -286,19 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) { ring->ready = false; - if (ring->funcs->support_64bit_ptrs) { - amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs); - amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs); - } else { - amdgpu_wb_free(ring->adev, ring->rptr_offs); - amdgpu_wb_free(ring->adev, ring->wptr_offs); - } + /* Not to finish a ring which is not initialized */ + if (!(ring->adev) || !(ring->adev->rings[ring->idx])) + return; + + amdgpu_wb_free(ring->adev, ring->rptr_offs); + amdgpu_wb_free(ring->adev, ring->wptr_offs); amdgpu_wb_free(ring->adev, ring->cond_exe_offs); - if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) - amdgpu_wb_free_256bit(ring->adev, ring->fence_offs); - else - amdgpu_wb_free(ring->adev, ring->fence_offs); + amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 5ca75a456ad2..3144400435b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&sa_manager->flist[i]); r = amdgpu_bo_create(adev, size, align, true, domain, - 0, NULL, NULL, &sa_manager->bo); + 0, NULL, NULL, 0, &sa_manager->bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 3c4d7574d704..ed8c3739015b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, NULL, &vram_obj); + NULL, NULL, 0, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, - NULL, gtt_obj + i); + NULL, 0, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 509f7a63d40c..9ab58245e518 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -105,12 +105,12 @@ TRACE_EVENT(amdgpu_bo_create, __entry->bo = bo; __entry->pages = bo->tbo.num_pages; __entry->type = bo->tbo.mem.mem_type; - __entry->prefer = bo->prefered_domains; + __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; __entry->visible = bo->flags; ), - TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d", + TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d", __entry->bo, __entry->pages, __entry->type, __entry->prefer, __entry->allow, __entry->visible) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e6f9a54c959d..c803b082324d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -753,7 +753,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; - int r; + int r = 0; if (gtt->userptr) { r = amdgpu_ttm_tt_pin_userptr(ttm); @@ -1232,23 +1232,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); - r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->stollen_vga_memory); - if (r) { - return r; - } - r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); + r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->stolen_vga_memory, + NULL, NULL); if (r) return r; - r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL); - amdgpu_bo_unreserve(adev->stollen_vga_memory); - if (r) { - amdgpu_bo_unref(&adev->stollen_vga_memory); - return r; - } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); @@ -1319,13 +1308,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; amdgpu_ttm_debugfs_fini(adev); - if (adev->stollen_vga_memory) { - r = amdgpu_bo_reserve(adev->stollen_vga_memory, true); + if (adev->stolen_vga_memory) { + r = amdgpu_bo_reserve(adev->stolen_vga_memory, true); if (r == 0) { - amdgpu_bo_unpin(adev->stollen_vga_memory); - amdgpu_bo_unreserve(adev->stollen_vga_memory); + amdgpu_bo_unpin(adev->stolen_vga_memory); + amdgpu_bo_unreserve(adev->stolen_vga_memory); } - amdgpu_bo_unref(&adev->stollen_vga_memory); + amdgpu_bo_unref(&adev->stolen_vga_memory); } ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); @@ -1509,11 +1498,12 @@ error_free: } int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, + uint64_t src_data, struct reservation_object *resv, struct dma_fence **fence) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/ uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -1545,7 +1535,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, num_pages -= mm_node->size; ++mm_node; } - num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* 10 double words for each SDMA_OP_PTEPDE cmd */ + num_dw = num_loops * 10; /* for IB padding */ num_dw += 64; @@ -1570,12 +1562,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t byte_count = mm_node->size << PAGE_SHIFT; uint64_t dst_addr; + WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8"); + dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); while (byte_count) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); - amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, - dst_addr, cur_size_in_bytes); + amdgpu_vm_set_pte_pde(adev, &job->ibs[0], + dst_addr, 0, + cur_size_in_bytes >> 3, 0, + src_data); dst_addr += cur_size_in_bytes; byte_count -= cur_size_in_bytes; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f137c2458ee8..0e2399f32de7 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -73,7 +73,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush); int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, + uint64_t src_data, struct reservation_object *resv, struct dma_fence **fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index fcfb9d4f7477..36c763310df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -358,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, (le32_to_cpu(header->jt_offset) * 4); memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); - ucode->ucode_size += le32_to_cpu(header->jt_size) * 4; - return 0; } @@ -381,7 +379,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, bo); + NULL, NULL, 0, bo); if (err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); goto failed; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2ca09f111f08..aefecf6c1e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1051,7 +1051,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; @@ -1101,7 +1101,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b692ad402252..c855366521ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r, timeout = adev->usec_timeout; - /* workaround VCE ring test slow issue for sriov*/ + /* skip ring test for sriov*/ if (amdgpu_sriov_vf(adev)) - timeout *= 10; + return 0; r = amdgpu_ring_alloc(ring, 16); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 09190fadd228..041e0121590c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (fences == 0) { if (adev->pm.dpm_enabled) { + /* might be used when with pg/cg amdgpu_dpm_enable_uvd(adev, false); - } else { - amdgpu_asic_set_uvd_clocks(adev, 0, 0); + */ } } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); @@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); - if (set_clocks) { - if (adev->pm.dpm_enabled) { - amdgpu_dpm_enable_uvd(adev, true); - } else { - amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - } + if (set_clocks && adev->pm.dpm_enabled) { + /* might be used when with pg/cg + amdgpu_dpm_enable_uvd(adev, 
true); + */ } } @@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; @@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 250c8e80e646..9ce36652029e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -288,6 +288,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, unsigned pt_idx, from, to; int r; u64 flags; + uint64_t init_value = 0; if (!parent->entries) { unsigned num_entries = amdgpu_vm_num_entries(adev, level); @@ -321,6 +322,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_SHADOW); + if (vm->pte_support_ats) { + init_value = AMDGPU_PTE_SYSTEM; + if (level != adev->vm_manager.num_level - 1) + init_value |= AMDGPU_PDE_PTE; + } + /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { struct reservation_object *resv = vm->root.bo->tbo.resv; @@ -333,7 +340,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, resv, &pt); + NULL, resv, init_value, &pt); if (r) return r; @@ -1060,7 +1067,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, shadow = parent->bo->shadow; if (vm->use_cpu_for_update) { - pd_addr = (unsigned long)parent->bo->kptr; + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; @@ -1401,7 +1408,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, pt = entry->bo; if (use_cpu_update) { - pe_start = (unsigned long)pt->kptr; + pe_start = (unsigned long)amdgpu_bo_kptr(pt); } else { if (pt->shadow) { pe_start = amdgpu_bo_gpu_offset(pt->shadow); @@ -1995,15 +2002,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct dma_fence *f = NULL; int r; + uint64_t init_pte_value = 0; while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); + if (vm->pte_support_ats) + init_pte_value = AMDGPU_PTE_SYSTEM; + r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, mapping->start, mapping->last, - 0, 0, &f); + init_pte_value, 0, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2494,6 +2505,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amd_sched_rq *rq; int r, i; u64 flags; + uint64_t init_pde_value = 0; vm->va = RB_ROOT; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); @@ -2515,10 +2527,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) + vm->pte_support_ats = false; + + if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - else + + if (adev->asic_type == CHIP_RAVEN) { + vm->pte_support_ats = true; + 
init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; + } + } else vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); DRM_DEBUG_DRIVER("VM update mode is %s\n", @@ -2538,7 +2557,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, NULL, &vm->root.bo); + NULL, NULL, init_pde_value, &vm->root.bo); if (r) goto error_free_sched_entity; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 34d9174ebff2..217ecba8f4cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -146,6 +146,9 @@ struct amdgpu_vm { /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; + + /* Flag to indicate ATS support from PTE for GFX9 */ + bool pte_support_ats; }; struct amdgpu_vm_id { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 490e84944851..4e519dc42916 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2431,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2439,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2473,7 +2473,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 921c6f772f11..11edc75edaa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2506,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2514,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2548,7 +2548,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index bcd9521237f4..a51e35f824a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -42,6 +42,7 @@ #include "dce/dce_6_0_d.h" #include "dce/dce_6_0_sh_mask.h" #include "gca/gfx_7_2_enum.h" +#include "dce_v6_0.h" #include "si_enums.h" static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); @@ -2321,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - 
drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2329,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2363,7 +2364,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 609438fe8584..9cf14b8b2db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2335,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2343,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2377,7 +2377,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 5ed919e45351..b9ee9073cb0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -479,6 +479,8 @@ static int dce_virtual_hw_init(void *handle) #endif /* no DCE */ break; + case CHIP_VEGA10: + break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 4ac85f47f287..d228f5a99044 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.rlc.save_restore_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); - adev->gfx.rlc.save_restore_obj = NULL; - } - - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + 
amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) @@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) if (src_ptr) { /* save restore block */ - if (adev->gfx.rlc.save_restore_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, - &adev->gfx.rlc.save_restore_obj); - - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - gfx_v6_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", + r); gfx_v6_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } /* write the sr buffer */ dst_ptr = adev->gfx.rlc.sr_ptr; for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) dst_ptr[i] = cpu_to_le32(src_ptr[i]); + amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); } @@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); dws = adev->gfx.rlc.clear_state_size + (256 / 4); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v6_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v6_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 17b7c6934b0a..53a4af7596c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1823,7 +1823,7 @@ 
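[Editor's note] The rlc_fini() rewrite above (and the matching gfx_v7/v8/v9 hunks below) collapses a recurring open-coded teardown into amdgpu_bo_free_kernel(). A rough before/after sketch of the pattern, assuming the helper behaves as these call sites rely on:

	/* before: reserve/unpin/unreserve/unref, repeated for every kernel BO */
	if (adev->gfx.rlc.clear_state_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
		}
		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
		adev->gfx.rlc.clear_state_obj = NULL;
	}

	/* after: one call; passing NULL for gpu_addr/cpu_addr means there is
	 * nothing to clear or kunmap, and the BO pointer is NULLed for us */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
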
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) } /** - * gmc_v7_0_init_compute_vmid - gart enable + * gfx_v7_0_init_compute_vmid - gart enable * * @adev: amdgpu_device pointer * @@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) #define DEFAULT_SH_MEM_BASES (0x6000) #define FIRST_COMPUTE_VMID (8) #define LAST_COMPUTE_VMID (16) -static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) +static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) { int i; uint32_t sh_mem_config; @@ -1939,7 +1939,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - gmc_v7_0_init_compute_vmid(adev); + gfx_v7_0_init_compute_vmid(adev); WREG32(mmSX_DEBUG_1, 0x20); @@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) */ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) { - int i, r; + int i; for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - if (ring->mqd_obj) { - r = amdgpu_bo_reserve(ring->mqd_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); - - amdgpu_bo_unpin(ring->mqd_obj); - amdgpu_bo_unreserve(ring->mqd_obj); - - amdgpu_bo_unref(&ring->mqd_obj); - ring->mqd_obj = NULL; - } + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); } } static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); } static int gfx_v7_0_mec_init(struct amdgpu_device *adev) @@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) /* allocate space for ALL pipes (even the ones we don't own) */ mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * GFX7_MEC_HPD_SIZE * 2; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v7_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r); gfx_v7_0_mec_fini(adev); return r; } @@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) struct cik_mqd *mqd; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; - if (ring->mqd_obj == NULL) { - r = amdgpu_bo_create(adev, - sizeof(struct cik_mqd), - PAGE_SIZE, true, - 
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &ring->mqd_obj); - if (r) { - dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto out; - - r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, - &mqd_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); - goto out_unreserve; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); + r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + &mqd_gpu_addr, (void **)&mqd); if (r) { - dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); - goto out_unreserve; + dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); + return r; } mutex_lock(&adev->srbm_mutex); @@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) mutex_unlock(&adev->srbm_mutex); amdgpu_bo_kunmap(ring->mqd_obj); -out_unreserve: amdgpu_bo_unreserve(ring->mqd_obj); -out: return 0; } @@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, */ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - /* save restore block */ - if (adev->gfx.rlc.save_restore_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); - adev->gfx.rlc.save_restore_obj = NULL; - } - - /* clear state block */ - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - /* clear state block */ - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) @@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) if (src_ptr) { /* save restore block */ - if (adev->gfx.rlc.save_restore_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.save_restore_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + 
&adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } /* write the sr buffer */ dst_ptr = adev->gfx.rlc.sr_ptr; for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) @@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; gfx_v7_0_get_csb_buffer(adev, dst_ptr); @@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } if (adev->gfx.rlc.cp_table_size) { - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.cp_table_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); - if (unlikely(r != 0)) { - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); gfx_v7_0_rlc_fini(adev); 
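[Editor's note] The allocation-side counterpart: amdgpu_bo_create_reserved() folds create + reserve + pin + kmap into one call and returns with the BO still reserved, which is why the converted call sites above can fill the buffer and then simply kunmap/unreserve. A minimal usage sketch under those assumptions (error handling trimmed, names illustrative):

	struct amdgpu_bo *bo = NULL;
	void *cpu_ptr;
	u64 gpu_addr;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;

	/* BO is created, pinned, CPU-mapped and still reserved here */
	memcpy(cpu_ptr, data, size);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
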
return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 05436b8730b4..0710b0b2e4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev) static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - /* clear state block */ - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - /* jump table block */ - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) @@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v8_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v8_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); - gfx_v8_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; gfx_v8_0_get_csb_buffer(adev, dst_ptr); @@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) if ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY)) { adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.cp_table_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, 
false); - if (unlikely(r != 0)) { - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_gpu_addr); + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); return r; } @@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); } static int gfx_v8_0_mec_init(struct amdgpu_device *adev) @@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v8_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v8_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); - gfx_v8_0_mec_fini(adev); + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 435db6f5efcf..b39f81dda847 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080, SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080, SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080, + SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000, SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107, + SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000, SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000, SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68, SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197, @@ -772,18 +774,16 @@ 
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (cs_data) { /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create rlc csb bo\n", r); - gfx_v9_0_rlc_fini(adev); - return r; - } + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", + r); + gfx_v9_0_rlc_fini(adev); + return r; } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; @@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (adev->asic_type == CHIP_RAVEN) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create cp table bo\n", r); - gfx_v9_0_rlc_fini(adev); - return r; - } + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_err(adev->dev, + "(%d) failed to create cp table bo\n", r); + gfx_v9_0_rlc_fini(adev); + return r; } rv_init_cp_jump_table(adev); @@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } - if (adev->gfx.mec.mec_fw_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj); - amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); - - amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj); - adev->gfx.mec.mec_fw_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); } static int gfx_v9_0_mec_init(struct amdgpu_device *adev) @@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) amdgpu_gfx_compute_queue_acquire(adev); mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v9_0_mec_fini(adev); - return r; - } - r = 
amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); gfx_v9_0_mec_fini(adev); return r; } @@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; - if (adev->gfx.mec.mec_fw_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hdr->header.ucode_size_bytes, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.mec_fw_obj); - if (r) { - dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false); - if (unlikely(r != 0)) { - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.mec_fw_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw); + r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.mec_fw_obj, + &adev->gfx.mec.mec_fw_gpu_addr, + (void **)&fw); if (r) { - dev_warn(adev->dev, "(%d) map firmware bo failed\n", r); + dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); gfx_v9_0_mec_fini(adev); return r; } + memcpy(fw, fw_data, fw_size); amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); - return 0; } @@ -4158,7 +4096,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, return 0; } -const struct amd_ip_funcs gfx_v9_0_ip_funcs = { +static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, .late_init = gfx_v9_0_late_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index 56ef652a575d..fa5a3fbaf6ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h @@ -24,7 +24,6 @@ #ifndef __GFX_V9_0_H__ #define __GFX_V9_0_H__ -extern const struct amd_ip_funcs gfx_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index d2dbb085f480..206e29cad753 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h @@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v1_0_init(struct amdgpu_device *adev); u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); -extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs; -extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index 57bb940c0ecd..5d38229baf69 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev); void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable); -extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs; -extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block; - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 591f3e7fb508..fd7c72aaafa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) DRM_DEBUG("Setting write pointer\n"); if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + DRM_DEBUG("Using doorbell -- " "wptr_offs == 0x%08x " "lower_32_bits(ring->wptr) << 2 == 0x%08x " @@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) lower_32_bits(ring->wptr << 2), upper_32_bits(ring->wptr << 2)); /* XXX check if swapping is necessary on BE */ - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2); - adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2); + WRITE_ONCE(*wb, (ring->wptr << 2)); DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", ring->doorbell_index, ring->wptr << 2); WDOORBELL64(ring->doorbell_index, ring->wptr << 2); @@ -573,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; u32 rb_bufsz; u32 wb_offset; u32 doorbell; u32 doorbell_offset; u32 temp; + u64 wptr_gpu_addr; int i, r; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -660,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp); } + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + if (amdgpu_sriov_vf(adev)) + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); + else + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); + /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl); @@ -687,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); + } return 0; @@ -783,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) const struct sdma_firmware_header_v1_0 *hdr; const __le32 *fw_data; u32 fw_size; - u32 digest_size = 0; int i, j; /* halt the MEs */ sdma_v4_0_enable(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { - uint16_t version_major; - uint16_t version_minor; if (!adev->sdma.instance[i].fw) return -EINVAL; @@ -799,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = 
le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - version_major = le16_to_cpu(hdr->header.header_version_major); - version_minor = le16_to_cpu(hdr->header.header_version_minor); - - if (version_major == 1 && version_minor >= 1) { - const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr; - digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size); - } - - fw_size -= digest_size; - fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0); - for (j = 0; j < fw_size; j++) WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 987b958368ac..23a85750edd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_ring_alloc(ring, 16); if (r) { DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", @@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle) return r; } - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + + /* currently only use the first encoding ring for + * sriov, so set unused location for other unused rings. + */ + if (i == 0) + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + else + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); if (r) @@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); + WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; + adev->uvd.ring_enc[0].wptr = 0; + adev->uvd.ring_enc[0].wptr_old = 0; + /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); return -EBUSY; } - WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); return 0; } @@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; ring = &adev->uvd.ring; + ring->wptr = 0; size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); - /* disable clock gating */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0); MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0xFFFFFFFF, 0x00000004); /* mc resume*/ @@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG), - adev->gfx.config.gb_addr_config); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG), - adev->gfx.config.gb_addr_config); - 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG), - adev->gfx.config.gb_addr_config); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); /* mc resume end*/ @@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) UVD_LMI_CTRL__REQ_MODE_MASK | 0x00100000L)); - /* disable byte swapping */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88); - /* take all subblocks out of reset, except VCPU */ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); @@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__CLK_EN_MASK); - /* enable UMC */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); - - /* boot up the VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); - - MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); - /* enable master interrupt */ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), @@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) /* force RBC into idle state */ size = order_base_2(ring->ring_size); tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - /* set the write pointer delay */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0); - - /* set the wb address */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR), - (upper_32_bits(ring->gpu_addr) >> 2)); - - /* programm the RB_BASE for ring buffer */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), - lower_32_bits(ring->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), - upper_32_bits(ring->gpu_addr)); - - ring->wptr = 0; ring = &adev->uvd.ring_enc[0]; + ring->wptr = 0; MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); + /* boot up the VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); + + /* enable UMC */ + 
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); + + MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); + /* add end packet */ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; header->uvd_table_size = table_size; - return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table); } - return -EINVAL; /* already initializaed ? */ + return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 1ecd6bb90c1f..11134d5f7443 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0); + WDOORBELL32(adev->vce.ring[0].doorbell_index, 0); + adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0; + adev->vce.ring[0].wptr = 0; + adev->vce.ring[0].wptr_old = 0; + /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001); @@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev, dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); return -EBUSY; } - WDOORBELL32(adev->vce.ring[0].doorbell_index, 0); return 0; } @@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0); MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), - 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, + VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); /* end of MC_RESUME */ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), @@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev) memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; header->vce_table_size = table_size; - - return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table); } - return -EINVAL; /* already initializaed ? */ + return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table); } /** @@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle) if (amdgpu_sriov_vf(adev)) { /* DOORBELL only works under SRIOV */ ring->use_doorbell = true; + + /* currently only use the first encoding ring for sriov, + * so set unused location for other unused rings. 
+ */ if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2; - else if (i == 1) - ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2; + ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2; else - ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1; + ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); if (r) @@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev, { uint32_t val = 0; - if (state == AMDGPU_IRQ_STATE_ENABLE) - val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; + if (!amdgpu_sriov_vf(adev)) { + if (state == AMDGPU_IRQ_STATE_ENABLE) + val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; - WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val, + ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 0b74da3dca8b..bc839ff0bdd0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1240,13 +1240,18 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - if (cz_hwmgr->sclk_dpm.soft_min_clk != - cz_hwmgr->sclk_dpm.soft_max_clk) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(hwmgr, - cz_hwmgr->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMin)); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMin)); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + return 0; } @@ -1292,17 +1297,55 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - if (cz_hwmgr->sclk_dpm.soft_min_clk != - cz_hwmgr->sclk_dpm.soft_max_clk) { - cz_hwmgr->sclk_dpm.soft_max_clk = - cz_hwmgr->sclk_dpm.soft_min_clk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMax)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin)); + + return 0; +} + +static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk) +{ + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(hwmgr, + sclk, + PPSMC_MSG_SetSclkSoftMin)); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, - cz_hwmgr->sclk_dpm.soft_max_clk, + sclk, PPSMC_MSG_SetSclkSoftMax)); + return 0; +} + +static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk) +{ + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + int32_t tmp_sclk; + int32_t count; + + tmp_sclk = table->entries[table->count-1].clk * 70 / 100; + + for (count = table->count-1; count >= 0; count--) { + if (tmp_sclk >= table->entries[count].clk) { + tmp_sclk = 
table->entries[count].clk; + *sclk = tmp_sclk; + break; + } } + if (count < 0) + *sclk = table->entries[0].clk; return 0; } @@ -1310,30 +1353,70 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { + uint32_t sclk = 0; int ret = 0; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + + if (level == hwmgr->dpm_level) + return ret; + + if (!(hwmgr->dpm_level & profile_mode_mask)) { + /* enter profile mode, save current level, disable gfx cg*/ + if (level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + } + } else { + /* exit profile mode, restore level, enable gfx cg*/ + if (!(level & profile_mode_mask)) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) + level = hwmgr->saved_dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + } + } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: ret = cz_phm_force_dpm_highest(hwmgr); if (ret) return ret; + hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: ret = cz_phm_force_dpm_lowest(hwmgr); if (ret) return ret; + hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = cz_phm_unforce_dpm_levels(hwmgr); if (ret) return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = cz_get_profiling_clk(hwmgr, &sclk); + if (ret) + return ret; + hwmgr->dpm_level = level; + cz_phm_force_dpm_sclk(hwmgr, sclk); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + hwmgr->dpm_level = level; break; + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } - hwmgr->dpm_level = level; - return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index d025653c7823..9547f265a8bb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -557,9 +557,8 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u return vddci_table->entries[i].value; } - PP_ASSERT_WITH_CODE(false, - "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i-1].value); + pr_debug("vddci is larger than max value in vddci_table\n"); + return vddci_table->entries[i-1].value; } int phm_find_boot_level(void *table, @@ -583,26 +582,26 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t virtual_voltage_id, int32_t *sclk) { - uint8_t entryId; - uint8_t voltageId; + uint8_t entry_id; + uint8_t voltage_id; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ - for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { - voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; - if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) + for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) { + voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd; + if (lookup_table->entries[voltage_id].us_vdd == 
virtual_voltage_id) break; } - PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, - "Can't find requested voltage id in vdd_dep_on_sclk table!", - return -EINVAL; - ); + if (entry_id >= table_info->vdd_dep_on_sclk->count) { + pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n"); + return -EINVAL; + } - *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; + *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index cd33eb179db2..c062844b15f3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -142,7 +142,7 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, } } else if (voltage_mode == VOLTAGE_OBJ_SVID2) { voltage_table->psi1_enable = - voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1; + (voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5; voltage_table->psi0_enable = voltage_object->svid2_voltage_obj.psi0_enable & 0x1; voltage_table->max_vid_step = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 4c7f430b36eb..edc5fb6412d9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -265,6 +265,15 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, } } */ + if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) || + ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) { + rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100; + rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSoftMinVcn, + (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min); + } + if((hwmgr->gfx_arbiter.sclk_hard_min != 0) && ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) { smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h index afb852295a15..2472b50e54cf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h @@ -280,6 +280,8 @@ struct rv_hwmgr { uint32_t f_actual_hard_min_freq; uint32_t fabric_actual_soft_min_freq; + uint32_t vclk_soft_min; + uint32_t dclk_soft_min; uint32_t gfx_actual_soft_min_freq; bool vcn_power_gated; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f01cda93f178..c2743233ba10 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1962,9 +1962,6 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); break; default: - PP_ASSERT_WITH_CODE(0, - "Failed to setup PCC HW register! 
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", - ); break; } cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 01ff5054041b..9d71a259d97d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2313,7 +2313,7 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); - vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);; + vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response); if (1 == agc_btc_response) { if (1 == data->acg_loop_state) @@ -2522,6 +2522,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) pp_table->DisplayDpmVoltageMode = (uint8_t)(table_info->uc_dcef_dpm_voltage_mode); + data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable; + data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable; + if (data->registry_data.ulv_support && table_info->us_ulv_voltage_offset) { result = vega10_populate_ulv_state(hwmgr); @@ -3701,10 +3704,22 @@ static void vega10_apply_dal_minimum_voltage_request( return; } +static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk; + + return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1; +} + static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t socclk_idx; vega10_apply_dal_minimum_voltage_request(hwmgr); @@ -3725,13 +3740,22 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) if (!data->registry_data.mclk_dpm_key_disabled) { if (data->smc_state_table.mem_boot_level != data->dpm_table.mem_table.dpm_state.soft_min_level) { + if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { + socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_SetSoftMinUclkByIndex, - data->smc_state_table.mem_boot_level), - "Failed to set soft min mclk index!", - return -EINVAL); - + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinSocclkByIndex, + socclk_idx), + "Failed to set soft min uclk index!", + return -EINVAL); + } else { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinUclkByIndex, + data->smc_state_table.mem_boot_level), + "Failed to set soft min uclk index!", + return -EINVAL); + } data->dpm_table.mem_table.dpm_state.soft_min_level = data->smc_state_table.mem_boot_level; } @@ -4138,7 +4162,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); } } else { - pr_info("Cannot find requested DCEFCLK!"); + pr_debug("Cannot find requested DCEFCLK!"); } if (min_clocks.memoryClock != 0) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index fbafc849ea71..e7fa67063cdc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -543,7 +543,7 @@ static const struct 
vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- */ /* SQ */ - { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, @@ -556,7 +556,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, /* TD */ - { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, @@ -1208,7 +1208,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) if (0 != result) return result; - vega10_didt_set_mask(hwmgr, true); + vega10_didt_set_mask(hwmgr, false); cgs_enter_safe_mode(hwmgr->device, false); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index e7ab8eb8a0cf..d44243441d28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -321,10 +321,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - result = vega10_fan_ctrl_set_static_mode(hwmgr, - FDO_PWM_MODE_STATIC); - if (!result) - result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); + result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); } else result = vega10_fan_ctrl_set_default_mode(hwmgr); @@ -633,7 +630,6 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { vega10_fan_ctrl_start_smc_fan_control(hwmgr); - vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 47e57bd2c36f..91b0105e8240 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -128,6 +128,8 @@ struct phm_uvd_arbiter { uint32_t dclk; uint32_t vclk_ceiling; uint32_t dclk_ceiling; + uint32_t vclk_soft_min; + uint32_t dclk_soft_min; }; struct phm_vce_arbiter { diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index e0e106f1b23a..901c960cfe21 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h @@ -66,7 +66,12 @@ #define 
PPSMC_MSG_SetMinVddcrSocVoltage 0x22 #define PPSMC_MSG_SetMinVideoFclkFreq 0x23 #define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24 -#define PPSMC_Message_Count 0x25 +#define PPSMC_MSG_ForcePowerDownGfx 0x25 +#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 +#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 +#define PPSMC_MSG_SetSoftMinVcn 0x28 +#define PPSMC_Message_Count 0x29 + typedef uint16_t PPSMC_Result; typedef int PPSMC_Msg; diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index e3c13aa202b8..289eda54e5aa 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -31,7 +31,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(arcpgu->fbdev); } -static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { +static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = arcpgu_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 3022b39c00f3..69dab82a3771 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -209,7 +209,6 @@ static struct drm_driver driver = { .gem_free_object_unlocked = ast_gem_free_object, .dumb_create = ast_dumb_create, .dumb_map_offset = ast_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 9052ebeae8d0..0cd827e11fa2 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -266,7 +266,7 @@ static void ast_fbdev_destroy(struct drm_device *dev, drm_fb_helper_unregister_fbi(&afbdev->helper); if (afb->obj) { - drm_gem_object_unreference_unlocked(afb->obj); + drm_gem_object_put_unlocked(afb->obj); afb->obj = NULL; } drm_fb_helper_fini(&afbdev->helper); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 9a44cdec3bca..dac355812adc 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -387,7 +387,7 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); - drm_gem_object_unreference_unlocked(ast_fb->obj); + drm_gem_object_put_unlocked(ast_fb->obj); drm_framebuffer_cleanup(fb); kfree(ast_fb); } @@ -429,13 +429,13 @@ ast_user_framebuffer_create(struct drm_device *dev, ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL); if (!ast_fb) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); kfree(ast_fb); return ERR_PTR(ret); } @@ -628,7 +628,7 @@ int ast_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -676,7 +676,7 @@ ast_dumb_mmap_offset(struct drm_file *file, bo = gem_to_ast_bo(obj); *offset = ast_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 43245229f437..6f3849ec0c1d 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -950,7 +950,7 @@ static void ast_cursor_fini(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; 
ttm_bo_kunmap(&ast->cache_kmap); - drm_gem_object_unreference_unlocked(ast->cursor_cache); + drm_gem_object_put_unlocked(ast->cursor_cache); } int ast_mode_init(struct drm_device *dev) @@ -1215,10 +1215,10 @@ static int ast_cursor_set(struct drm_crtc *crtc, ast_show_cursor(crtc); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; fail: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index a1d28845da5f..7b20318483e4 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -93,7 +93,6 @@ static struct drm_driver bochs_driver = { .gem_free_object_unlocked = bochs_gem_free_object, .dumb_create = bochs_dumb_create, .dumb_map_offset = bochs_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; /* ---------------------------------------------------------------------- */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 682c090fa3ed..b2431aee7887 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -785,7 +785,7 @@ adv7511_connector_detect(struct drm_connector *connector, bool force) return adv7511_detect(adv, connector); } -static struct drm_connector_funcs adv7511_connector_funcs = { +static const struct drm_connector_funcs adv7511_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .detect = adv7511_connector_detect, .destroy = drm_connector_cleanup, @@ -856,7 +856,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge) return ret; } -static struct drm_bridge_funcs adv7511_bridge_funcs = { +static const struct drm_bridge_funcs adv7511_bridge_funcs = { .enable = adv7511_bridge_enable, .disable = adv7511_bridge_disable, .mode_set = adv7511_bridge_mode_set, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 8f2d1379c880..cf3f0caf9c63 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -517,7 +517,7 @@ static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream) return bytes_to_frames(runtime, dw->buf_offset); } -static struct snd_pcm_ops snd_dw_hdmi_ops = { +static const struct snd_pcm_ops snd_dw_hdmi_ops = { .open = dw_hdmi_open, .close = dw_hdmi_close, .ioctl = snd_pcm_lib_ioctl, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 36f5ccbd1794..63c7a01b7053 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -811,7 +811,7 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge) return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge); } -static struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { +static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { .mode_set = dw_mipi_dsi_bridge_mode_set, .enable = dw_mipi_dsi_bridge_enable, .post_disable = dw_mipi_dsi_bridge_post_disable, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index 910c300f5c37..69c4e352dd78 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -142,7 +142,6 @@ static struct drm_driver driver = { .gem_free_object_unlocked = cirrus_gem_free_object, .dumb_create = cirrus_dumb_create, 
.dumb_map_offset = cirrus_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; static const struct dev_pm_ops cirrus_pm_ops = { diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 0f6815f35ad2..32fbfba2c623 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -251,7 +251,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev, drm_fb_helper_unregister_fbi(&gfbdev->helper); if (gfb->obj) { - drm_gem_object_unreference_unlocked(gfb->obj); + drm_gem_object_put_unlocked(gfb->obj); gfb->obj = NULL; } diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index e7fc95f63dca..b5f528543956 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -18,7 +18,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb); - drm_gem_object_unreference_unlocked(cirrus_fb->obj); + drm_gem_object_put_unlocked(cirrus_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } @@ -67,13 +67,13 @@ cirrus_user_framebuffer_create(struct drm_device *dev, cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL); if (!cirrus_fb) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); kfree(cirrus_fb); return ERR_PTR(ret); } @@ -261,7 +261,7 @@ int cirrus_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -310,7 +310,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file, bo = gem_to_cirrus_bo(obj); *offset = cirrus_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 2ed2d919beae..be38ac7050d4 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -291,7 +291,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id) if (!minor) { return ERR_PTR(-ENODEV); - } else if (drm_device_is_unplugged(minor->dev)) { + } else if (drm_dev_is_unplugged(minor->dev)) { drm_dev_unref(minor->dev); return ERR_PTR(-ENODEV); } @@ -364,26 +364,32 @@ void drm_put_dev(struct drm_device *dev) } EXPORT_SYMBOL(drm_put_dev); -void drm_unplug_dev(struct drm_device *dev) +static void drm_device_set_unplugged(struct drm_device *dev) { - /* for a USB device */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_modeset_unregister_all(dev); + smp_wmb(); + atomic_set(&dev->unplugged, 1); +} - drm_minor_unregister(dev, DRM_MINOR_PRIMARY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); +/** + * drm_dev_unplug - unplug a DRM device + * @dev: DRM device + * + * This unplugs a hotpluggable DRM device, which makes it inaccessible to + * userspace operations. Entry-points can use drm_dev_is_unplugged(). This + * essentially unregisters the device like drm_dev_unregister(), but can be + * called while there are still open users of @dev. 
+ */ +void drm_dev_unplug(struct drm_device *dev) +{ + drm_dev_unregister(dev); mutex_lock(&drm_global_mutex); - drm_device_set_unplugged(dev); - - if (dev->open_count == 0) { - drm_put_dev(dev); - } + if (dev->open_count == 0) + drm_dev_unref(dev); mutex_unlock(&drm_global_mutex); } -EXPORT_SYMBOL(drm_unplug_dev); +EXPORT_SYMBOL(drm_dev_unplug); /* * DRM internal mount @@ -835,6 +841,9 @@ EXPORT_SYMBOL(drm_dev_register); * drm_dev_register() but does not deallocate the device. The caller must call * drm_dev_unref() to drop their final reference. * + * A special form of unregistering for hotpluggable devices is drm_dev_unplug(), + * which can be called while there are still open users of @dev. + * * This should be called first in the device teardown code to make sure * userspace can't access the device instance any more. */ @@ -842,7 +851,8 @@ void drm_dev_unregister(struct drm_device *dev) { struct drm_map_list *r_list, *list_temp; - drm_lastclose(dev); + if (drm_core_check_feature(dev, DRIVER_LEGACY)) + drm_lastclose(dev); dev->registered = false; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 59b75a974357..b3c6e997ccdb 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -436,7 +436,7 @@ int drm_release(struct inode *inode, struct file *filp) if (!--dev->open_count) { drm_lastclose(dev); - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) drm_put_dev(dev); } mutex_unlock(&drm_global_mutex); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a8d396bed6a4..ad4e9cfe48a2 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1001,7 +1001,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_vma_offset_node *node; int ret; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; drm_vma_offset_lock_lookup(dev->vma_offset_manager); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 275ab872b34f..373e33f22be4 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -264,41 +264,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, } EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); -/** - * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM - * object - * @file_priv: DRM file-private structure containing the GEM object - * @drm: DRM device - * @handle: GEM object handle - * @offset: return location for the fake mmap offset - * - * This function look up an object by its handle and returns the fake mmap - * offset associated with it. Drivers using the CMA helpers should set this - * as their &drm_driver.dumb_map_offset callback. - * - * Returns: - * 0 on success or a negative error code on failure. 
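The drm_drv.c hunks above replace drm_unplug_dev() with drm_dev_unplug() and convert the open-coded drm_device_is_unplugged() checks to drm_dev_is_unplugged(). A minimal sketch of how a hotpluggable driver's disconnect path might sit on top of the new helper; the USB wiring and the example_ name are assumptions, only the two drm_dev_* calls come from this series:

#include <linux/usb.h>
#include <drm/drm_drv.h>

/* Hypothetical disconnect handler; only the drm_dev_* calls are real API. */
static void example_usb_disconnect(struct usb_interface *interface)
{
        struct drm_device *dev = usb_get_intfdata(interface);

        drm_dev_unplug(dev);    /* unregister; open files keep the device alive */
        drm_dev_unref(dev);     /* drop the driver's own reference */
}

The device is unregistered immediately; the final free is deferred until the last open file drops its reference, which is why drm_release() above only needs the drm_dev_is_unplugged() test.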
- */ -int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, u32 handle, - u64 *offset) -{ - struct drm_gem_object *gem_obj; - - gem_obj = drm_gem_object_lookup(file_priv, handle); - if (!gem_obj) { - dev_err(drm->dev, "failed to lookup GEM object\n"); - return -EINVAL; - } - - *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - - drm_gem_object_put_unlocked(gem_obj); - - return 0; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset); - const struct vm_operations_struct drm_gem_cma_vm_ops = { .open = drm_gem_vm_open, .close = drm_gem_vm_close, @@ -390,7 +355,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, struct drm_device *dev = priv->minor->dev; struct drm_vma_offset_node *node; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; drm_vma_offset_lock_lookup(dev->vma_offset_manager); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 8bfeb32f8a10..d920b2118a39 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -716,7 +716,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, struct drm_device *dev = file_priv->minor->dev; int retcode; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; retcode = drm_ioctl_permit(flags, file_priv); @@ -765,7 +765,7 @@ long drm_ioctl(struct file *filp, dev = file_priv->minor->dev; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 5c14beee52ff..85ab1eec73e5 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -126,7 +126,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane plane->format_types[j], plane->modifiers[i])) { - mod->formats |= 1 << j; + mod->formats |= 1ULL << j; } } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 1170b3209a12..13a59ed2afbc 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -631,7 +631,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_device *dev = priv->minor->dev; int ret; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index facc8419f0cd..b1f7299600f0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -145,8 +145,6 @@ static struct drm_driver exynos_drm_driver = { .gem_free_object_unlocked = exynos_drm_gem_free_object, .gem_vm_ops = &exynos_drm_gem_vm_ops, .dumb_create = exynos_drm_gem_dumb_create, - .dumb_map_offset = exynos_drm_gem_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index c23479be4850..077de014d610 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -286,8 +286,8 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data, { struct drm_exynos_gem_map *args = data; - return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, - &args->offset); + return drm_gem_dumb_map_offset(file_priv, dev, 
args->handle, + &args->offset); } dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, @@ -422,32 +422,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, return 0; } -int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret = 0; - - /* - * get offset of memory allocated for drm framebuffer. - * - this callback would be called by user application - * with DRM_IOCTL_MODE_MAP_DUMB command. - */ - - obj = drm_gem_object_lookup(file_priv, handle); - if (!obj) { - DRM_ERROR("failed to lookup gem object.\n"); - return -EINVAL; - } - - *offset = drm_vma_node_offset_addr(&obj->vma_node); - DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); - - drm_gem_object_unreference_unlocked(obj); - return ret; -} - int exynos_drm_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 85457255fcd1..e86d1a9518c3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -110,11 +110,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -/* map memory region for drm framebuffer to user space. */ -int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); - /* page fault handler and mmap fault address(virtual) to physical memory. */ int exynos_drm_gem_fault(struct vm_fault *vmf); diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 7da061aab729..131239759a75 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -48,36 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data, } /** - * psb_gem_dumb_map_gtt - buffer mapping for dumb interface - * @file: our drm client file - * @dev: drm device - * @handle: GEM handle to the object (from dumb_create) - * - * Do the necessary setup to allow the mapping of the frame buffer - * into user memory. We don't have to do much here at the moment. 
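The exynos hunk above switches the map ioctl to the core drm_gem_dumb_map_offset(), and the surrounding gma500, kirin, meson, mxsfb and tegra removals drop private copies of the same logic; the DRM core falls back to generic dumb handlers when .dumb_map_offset and .dumb_destroy are left NULL. Roughly what the generic offset lookup amounts to (a paraphrase of the core helper, not a verbatim copy):

#include <drm/drm_gem.h>

/* Paraphrase of the core fallback the drivers now rely on. */
static int example_dumb_map_offset(struct drm_file *file,
                                   struct drm_device *dev,
                                   u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        ret = drm_gem_create_mmap_offset(obj);  /* make the BO mmapable */
        if (!ret)
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        drm_gem_object_put_unlocked(obj);
        return ret;
}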
- */ -int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) -{ - int ret = 0; - struct drm_gem_object *obj; - - /* GEM does all our handle to object mapping */ - obj = drm_gem_object_lookup(file, handle); - if (obj == NULL) - return -ENOENT; - - /* Make it mmapable */ - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); -out: - drm_gem_object_unreference_unlocked(obj); - return ret; -} - -/** * psb_gem_create - create a mappable object * @file: the DRM file of the client * @dev: our device diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 747c06b227c5..37a3be71acd9 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -494,8 +494,6 @@ static struct drm_driver driver = { .gem_vm_ops = &psb_gem_vm_ops, .dumb_create = psb_gem_dumb_create, - .dumb_map_offset = psb_gem_dumb_map_gtt, - .dumb_destroy = drm_gem_dumb_destroy, .ioctls = psb_ioctls, .fops = &psb_gem_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 83667087d6e5..821497dbd3fc 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -750,8 +750,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data, struct drm_file *file); extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset); extern int psb_gem_fault(struct vm_fault *vmf); /* psb_device.c */ diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index 9740eed9231a..b92595c477ef 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -157,7 +157,7 @@ out_unpin_bo: out_unreserve_ttm_bo: ttm_bo_unreserve(&bo->bo); out_unref_gem: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return ret; } @@ -172,7 +172,7 @@ static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev) drm_fb_helper_fini(fbh); if (gfb) - drm_framebuffer_unreference(&gfb->fb); + drm_framebuffer_put(&gfb->fb); } static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index ac457c779caa..3518167a7dc4 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -444,7 +444,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, } ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) { DRM_ERROR("failed to unreference GEM object: %d\n", ret); return ret; @@ -479,7 +479,7 @@ int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, bo = gem_to_hibmc_bo(obj); *offset = hibmc_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } @@ -487,7 +487,7 @@ static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb); - drm_gem_object_unreference_unlocked(hibmc_fb->obj); + drm_gem_object_put_unlocked(hibmc_fb->obj); drm_framebuffer_cleanup(fb); kfree(hibmc_fb); } @@ -543,7 +543,7 @@ 
hibmc_user_framebuffer_create(struct drm_device *dev, hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj); if (IS_ERR(hibmc_fb)) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR((long)hibmc_fb); } return &hibmc_fb->fb; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 79fcce76f2ad..e27352ca26c4 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -157,8 +157,6 @@ static struct drm_driver kirin_drm_driver = { .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = kirin_gem_cma_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c index d4246c9dceae..0d8d506695f9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c @@ -58,7 +58,7 @@ static void mtk_drm_fb_destroy(struct drm_framebuffer *fb) drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(mtk_fb->gem_obj); + drm_gem_object_put_unlocked(mtk_fb->gem_obj); kfree(mtk_fb); } @@ -160,6 +160,6 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev, return &mtk_fb->base; unreference: - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 8ec963fff8b1..f595ac816b55 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -122,7 +122,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, goto err_handle_create; /* drop reference from allocate - handle holds it now. 
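Most of the remaining churn in this series, including the mediatek hunk here, is the mechanical rename from drm_gem_object_reference()/unreference*() to drm_gem_object_get()/put*(), and likewise drm_framebuffer_unreference() to drm_framebuffer_put(); the reference-counting semantics are unchanged. The canonical dumb-create pattern being touched, with a hypothetical example_bo_create() standing in for each driver's allocator:

#include <drm/drmP.h>

static struct drm_gem_object *example_bo_create(struct drm_device *dev,
                                                struct drm_mode_create_dumb *args);

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gobj;
        u32 handle;
        int ret;

        gobj = example_bo_create(dev, args);    /* driver-specific allocation */
        if (IS_ERR(gobj))
                return PTR_ERR(gobj);

        ret = drm_gem_handle_create(file, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}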
*/ - drm_gem_object_unreference_unlocked(&mtk_gem->base); + drm_gem_object_put_unlocked(&mtk_gem->base); return 0; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 5375e6dccdd7..7742c7d81ed8 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -116,8 +116,6 @@ static struct drm_driver meson_driver = { /* GEM Ops */ .dumb_create = drm_gem_cma_dumb_create, - .dumb_destroy = drm_gem_dumb_destroy, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 2ac3fcbfea7b..968e20379d54 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c @@ -248,7 +248,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, out_unreserve1: mgag200_bo_unreserve(pixels_2); out_unref: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 4189160af726..74cdde2ee474 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -102,7 +102,6 @@ static struct drm_driver driver = { .gem_free_object_unlocked = mgag200_gem_free_object, .dumb_create = mgag200_dumb_create, .dumb_map_offset = mgag200_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; static struct pci_driver mgag200_pci_driver = { diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 9d914ca69996..30726c9fe28c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -232,7 +232,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper, err_alloc_fbi: vfree(sysram); err_sysram: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return ret; } @@ -245,7 +245,7 @@ static int mga_fbdev_destroy(struct drm_device *dev, drm_fb_helper_unregister_fbi(&mfbdev->helper); if (mfb->obj) { - drm_gem_object_unreference_unlocked(mfb->obj); + drm_gem_object_put_unlocked(mfb->obj); mfb->obj = NULL; } drm_fb_helper_fini(&mfbdev->helper); diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index dce8a3eb5a10..780f983b0294 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -18,7 +18,7 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb); - drm_gem_object_unreference_unlocked(mga_fb->obj); + drm_gem_object_put_unlocked(mga_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } @@ -59,13 +59,13 @@ mgag200_user_framebuffer_create(struct drm_device *dev, mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL); if (!mga_fb) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); kfree(mga_fb); return ERR_PTR(ret); } @@ -317,7 +317,7 @@ int mgag200_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -366,6 +366,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file, bo = gem_to_mga_bo(obj); *offset = 
mgag200_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f49f6ac5585c..b0129e7b29e3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -832,7 +832,6 @@ static struct drm_driver msm_driver = { .gem_vm_ops = &vm_ops, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 93c38eb6d187..7fbad9cb656e 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -337,8 +337,6 @@ static struct drm_driver mxsfb_driver = { .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2efcfb18024d..f7b4326a4641 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3897,7 +3897,7 @@ static void nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct drm_crtc_state *new_crtc_state; + struct drm_crtc_state *new_crtc_state, *old_crtc_state; struct drm_crtc *crtc; struct drm_plane_state *new_plane_state; struct drm_plane *plane; @@ -3918,13 +3918,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) mutex_lock(&disp->mutex); /* Disable head(s). */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); struct nv50_head *head = nv50_head(crtc); NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, asyh->clr.mask, asyh->set.mask); - if (new_crtc_state->active && !asyh->state.active) + if (old_crtc_state->active && !new_crtc_state->active) drm_crtc_vblank_off(crtc); if (asyh->clr.mask) { @@ -4000,7 +4000,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } /* Update head(s). 
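The nouveau hunks move nv50_disp_atomic_commit_tail() from for_each_new_crtc_in_state() to for_each_oldnew_crtc_in_state(), so the vblank on/off decisions compare the actual old-to-new transition instead of mixing the new state with the stale crtc->state. A minimal sketch of that iteration pattern; the body is illustrative, not the nouveau code itself:

#include <drm/drm_atomic.h>
#include <drm/drm_vblank.h>

static void example_disable_heads(struct drm_atomic_state *state)
{
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                /* act on the transition, not on a mixed snapshot */
                if (old_crtc_state->active && !new_crtc_state->active)
                        drm_crtc_vblank_off(crtc);
        }
}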
*/ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); struct nv50_head *head = nv50_head(crtc); @@ -4012,10 +4012,10 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) interlock_core = 1; } - if (asyh->state.active) { - if (!new_crtc_state->active) + if (new_crtc_state->active) { + if (!old_crtc_state->active) drm_crtc_vblank_on(crtc); - if (asyh->state.event) + if (new_crtc_state->event) drm_crtc_vblank_get(crtc); } } @@ -4064,13 +4064,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) if (new_crtc_state->event) { unsigned long flags; /* Get correct count/ts if racing with vblank irq */ - if (crtc->state->active) + if (new_crtc_state->active) drm_crtc_accurate_vblank_count(crtc); spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, new_crtc_state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + new_crtc_state->event = NULL; - if (crtc->state->active) + if (new_crtc_state->active) drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 29653fe5285c..0ea3ca823034 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -72,7 +72,7 @@ #define DRIVER_DESC "DRM module for PL111" -static struct drm_mode_config_funcs mode_config_funcs = { +static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = drm_fb_cma_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 403e135895bf..2445e75cf7ea 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -263,7 +263,6 @@ static struct drm_driver qxl_driver = { .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = qxl_mode_dumb_mmap, - .dumb_destroy = drm_gem_dumb_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5008f3d4cccc..ec63bc5e9de7 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -464,7 +464,7 @@ struct radeon_bo_list { struct radeon_bo *robj; struct ttm_validate_buffer tv; uint64_t gpu_offset; - unsigned prefered_domains; + unsigned preferred_domains; unsigned allowed_domains; uint32_t tiling_flags; }; @@ -2327,7 +2327,7 @@ struct radeon_device { uint8_t *bios; bool is_atom_bios; uint16_t bios_header_start; - struct radeon_bo *stollen_vga_memory; + struct radeon_bo *stolen_vga_memory; /* Register mmio */ resource_size_t rmmio_base; resource_size_t rmmio_size; diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 6efbd65c929e..8d3251a10cd4 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -351,7 +351,7 @@ out: * handles it. 
* Returns NOTIFY code */ -int radeon_atif_handler(struct radeon_device *rdev, +static int radeon_atif_handler(struct radeon_device *rdev, struct acpi_bus_event *event) { struct radeon_atif *atif = &rdev->atif; diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h index 7af1977c2c68..35202a453e66 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.h +++ b/drivers/gpu/drm/radeon/radeon_acpi.h @@ -27,9 +27,6 @@ struct radeon_device; struct acpi_bus_event; -int radeon_atif_handler(struct radeon_device *rdev, - struct acpi_bus_event *event); - /* AMD hw uses four ACPI control methods: * 1. ATIF * ARG0: (ACPI_INTEGER) function code diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 00b22af70f5c..1ae31dbc61c6 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -130,7 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->rdev->family == CHIP_RS880)) { /* TODO: is this still needed for NI+ ? */ - p->relocs[i].prefered_domains = + p->relocs[i].preferred_domains = RADEON_GEM_DOMAIN_VRAM; p->relocs[i].allowed_domains = @@ -148,14 +148,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) return -EINVAL; } - p->relocs[i].prefered_domains = domain; + p->relocs[i].preferred_domains = domain; if (domain == RADEON_GEM_DOMAIN_VRAM) domain |= RADEON_GEM_DOMAIN_GTT; p->relocs[i].allowed_domains = domain; } if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { - uint32_t domain = p->relocs[i].prefered_domains; + uint32_t domain = p->relocs[i].preferred_domains; if (!(domain & RADEON_GEM_DOMAIN_GTT)) { DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " "allowed for userptr BOs\n"); @@ -163,7 +163,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) } need_mmap_lock = true; domain = RADEON_GEM_DOMAIN_GTT; - p->relocs[i].prefered_domains = domain; + p->relocs[i].preferred_domains = domain; p->relocs[i].allowed_domains = domain; } @@ -437,7 +437,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo if (bo == NULL) continue; - drm_gem_object_unreference_unlocked(&bo->gem_base); + drm_gem_object_put_unlocked(&bo->gem_base); } } kfree(parser->track); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 4a4f9533c53b..91952277557e 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -307,7 +307,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, robj = gem_to_radeon_bo(obj); ret = radeon_bo_reserve(robj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } /* Only 27 bit offset for legacy cursor */ @@ -317,7 +317,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, radeon_bo_unreserve(robj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -352,7 +352,7 @@ unpin: radeon_bo_unpin(robj); radeon_bo_unreserve(robj); } - drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); + drm_gem_object_put_unlocked(radeon_crtc->cursor_bo); } radeon_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ee274c6e374d..ddfe91efa61e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -267,7 +267,7 @@ static void radeon_unpin_work_func(struct work_struct 
*__work) } else DRM_ERROR("failed to reserve buffer after flip\n"); - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + drm_gem_object_put_unlocked(&work->old_rbo->gem_base); kfree(work); } @@ -504,7 +504,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, obj = old_radeon_fb->obj; /* take a reference to the old object */ - drm_gem_object_reference(obj); + drm_gem_object_get(obj); work->old_rbo = gem_to_radeon_bo(obj); new_radeon_fb = to_radeon_framebuffer(fb); @@ -603,7 +603,7 @@ pflip_cleanup: radeon_bo_unreserve(new_rbo); cleanup: - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + drm_gem_object_put_unlocked(&work->old_rbo->gem_base); dma_fence_put(work->fence); kfree(work); return r; @@ -1288,7 +1288,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); - drm_gem_object_unreference_unlocked(radeon_fb->obj); + drm_gem_object_put_unlocked(radeon_fb->obj); drm_framebuffer_cleanup(fb); kfree(radeon_fb); } @@ -1348,14 +1348,14 @@ radeon_user_framebuffer_create(struct drm_device *dev, radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); if (radeon_fb == NULL) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); if (ret) { kfree(radeon_fb); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index af6ee7d9b465..fd25361ac681 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -118,7 +118,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, @@ -299,7 +299,7 @@ out: } if (fb && ret) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 574bf7e6b118..3386452bd2f0 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -271,7 +271,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, } r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) { up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); @@ -352,7 +352,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) goto handle_lockup; @@ -361,7 +361,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, return 0; release_object: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); handle_lockup: up_read(&rdev->exclusive_lock); @@ -395,7 +395,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); - drm_gem_object_unreference_unlocked(gobj); + 
drm_gem_object_put_unlocked(gobj); up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(robj->rdev, r); return r; @@ -414,11 +414,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, } robj = gem_to_radeon_bo(gobj); if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return -EPERM; } *offset_p = radeon_bo_mmap_offset(robj); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return 0; } @@ -453,7 +453,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); args->domain = radeon_mem_type_to_domain(cur_placement); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -485,7 +485,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, if (rdev->asic->mmio_hdp_flush && radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) robj->rdev->asic->mmio_hdp_flush(rdev); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); r = radeon_gem_handle_lockup(rdev, r); return r; } @@ -504,7 +504,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, return -ENOENT; robj = gem_to_radeon_bo(gobj); r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -527,7 +527,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); radeon_bo_unreserve(rbo); out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -661,14 +661,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, r = radeon_bo_reserve(rbo, false); if (r) { args->operation = RADEON_VA_RESULT_ERROR; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); if (!bo_va) { args->operation = RADEON_VA_RESULT_ERROR; radeon_bo_unreserve(rbo); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return -ENOENT; } @@ -695,7 +695,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, args->operation = RADEON_VA_RESULT_ERROR; } out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -736,7 +736,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, radeon_bo_unreserve(robj); out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -762,7 +762,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8b722297a05c..093594976126 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -445,7 +445,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev) list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); /* this should unref the ttm bo */ - drm_gem_object_unreference_unlocked(&bo->gem_base); + drm_gem_object_put_unlocked(&bo->gem_base); } } @@ -546,7 +546,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, list_for_each_entry(lobj, head, 
tv.head) { struct radeon_bo *bo = lobj->robj; if (!bo->pin_count) { - u32 domain = lobj->prefered_domains; + u32 domain = lobj->preferred_domains; u32 allowed = lobj->allowed_domains; u32 current_domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 2804b4a15896..50f60a587648 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -907,17 +907,17 @@ int radeon_ttm_init(struct radeon_device *rdev) r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - NULL, &rdev->stollen_vga_memory); + NULL, &rdev->stolen_vga_memory); if (r) { return r; } - r = radeon_bo_reserve(rdev->stollen_vga_memory, false); + r = radeon_bo_reserve(rdev->stolen_vga_memory, false); if (r) return r; - r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); - radeon_bo_unreserve(rdev->stollen_vga_memory); + r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); + radeon_bo_unreserve(rdev->stolen_vga_memory); if (r) { - radeon_bo_unref(&rdev->stollen_vga_memory); + radeon_bo_unref(&rdev->stolen_vga_memory); return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", @@ -946,13 +946,13 @@ void radeon_ttm_fini(struct radeon_device *rdev) if (!rdev->mman.initialized) return; radeon_ttm_debugfs_fini(rdev); - if (rdev->stollen_vga_memory) { - r = radeon_bo_reserve(rdev->stollen_vga_memory, false); + if (rdev->stolen_vga_memory) { + r = radeon_bo_reserve(rdev->stolen_vga_memory, false); if (r == 0) { - radeon_bo_unpin(rdev->stollen_vga_memory); - radeon_bo_unreserve(rdev->stollen_vga_memory); + radeon_bo_unpin(rdev->stolen_vga_memory); + radeon_bo_unreserve(rdev->stolen_vga_memory); } - radeon_bo_unref(&rdev->stollen_vga_memory); + radeon_bo_unref(&rdev->stolen_vga_memory); } ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 5f68245579a3..5e82b408d522 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -139,7 +139,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, /* add the vm page table to the list */ list[0].robj = vm->page_directory; - list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; + list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM; list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[0].tv.bo = &vm->page_directory->tbo; list[0].tv.shared = true; @@ -151,7 +151,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, continue; list[idx].robj = vm->page_tables[i].bo; - list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; + list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].tv.bo = &list[idx].robj->tbo; list[idx].tv.shared = true; diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index bd87768dd549..7a251a54e792 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -592,7 +592,7 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs inno_hdmi_connector_funcs = { +static const struct drm_connector_funcs inno_hdmi_connector_funcs = { .fill_modes = inno_hdmi_probe_single_connector_modes, .detect = inno_hdmi_connector_detect, .destroy = inno_hdmi_connector_destroy, diff 
--git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 8a0f75612d4b..70773041785b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -48,7 +48,7 @@ static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) int i; for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) - drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]); + drm_gem_object_put_unlocked(rockchip_fb->obj[i]); drm_framebuffer_cleanup(fb); kfree(rockchip_fb); @@ -144,7 +144,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, width * drm_format_plane_cpp(mode_cmd->pixel_format, i); if (obj->size < min_size) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); ret = -EINVAL; goto err_gem_object_unreference; } @@ -161,7 +161,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, err_gem_object_unreference: for (i--; i >= 0; i--) - drm_gem_object_unreference_unlocked(objs[i]); + drm_gem_object_put_unlocked(objs[i]); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index ce946b9c57a9..724579ebf947 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -173,7 +173,7 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev) drm_fb_helper_unregister_fbi(helper); if (helper->fb) - drm_framebuffer_unreference(helper->fb); + drm_framebuffer_put(helper->fb); drm_fb_helper_fini(helper); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index f74333efe4bb..1869c8bb76c8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -383,7 +383,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv, goto err_handle_create; /* drop reference from allocate - handle holds it now. 
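The rockchip VOP hunk below keeps the usual pairing around deferred framebuffer cleanup: take a reference before queueing the flip work, drop it in the worker; only the spelling changes from reference/unreference to get/put. A sketch of that pairing, with illustrative function names:

#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

static void example_queue_fb_release(struct drm_flip_work *work,
                                     struct drm_framebuffer *fb)
{
        drm_framebuffer_get(fb);        /* keep fb alive until the worker runs */
        drm_flip_work_queue(work, fb);
}

static void example_fb_release_worker(struct drm_flip_work *work, void *val)
{
        drm_framebuffer_put(val);       /* drop the reference taken above */
}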
*/ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return rk_obj; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 948719dddc36..bf9ed0e63973 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1026,7 +1026,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, if (old_plane_state->fb == new_plane_state->fb) continue; - drm_framebuffer_reference(old_plane_state->fb); + drm_framebuffer_get(old_plane_state->fb); drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb); set_bit(VOP_PENDING_FB_UNREF, &vop->pending); WARN_ON(drm_crtc_vblank_get(crtc) != 0); @@ -1150,7 +1150,7 @@ static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) struct drm_framebuffer *fb = val; drm_crtc_vblank_put(&vop->crtc); - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); } static void vop_handle_vblank(struct vop *vop) diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 550bb262943f..42a238bbb899 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -119,7 +119,7 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs sun4i_rgb_con_funcs = { +static const struct drm_connector_funcs sun4i_rgb_con_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = sun4i_rgb_connector_destroy, .reset = drm_atomic_helper_connector_reset, diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index 7b45ac9383ea..4edf15e299ab 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -545,7 +545,7 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = { +static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = sun4i_tv_comp_connector_destroy, .reset = drm_atomic_helper_connector_reset, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 3ba659a5940d..224ce1dbb1cb 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1077,8 +1077,6 @@ static struct drm_driver tegra_drm_driver = { .gem_prime_import = tegra_gem_prime_import, .dumb_create = tegra_bo_dumb_create, - .dumb_map_offset = tegra_bo_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .ioctls = tegra_drm_ioctls, .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 7a39a355678a..c6079affe642 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -423,27 +423,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, return 0; } -int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, - u32 handle, u64 *offset) -{ - struct drm_gem_object *gem; - struct tegra_bo *bo; - - gem = drm_gem_object_lookup(file, handle); - if (!gem) { - dev_err(drm->dev, "failed to lookup GEM object\n"); - return -EINVAL; - } - - bo = to_tegra_bo(gem); - - *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); - - drm_gem_object_unreference_unlocked(gem); - - return 0; -} - static int tegra_bo_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; diff --git a/drivers/gpu/drm/tegra/gem.h 
b/drivers/gpu/drm/tegra/gem.h index 8b32a6fd586d..8eb9fd24ef0e 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -67,8 +67,6 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, void tegra_bo_free_object(struct drm_gem_object *gem); int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args); -int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, - u32 handle, u64 *offset); int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig index f17c3caceab2..2e790e7dced5 100644 --- a/drivers/gpu/drm/tinydrm/Kconfig +++ b/drivers/gpu/drm/tinydrm/Kconfig @@ -32,3 +32,13 @@ config TINYDRM_REPAPER 2.71" TFT EPD Panel (E2271CS021) If M is selected the module will be called repaper. + +config TINYDRM_ST7586 + tristate "DRM support for Sitronix ST7586 display panels" + depends on DRM_TINYDRM && SPI + select TINYDRM_MIPI_DBI + help + DRM driver for the following Sitronix ST7586 panels: + * LEGO MINDSTORMS EV3 + + If M is selected the module will be called st7586. diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile index 95bb4d4fa785..0c184bd1bb59 100644 --- a/drivers/gpu/drm/tinydrm/Makefile +++ b/drivers/gpu/drm/tinydrm/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o # Displays obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o +obj-$(CONFIG_TINYDRM_ST7586) += st7586.o diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c index 75808bb84c9a..bd6cce093a85 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c @@ -185,7 +185,9 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565); /** * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale * @dst: 8-bit grayscale destination buffer + * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy * * Drm doesn't have native monochrome or grayscale support. * Such drivers can announce the commonly supported XR24 format to userspace @@ -195,41 +197,31 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565); * where 1 means foreground color and 0 background color. * * ITU BT.601 is used for the RGB -> luma (brightness) conversion. - * - * Returns: - * Zero on success, negative error code on failure. */ -int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb) +void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip) { - struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; - unsigned int x, y, pitch = fb->pitches[0]; - int ret = 0; + unsigned int len = (clip->x2 - clip->x1) * sizeof(u32); + unsigned int x, y; void *buf; u32 *src; if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888)) - return -EINVAL; + return; /* * The cma memory is write-combined so reads are uncached. * Speed up by fetching one line at a time. 
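The hunks below rework this helper's buffering while the per-pixel luma line itself sits outside them, so for reference, a minimal sketch of the ITU BT.601 weighting the docs mention; the exact integer approximation is an assumption here, not quoted from the patch:

static u8 xrgb8888_to_luma_bt601(u32 pix)
{
        u8 r = (pix & 0x00ff0000) >> 16;
        u8 g = (pix & 0x0000ff00) >> 8;
        u8 b = pix & 0x000000ff;

        /* integer approximation of Y = 0.299 R + 0.587 G + 0.114 B */
        return (3 * r + 6 * g + b) / 10;
}

The line buffer allocated next exists only so the uncached, write-combined CMA source is touched once per line by memcpy(), with the per-pixel reads then hitting the cached copy.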
*/ - buf = kmalloc(pitch, GFP_KERNEL); + buf = kmalloc(len, GFP_KERNEL); if (!buf) - return -ENOMEM; - - if (import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - goto err_free; - } + return; - for (y = 0; y < fb->height; y++) { - src = cma_obj->vaddr + (y * pitch); - memcpy(buf, src, pitch); + for (y = clip->y1; y < clip->y2; y++) { + src = vaddr + (y * fb->pitches[0]); + src += clip->x1; + memcpy(buf, src, len); src = buf; - for (x = 0; x < fb->width; x++) { + for (x = clip->x1; x < clip->x2; x++) { u8 r = (*src & 0x00ff0000) >> 16; u8 g = (*src & 0x0000ff00) >> 8; u8 b = *src & 0x000000ff; @@ -240,13 +232,7 @@ int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb) } } - if (import_attach) - ret = dma_buf_end_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); -err_free: kfree(buf); - - return ret; } EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8); diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c index f224b54a30f6..177e9d861001 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c @@ -56,7 +56,7 @@ static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = { static enum drm_connector_status tinydrm_connector_detect(struct drm_connector *connector, bool force) { - if (drm_device_is_unplugged(connector->dev)) + if (drm_dev_is_unplugged(connector->dev)) return connector_status_disconnected; return connector->status; diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c index 3343d3f15a90..30dc97b3ff21 100644 --- a/drivers/gpu/drm/tinydrm/repaper.c +++ b/drivers/gpu/drm/tinydrm/repaper.c @@ -18,6 +18,7 @@ */ #include <linux/delay.h> +#include <linux/dma-buf.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of_device.h> @@ -525,11 +526,20 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb, struct drm_clip_rect *clips, unsigned int num_clips) { + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; struct tinydrm_device *tdev = fb->dev->dev_private; struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct drm_clip_rect clip; u8 *buf = NULL; int ret = 0; + /* repaper can't do partial updates */ + clip.x1 = 0; + clip.x2 = fb->width; + clip.y1 = 0; + clip.y2 = fb->height; + mutex_lock(&tdev->dirty_lock); if (!epd->enabled) @@ -550,9 +560,21 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb, goto out_unlock; } - ret = tinydrm_xrgb8888_to_gray8(buf, fb); - if (ret) - goto out_unlock; + if (import_attach) { + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + goto out_unlock; + } + + tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip); + + if (import_attach) { + ret = dma_buf_end_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + goto out_unlock; + } repaper_gray8_to_mono_reversed(buf, fb->width, fb->height); diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c new file mode 100644 index 000000000000..1b39d3fb17f7 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/st7586.c @@ -0,0 +1,428 @@ +/* + * DRM driver for Sitronix ST7586 panels + * + * Copyright 2017 David Lechner <david@lechnology.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either 
version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/delay.h> +#include <linux/dma-buf.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/spi/spi.h> +#include <video/mipi_display.h> + +#include <drm/tinydrm/mipi-dbi.h> +#include <drm/tinydrm/tinydrm-helpers.h> + +/* controller-specific commands */ +#define ST7586_DISP_MODE_GRAY 0x38 +#define ST7586_DISP_MODE_MONO 0x39 +#define ST7586_ENABLE_DDRAM 0x3a +#define ST7586_SET_DISP_DUTY 0xb0 +#define ST7586_SET_PART_DISP 0xb4 +#define ST7586_SET_NLINE_INV 0xb5 +#define ST7586_SET_VOP 0xc0 +#define ST7586_SET_BIAS_SYSTEM 0xc3 +#define ST7586_SET_BOOST_LEVEL 0xc4 +#define ST7586_SET_VOP_OFFSET 0xc7 +#define ST7586_ENABLE_ANALOG 0xd0 +#define ST7586_AUTO_READ_CTRL 0xd7 +#define ST7586_OTP_RW_CTRL 0xe0 +#define ST7586_OTP_CTRL_OUT 0xe1 +#define ST7586_OTP_READ 0xe3 + +#define ST7586_DISP_CTRL_MX BIT(6) +#define ST7586_DISP_CTRL_MY BIT(7) + +/* + * The ST7586 controller has an unusual pixel format where 2bpp grayscale is + * packed 3 pixels per byte with the first two pixels using 3 bits and the 3rd + * pixel using only 2 bits. + * + * | D7 | D6 | D5 || | || 2bpp | + * | (D4) | (D3) | (D2) || D1 | D0 || GRAY | + * +------+------+------++------+------++------+ + * | 1 | 1 | 1 || 1 | 1 || 0 0 | black + * | 1 | 0 | 0 || 1 | 0 || 0 1 | dark gray + * | 0 | 1 | 0 || 0 | 1 || 1 0 | light gray + * | 0 | 0 | 0 || 0 | 0 || 1 1 | white + */ + +static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 }; + +static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_clip_rect *clip) +{ + size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1); + unsigned int x, y; + u8 *src, *buf, val; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return; + + tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip); + src = buf; + + for (y = clip->y1; y < clip->y2; y++) { + for (x = clip->x1; x < clip->x2; x += 3) { + val = st7586_lookup[*src++ >> 6] << 5; + val |= st7586_lookup[*src++ >> 6] << 2; + val |= st7586_lookup[*src++ >> 6] >> 1; + *dst++ = val; + } + } + + kfree(buf); +} + +static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_clip_rect *clip) +{ + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; + void *src = cma_obj->vaddr; + int ret = 0; + + if (import_attach) { + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + return ret; + } + + st7586_xrgb8888_to_gray332(dst, src, fb, clip); + + if (import_attach) + ret = dma_buf_end_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + + return ret; +} + +static int st7586_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int flags, + unsigned int color, struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct tinydrm_device *tdev = fb->dev->dev_private; + struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct drm_clip_rect clip; + int start, end; + int ret = 0; + + mutex_lock(&tdev->dirty_lock); + + if (!mipi->enabled) + goto out_unlock; + + /* fbdev can flush even when we're not interested */ + if (tdev->pipe.plane.fb != fb) + goto out_unlock; + + tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width, + fb->height); + + /* 3 pixels per byte, so grow clip to nearest multiple of 3 */ + clip.x1 = rounddown(clip.x1, 3); + clip.x2 = roundup(clip.x2, 3); + + DRM_DEBUG("Flushing [FB:%d] 
x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id, + clip.x1, clip.x2, clip.y1, clip.y2); + + ret = st7586_buf_copy(mipi->tx_buf, fb, &clip); + if (ret) + goto out_unlock; + + /* Pixels are packed 3 per byte */ + start = clip.x1 / 3; + end = clip.x2 / 3; + + mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, + (start >> 8) & 0xFF, start & 0xFF, + (end >> 8) & 0xFF, (end - 1) & 0xFF); + mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, + (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF, + (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF); + + ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, + (u8 *)mipi->tx_buf, + (end - start) * (clip.y2 - clip.y1)); + +out_unlock: + mutex_unlock(&tdev->dirty_lock); + + if (ret) + dev_err_once(fb->dev->dev, "Failed to update display %d\n", + ret); + + return ret; +} + +static const struct drm_framebuffer_funcs st7586_fb_funcs = { + .destroy = drm_fb_cma_destroy, + .create_handle = drm_fb_cma_create_handle, + .dirty = st7586_fb_dirty, +}; + +void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state) +{ + struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); + struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct drm_framebuffer *fb = pipe->plane.fb; + struct device *dev = tdev->drm->dev; + int ret; + u8 addr_mode; + + DRM_DEBUG_KMS("\n"); + + mipi_dbi_hw_reset(mipi); + ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f); + if (ret) { + dev_err(dev, "Error sending command %d\n", ret); + return; + } + + mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00); + + msleep(10); + + mipi_dbi_command(mipi, ST7586_OTP_READ); + + msleep(20); + + mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT); + mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE); + mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF); + + msleep(50); + + mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00); + mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00); + mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02); + mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04); + mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d); + mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00); + mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY); + mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02); + + switch (mipi->rotation) { + default: + addr_mode = 0x00; + break; + case 90: + addr_mode = ST7586_DISP_CTRL_MY; + break; + case 180: + addr_mode = ST7586_DISP_CTRL_MX | ST7586_DISP_CTRL_MY; + break; + case 270: + addr_mode = ST7586_DISP_CTRL_MX; + break; + } + mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); + + mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f); + mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0); + mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77); + mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE); + + msleep(100); + + mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON); + + mipi->enabled = true; + + if (fb) + fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0); +} + +static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe) +{ + struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); + struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + + DRM_DEBUG_KMS("\n"); + + if (!mipi->enabled) + return; + + mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF); + mipi->enabled = false; +} + +static const u32 st7586_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static int st7586_init(struct device *dev, struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, const struct drm_display_mode *mode, + unsigned 
int rotation) +{ + size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay; + struct tinydrm_device *tdev = &mipi->tinydrm; + int ret; + + mutex_init(&mipi->cmdlock); + + mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); + if (!mipi->tx_buf) + return -ENOMEM; + + ret = devm_tinydrm_init(dev, tdev, &st7586_fb_funcs, driver); + if (ret) + return ret; + + ret = tinydrm_display_pipe_init(tdev, pipe_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + st7586_formats, + ARRAY_SIZE(st7586_formats), + mode, rotation); + if (ret) + return ret; + + tdev->drm->mode_config.preferred_depth = 32; + mipi->rotation = rotation; + + drm_mode_config_reset(tdev->drm); + + DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", + tdev->drm->mode_config.preferred_depth, rotation); + + return 0; +} + +static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { + .enable = st7586_pipe_enable, + .disable = st7586_pipe_disable, + .update = tinydrm_display_pipe_update, + .prepare_fb = tinydrm_display_pipe_prepare_fb, +}; + +static const struct drm_display_mode st7586_mode = { + TINYDRM_MODE(178, 128, 37, 27), +}; + +DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); + +static struct drm_driver st7586_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | + DRIVER_ATOMIC, + .fops = &st7586_fops, + TINYDRM_GEM_DRIVER_OPS, + .lastclose = tinydrm_lastclose, + .debugfs_init = mipi_dbi_debugfs_init, + .name = "st7586", + .desc = "Sitronix ST7586", + .date = "20170801", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id st7586_of_match[] = { + { .compatible = "lego,ev3-lcd" }, + {}, +}; +MODULE_DEVICE_TABLE(of, st7586_of_match); + +static const struct spi_device_id st7586_id[] = { + { "ev3-lcd", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, st7586_id); + +static int st7586_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct tinydrm_device *tdev; + struct mipi_dbi *mipi; + struct gpio_desc *a0; + u32 rotation = 0; + int ret; + + mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + if (!mipi) + return -ENOMEM; + + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(mipi->reset)) { + dev_err(dev, "Failed to get gpio 'reset'\n"); + return PTR_ERR(mipi->reset); + } + + a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW); + if (IS_ERR(a0)) { + dev_err(dev, "Failed to get gpio 'a0'\n"); + return PTR_ERR(a0); + } + + device_property_read_u32(dev, "rotation", &rotation); + + ret = mipi_dbi_spi_init(spi, mipi, a0); + if (ret) + return ret; + + /* Cannot read from this controller via SPI */ + mipi->read_commands = NULL; + + /* + * we are using 8-bit data, so we are not actually swapping anything, + * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the + * right thing and not use 16-bit transfers (which results in swapped + * bytes on little-endian systems and causes out of order data to be + * sent to the display). 
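For the 3-pixels-per-byte layout described in the format comment near the top of this driver, a standalone sketch of how one output byte is assembled. The helper name is hypothetical; st7586_lookup is the driver's own table, mapping 2bpp gray 0..3 to the codes 0x7, 0x4, 0x2, 0x0 (the panel's encoding is inverted, so all-ones means black):

static u8 st7586_pack3(u8 gray0, u8 gray1, u8 gray2)
{
        u8 val;

        val  = st7586_lookup[gray0 & 3] << 5; /* pixel 0: full 3-bit code, D7-D5 */
        val |= st7586_lookup[gray1 & 3] << 2; /* pixel 1: full 3-bit code, D4-D2 */
        val |= st7586_lookup[gray2 & 3] >> 1; /* pixel 2: top two bits only, D1-D0 */

        return val;
}

This packing is also why st7586_init() above sizes the transfer buffer with a divide-by-3 rounded up (the + 2 before the division): each group of three pixels collapses to one byte on the wire.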
+ */ + mipi->swap_bytes = true; + + ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver, + &st7586_mode, rotation); + if (ret) + return ret; + + tdev = &mipi->tinydrm; + + ret = devm_tinydrm_register(tdev); + if (ret) + return ret; + + spi_set_drvdata(spi, mipi); + + DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n", + tdev->drm->driver->name, dev_name(dev), + spi->max_speed_hz / 1000000, + tdev->drm->primary->index); + + return 0; +} + +static void st7586_shutdown(struct spi_device *spi) +{ + struct mipi_dbi *mipi = spi_get_drvdata(spi); + + tinydrm_shutdown(&mipi->tinydrm); +} + +static struct spi_driver st7586_spi_driver = { + .driver = { + .name = "st7586", + .owner = THIS_MODULE, + .of_match_table = st7586_of_match, + }, + .id_table = st7586_id, + .probe = st7586_probe, + .shutdown = st7586_shutdown, +}; +module_spi_driver(st7586_spi_driver); + +MODULE_DESCRIPTION("Sitronix ST7586 DRM driver"); +MODULE_AUTHOR("David Lechner <david@lechnology.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index d2f57c52f7db..9f9a49748d17 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -96,7 +96,7 @@ static int udl_mode_valid(struct drm_connector *connector, static enum drm_connector_status udl_detect(struct drm_connector *connector, bool force) { - if (drm_device_is_unplugged(connector->dev)) + if (drm_dev_is_unplugged(connector->dev)) return connector_status_disconnected; return connector_status_connected; } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 0f02e1acf0ba..bfacb294d5c4 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -54,7 +54,6 @@ static struct drm_driver driver = { .dumb_create = udl_dumb_create, .dumb_map_offset = udl_gem_mmap, - .dumb_destroy = drm_gem_dumb_destroy, .fops = &udl_driver_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -102,7 +101,7 @@ static void udl_usb_disconnect(struct usb_interface *interface) drm_kms_helper_poll_disable(dev); udl_fbdev_unplug(dev); udl_drop_usb(dev); - drm_unplug_dev(dev); + drm_dev_unplug(dev); } /* diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index a5c54dc60def..b7ca90db4e80 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -198,7 +198,7 @@ static int udl_fb_open(struct fb_info *info, int user) struct udl_device *udl = dev->dev_private; /* If the USB device is gone, we don't accept new opens */ - if (drm_device_is_unplugged(udl->ddev)) + if (drm_dev_is_unplugged(udl->ddev)) return -ENODEV; ufbdev->fb_count++; @@ -309,7 +309,7 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) struct udl_framebuffer *ufb = to_udl_fb(fb); if (ufb->obj) - drm_gem_object_unreference_unlocked(&ufb->obj->base); + drm_gem_object_put_unlocked(&ufb->obj->base); drm_framebuffer_cleanup(fb); kfree(ufb); @@ -403,7 +403,7 @@ static int udlfb_create(struct drm_fb_helper *helper, return ret; out_gfree: - drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); out: return ret; } @@ -419,7 +419,7 @@ static void udl_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ufbdev->helper); drm_framebuffer_unregister_private(&ufbdev->ufb.base); drm_framebuffer_cleanup(&ufbdev->ufb.base); - drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); } int 
udl_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index db9ceceba30e..dee6bd9a3dd1 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file, return ret; } - drm_gem_object_unreference_unlocked(&obj->base); + drm_gem_object_put_unlocked(&obj->base); *handle_p = handle; return 0; } @@ -234,7 +234,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_unreference(&gobj->base); + drm_gem_object_put(&gobj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index b24dd8685590..3afdbf4bc10b 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -366,7 +366,7 @@ int vc4_dumb_create(struct drm_file *file_priv, return PTR_ERR(bo); ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -482,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader) { - DRM_ERROR("Attempting to export shader BO\n"); + DRM_DEBUG("Attempting to export shader BO\n"); return ERR_PTR(-EINVAL); } @@ -503,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma) bo = to_vc4_bo(gem_obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { - DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); + DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n"); return -EINVAL; } @@ -528,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { - DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); + DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n"); return -EINVAL; } @@ -540,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj) struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader) { - DRM_ERROR("mmaping of shader BOs not allowed.\n"); + DRM_DEBUG("mmaping of shader BOs not allowed.\n"); return ERR_PTR(-EINVAL); } @@ -581,7 +581,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, return PTR_ERR(bo); ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -594,14 +594,14 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -EINVAL; } /* The mmap offset was set up at BO allocation time. 
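The lookup, offset read, and put in this ioctl are the same boilerplate the DRM core now offers as the dumb_map_offset default, which is what allowed the tegra copy of it to be deleted earlier in this series. A sketch of the generic flow, assuming the 4.14-era core helpers:

static int dumb_map_offset_sketch(struct drm_file *file, struct drm_device *dev,
                                  u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle); /* takes a reference */
        if (!obj)
                return -ENOENT;

        ret = drm_gem_create_mmap_offset(obj); /* no-op if already allocated */
        if (!ret)
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        drm_gem_object_put_unlocked(obj); /* drop the lookup reference */
        return ret;
}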
*/ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return 0; } @@ -657,7 +657,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); fail: - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -698,13 +698,13 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); bo->t_format = t_format; - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return 0; } @@ -729,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); @@ -739,7 +739,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, else args->modifier = DRM_FORMAT_MOD_NONE; - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return 0; } @@ -830,7 +830,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, ret = -ENOMEM; mutex_unlock(&vc4->bo_lock); - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 664a55b45af0..ce1e3b9e14c9 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -763,7 +763,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) } drm_crtc_vblank_put(crtc); - drm_framebuffer_unreference(flip_state->fb); + drm_framebuffer_put(flip_state->fb); kfree(flip_state); up(&vc4->async_modeset); @@ -792,7 +792,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, if (!flip_state) return -ENOMEM; - drm_framebuffer_reference(fb); + drm_framebuffer_get(fb); flip_state->fb = fb; flip_state->crtc = crtc; flip_state->event = event; @@ -800,7 +800,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, /* Make sure all other async modesetes have landed. 
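The reference discipline around this flip is easy to lose in the rename noise: the submit path takes a framebuffer reference before queuing, and exactly one put balances it on every exit, either in the error path just below or in the completion callback above. A condensed sketch with hypothetical names:

struct async_flip_state {
        struct drm_framebuffer *fb; /* stand-in for the driver's own state */
};

static int submit_flip_to_hw(struct async_flip_state *state); /* hypothetical */

static int queue_async_flip(struct async_flip_state *state,
                            struct drm_framebuffer *fb)
{
        drm_framebuffer_get(fb); /* keep fb alive across the async window */
        state->fb = fb;

        if (submit_flip_to_hw(state)) {
                drm_framebuffer_put(fb); /* error: undo our reference */
                return -EBUSY;
        }

        return 0; /* success: the completion callback does the put */
}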
*/ ret = down_interruptible(&vc4->async_modeset); if (ret) { - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); kfree(flip_state); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index e8f0e1790d5e..1c96edcb302b 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -99,6 +99,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, case DRM_VC4_PARAM_SUPPORTS_BRANCHES: case DRM_VC4_PARAM_SUPPORTS_ETC1: case DRM_VC4_PARAM_SUPPORTS_THREADED_FS: + case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER: args->value = true; break; default: diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 629d372633e6..d1e0dc908048 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1636,14 +1636,10 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master, pm_runtime_disable(dev); - drm_bridge_remove(dsi->bridge); vc4_dsi_encoder_destroy(dsi->encoder); mipi_dsi_host_unregister(&dsi->dsi_host); - clk_disable_unprepare(dsi->pll_phy_clock); - clk_disable_unprepare(dsi->escape_clock); - if (dsi->port == 1) vc4->dsi1 = NULL; } diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 209fccd0d3b4..d0c6bfb68c4e 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -55,7 +55,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) unsigned int i; for (i = 0; i < state->user_state.bo_count; i++) - drm_gem_object_unreference_unlocked(state->bo[i]); + drm_gem_object_put_unlocked(state->bo[i]); kfree(state); } @@ -188,12 +188,12 @@ vc4_save_hang_state(struct drm_device *dev) continue; for (j = 0; j < exec[i]->bo_count; j++) { - drm_gem_object_reference(&exec[i]->bo[j]->base); + drm_gem_object_get(&exec[i]->bo[j]->base); kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { - drm_gem_object_reference(&bo->base.base); + drm_gem_object_get(&bo->base.base); kernel_state->bo[j + prev_idx] = &bo->base.base; j++; } @@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev, /* See comment on bo_index for why we have to check * this. 
*/ - DRM_ERROR("Rendering requires BOs to validate\n"); + DRM_DEBUG("Rendering requires BOs to validate\n"); return -EINVAL; } @@ -690,13 +690,13 @@ vc4_cl_lookup_bos(struct drm_device *dev, struct drm_gem_object *bo = idr_find(&file_priv->object_idr, handles[i]); if (!bo) { - DRM_ERROR("Failed to look up GEM BO %d: %d\n", + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", i, handles[i]); ret = -EINVAL; spin_unlock(&file_priv->table_lock); goto fail; } - drm_gem_object_reference(bo); + drm_gem_object_get(bo); exec->bo[i] = (struct drm_gem_cma_object *)bo; } spin_unlock(&file_priv->table_lock); @@ -728,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) args->shader_rec_count >= (UINT_MAX / sizeof(struct vc4_shader_state)) || temp_size < exec_size) { - DRM_ERROR("overflow in exec arguments\n"); + DRM_DEBUG("overflow in exec arguments\n"); ret = -EINVAL; goto fail; } @@ -834,7 +834,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) if (exec->bo) { for (i = 0; i < exec->bo_count; i++) - drm_gem_object_unreference_unlocked(&exec->bo[i]->base); + drm_gem_object_put_unlocked(&exec->bo[i]->base); kvfree(exec->bo); } @@ -842,7 +842,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_bo *bo = list_first_entry(&exec->unref_list, struct vc4_bo, unref_head); list_del(&bo->unref_head); - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); } /* Free up the allocation of any bin slots we used. */ @@ -973,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -EINVAL; } bo = to_vc4_bo(gem_obj); @@ -981,7 +981,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns); - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return ret; } @@ -1007,8 +1007,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, struct ww_acquire_ctx acquire_ctx; int ret = 0; - if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { - DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); + if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR | + VC4_SUBMIT_CL_FIXED_RCL_ORDER | + VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X | + VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) { + DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags); return -EINVAL; } @@ -1117,6 +1120,4 @@ vc4_gem_destroy(struct drm_device *dev) if (vc4->hang_state) vc4_free_hang_state(dev, vc4->hang_state); - - vc4_bo_cache_destroy(dev); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ff09b8e2f9ee..937da8dd65b8 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -288,6 +288,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + kfree(edid); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index aeec6e8703d2..dfe7554268f0 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -169,7 +169,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, gem_obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (!gem_obj) { - 
DRM_ERROR("Failed to look up GEM BO %d\n", + DRM_DEBUG("Failed to look up GEM BO %d\n", mode_cmd->handles[0]); return ERR_PTR(-ENOENT); } @@ -184,7 +184,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE; } - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); mode_cmd = &mode_cmd_local; } diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 4a8051532f00..273984f71ae2 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -261,8 +261,17 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec, uint8_t max_y_tile = args->max_y_tile; uint8_t xtiles = max_x_tile - min_x_tile + 1; uint8_t ytiles = max_y_tile - min_y_tile + 1; - uint8_t x, y; + uint8_t xi, yi; uint32_t size, loop_body_size; + bool positive_x = true; + bool positive_y = true; + + if (args->flags & VC4_SUBMIT_CL_FIXED_RCL_ORDER) { + if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X)) + positive_x = false; + if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) + positive_y = false; + } size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE; loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE; @@ -354,10 +363,12 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec, rcl_u16(setup, args->height); rcl_u16(setup, args->color_write.bits); - for (y = min_y_tile; y <= max_y_tile; y++) { - for (x = min_x_tile; x <= max_x_tile; x++) { - bool first = (x == min_x_tile && y == min_y_tile); - bool last = (x == max_x_tile && y == max_y_tile); + for (yi = 0; yi < ytiles; yi++) { + int y = positive_y ? min_y_tile + yi : max_y_tile - yi; + for (xi = 0; xi < xtiles; xi++) { + int x = positive_x ? 
min_x_tile + xi : max_x_tile - xi; + bool first = (xi == 0 && yi == 0); + bool last = (xi == xtiles - 1 && yi == ytiles - 1); emit_tile(exec, setup, x, y, first, last); } @@ -378,14 +389,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec, u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32); if (surf->offset > obj->base.size) { - DRM_ERROR("surface offset %d > BO size %zd\n", + DRM_DEBUG("surface offset %d > BO size %zd\n", surf->offset, obj->base.size); return -EINVAL; } if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE < render_tiles_stride * args->max_y_tile + args->max_x_tile) { - DRM_ERROR("MSAA tile %d, %d out of bounds " + DRM_DEBUG("MSAA tile %d, %d out of bounds " "(bo size %zd, offset %d).\n", args->max_x_tile, args->max_y_tile, obj->base.size, @@ -401,7 +412,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, struct drm_vc4_submit_rcl_surface *surf) { if (surf->flags != 0 || surf->bits != 0) { - DRM_ERROR("MSAA surface had nonzero flags/bits\n"); + DRM_DEBUG("MSAA surface had nonzero flags/bits\n"); return -EINVAL; } @@ -415,7 +426,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; if (surf->offset & 0xf) { - DRM_ERROR("MSAA write must be 16b aligned.\n"); + DRM_DEBUG("MSAA write must be 16b aligned.\n"); return -EINVAL; } @@ -437,7 +448,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, int ret; if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { - DRM_ERROR("Extra flags set\n"); + DRM_DEBUG("Extra flags set\n"); return -EINVAL; } @@ -453,12 +464,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { if (surf == &exec->args->zs_write) { - DRM_ERROR("general zs write may not be a full-res.\n"); + DRM_DEBUG("general zs write may not be a full-res.\n"); return -EINVAL; } if (surf->bits != 0) { - DRM_ERROR("load/store general bits set with " + DRM_DEBUG("load/store general bits set with " "full res load/store.\n"); return -EINVAL; } @@ -473,19 +484,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK | VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK | VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) { - DRM_ERROR("Unknown bits in load/store: 0x%04x\n", + DRM_DEBUG("Unknown bits in load/store: 0x%04x\n", surf->bits); return -EINVAL; } if (tiling > VC4_TILING_FORMAT_LT) { - DRM_ERROR("Bad tiling format\n"); + DRM_DEBUG("Bad tiling format\n"); return -EINVAL; } if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) { if (format != 0) { - DRM_ERROR("No color format should be set for ZS\n"); + DRM_DEBUG("No color format should be set for ZS\n"); return -EINVAL; } cpp = 4; @@ -499,16 +510,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, cpp = 4; break; default: - DRM_ERROR("Bad tile buffer format\n"); + DRM_DEBUG("Bad tile buffer format\n"); return -EINVAL; } } else { - DRM_ERROR("Bad load/store buffer %d.\n", buffer); + DRM_DEBUG("Bad load/store buffer %d.\n", buffer); return -EINVAL; } if (surf->offset & 0xf) { - DRM_ERROR("load/store buffer must be 16b aligned.\n"); + DRM_DEBUG("load/store buffer must be 16b aligned.\n"); return -EINVAL; } @@ -533,7 +544,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, int cpp; if (surf->flags != 0) { - DRM_ERROR("No flags supported on render config.\n"); + DRM_DEBUG("No flags supported on render config.\n"); return -EINVAL; } @@ -541,7 +552,7 @@ 
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, VC4_RENDER_CONFIG_FORMAT_MASK | VC4_RENDER_CONFIG_MS_MODE_4X | VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) { - DRM_ERROR("Unknown bits in render config: 0x%04x\n", + DRM_DEBUG("Unknown bits in render config: 0x%04x\n", surf->bits); return -EINVAL; } @@ -556,7 +567,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; if (tiling > VC4_TILING_FORMAT_LT) { - DRM_ERROR("Bad tiling format\n"); + DRM_DEBUG("Bad tiling format\n"); return -EINVAL; } @@ -569,7 +580,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, cpp = 4; break; default: - DRM_ERROR("Bad tile buffer format\n"); + DRM_DEBUG("Bad tile buffer format\n"); return -EINVAL; } @@ -590,7 +601,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) if (args->min_x_tile > args->max_x_tile || args->min_y_tile > args->max_y_tile) { - DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n", + DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n", args->min_x_tile, args->min_y_tile, args->max_x_tile, args->max_y_tile); return -EINVAL; @@ -599,7 +610,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) if (has_bin && (args->max_x_tile > exec->bin_tiles_x || args->max_y_tile > exec->bin_tiles_y)) { - DRM_ERROR("Render tiles (%d,%d) outside of bin config " + DRM_DEBUG("Render tiles (%d,%d) outside of bin config " "(%d,%d)\n", args->max_x_tile, args->max_y_tile, exec->bin_tiles_x, exec->bin_tiles_y); @@ -642,7 +653,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) */ if (!setup.color_write && !setup.zs_write && !setup.msaa_color_write && !setup.msaa_zs_write) { - DRM_ERROR("RCL requires color or Z/S write\n"); + DRM_DEBUG("RCL requires color or Z/S write\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 814b512c6b9a..2db485abb186 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) struct vc4_bo *bo; if (hindex >= exec->bo_count) { - DRM_ERROR("BO index %d greater than BO count %d\n", + DRM_DEBUG("BO index %d greater than BO count %d\n", hindex, exec->bo_count); return NULL; } @@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) bo = to_vc4_bo(&obj->base); if (bo->validated_shader) { - DRM_ERROR("Trying to use shader BO as something other than " + DRM_DEBUG("Trying to use shader BO as something other than " "a shader\n"); return NULL; } @@ -172,7 +172,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, * our math. 
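The 4096 cap enforced just after this comment is what keeps the u32 size arithmetic below from wrapping: bounded on both axes, the worst-case product sits a factor of 64 under U32_MAX, even allowing for utile alignment nudging the dimensions up. Worked numbers, assuming a 4-byte-per-pixel ceiling for the formats this validator accepts:

        /* 4096 * 4096 * 4 = 0x04000000 (64 MiB), vs. U32_MAX at ~4 GiB */
        BUILD_BUG_ON((u64)4096 * 4096 * 4 > U32_MAX);

With the multiplication safe, the explicit "size + offset < size" test later in the function only has to catch wraparound from the offset addition.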
*/ if (width > 4096 || height > 4096) { - DRM_ERROR("Surface dimensions (%d,%d) too large", + DRM_DEBUG("Surface dimensions (%d,%d) too large", width, height); return false; } @@ -191,7 +191,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, aligned_height = round_up(height, utile_h); break; default: - DRM_ERROR("buffer tiling %d unsupported\n", tiling_format); + DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format); return false; } @@ -200,7 +200,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, if (size + offset < size || size + offset > fbo->base.size) { - DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n", + DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n", width, height, aligned_width, aligned_height, size, offset, fbo->base.size); @@ -214,7 +214,7 @@ static int validate_flush(VALIDATE_ARGS) { if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) { - DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n"); + DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n"); return -EINVAL; } exec->found_flush = true; @@ -226,13 +226,13 @@ static int validate_start_tile_binning(VALIDATE_ARGS) { if (exec->found_start_tile_binning_packet) { - DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n"); + DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n"); return -EINVAL; } exec->found_start_tile_binning_packet = true; if (!exec->found_tile_binning_mode_config_packet) { - DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); + DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); return -EINVAL; } @@ -243,7 +243,7 @@ static int validate_increment_semaphore(VALIDATE_ARGS) { if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) { - DRM_ERROR("Bin CL must end with " + DRM_DEBUG("Bin CL must end with " "VC4_PACKET_INCREMENT_SEMAPHORE\n"); return -EINVAL; } @@ -264,7 +264,7 @@ validate_indexed_prim_list(VALIDATE_ARGS) /* Check overflow condition */ if (exec->shader_state_count == 0) { - DRM_ERROR("shader state must precede primitives\n"); + DRM_DEBUG("shader state must precede primitives\n"); return -EINVAL; } shader_state = &exec->shader_state[exec->shader_state_count - 1]; @@ -281,7 +281,7 @@ validate_indexed_prim_list(VALIDATE_ARGS) if (offset > ib->base.size || (ib->base.size - offset) / index_size < length) { - DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n", + DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n", offset, length, index_size, ib->base.size); return -EINVAL; } @@ -301,13 +301,13 @@ validate_gl_array_primitive(VALIDATE_ARGS) /* Check overflow condition */ if (exec->shader_state_count == 0) { - DRM_ERROR("shader state must precede primitives\n"); + DRM_DEBUG("shader state must precede primitives\n"); return -EINVAL; } shader_state = &exec->shader_state[exec->shader_state_count - 1]; if (length + base_index < length) { - DRM_ERROR("primitive vertex count overflow\n"); + DRM_DEBUG("primitive vertex count overflow\n"); return -EINVAL; } max_index = length + base_index - 1; @@ -324,7 +324,7 @@ validate_gl_shader_state(VALIDATE_ARGS) uint32_t i = exec->shader_state_count++; if (i >= exec->shader_state_size) { - DRM_ERROR("More requests for shader states than declared\n"); + DRM_DEBUG("More requests for shader states than declared\n"); return -EINVAL; } @@ -332,7 +332,7 @@ validate_gl_shader_state(VALIDATE_ARGS) exec->shader_state[i].max_index = 0; if (exec->shader_state[i].addr & ~0xf) { - DRM_ERROR("high bits set in GL shader rec reference\n"); + 
DRM_DEBUG("high bits set in GL shader rec reference\n"); return -EINVAL; } @@ -356,7 +356,7 @@ validate_tile_binning_config(VALIDATE_ARGS) int bin_slot; if (exec->found_tile_binning_mode_config_packet) { - DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); + DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); return -EINVAL; } exec->found_tile_binning_mode_config_packet = true; @@ -368,14 +368,14 @@ validate_tile_binning_config(VALIDATE_ARGS) if (exec->bin_tiles_x == 0 || exec->bin_tiles_y == 0) { - DRM_ERROR("Tile binning config of %dx%d too small\n", + DRM_DEBUG("Tile binning config of %dx%d too small\n", exec->bin_tiles_x, exec->bin_tiles_y); return -EINVAL; } if (flags & (VC4_BIN_CONFIG_DB_NON_MS | VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) { - DRM_ERROR("unsupported binning config flags 0x%02x\n", flags); + DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags); return -EINVAL; } @@ -493,20 +493,20 @@ vc4_validate_bin_cl(struct drm_device *dev, const struct cmd_info *info; if (cmd >= ARRAY_SIZE(cmd_info)) { - DRM_ERROR("0x%08x: packet %d out of bounds\n", + DRM_DEBUG("0x%08x: packet %d out of bounds\n", src_offset, cmd); return -EINVAL; } info = &cmd_info[cmd]; if (!info->name) { - DRM_ERROR("0x%08x: packet %d invalid\n", + DRM_DEBUG("0x%08x: packet %d invalid\n", src_offset, cmd); return -EINVAL; } if (src_offset + info->len > len) { - DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x " + DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x " "exceeds bounds (0x%08x)\n", src_offset, cmd, info->name, info->len, src_offset + len); @@ -519,7 +519,7 @@ vc4_validate_bin_cl(struct drm_device *dev, if (info->func && info->func(exec, dst_pkt + 1, src_pkt + 1)) { - DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n", + DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n", src_offset, cmd, info->name); return -EINVAL; } @@ -537,7 +537,7 @@ vc4_validate_bin_cl(struct drm_device *dev, exec->ct0ea = exec->ct0ca + dst_offset; if (!exec->found_start_tile_binning_packet) { - DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); + DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); return -EINVAL; } @@ -549,7 +549,7 @@ vc4_validate_bin_cl(struct drm_device *dev, * semaphore increment. 
*/ if (!exec->found_increment_semaphore_packet || !exec->found_flush) { - DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " + DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " "VC4_PACKET_FLUSH\n"); return -EINVAL; } @@ -588,11 +588,11 @@ reloc_tex(struct vc4_exec_info *exec, uint32_t remaining_size = tex->base.size - p0; if (p0 > tex->base.size - 4) { - DRM_ERROR("UBO offset greater than UBO size\n"); + DRM_DEBUG("UBO offset greater than UBO size\n"); goto fail; } if (p1 > remaining_size - 4) { - DRM_ERROR("UBO clamp would allow reads " + DRM_DEBUG("UBO clamp would allow reads " "outside of UBO\n"); goto fail; } @@ -612,14 +612,14 @@ reloc_tex(struct vc4_exec_info *exec, if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) == VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) { if (cube_map_stride) { - DRM_ERROR("Cube map stride set twice\n"); + DRM_DEBUG("Cube map stride set twice\n"); goto fail; } cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK; } if (!cube_map_stride) { - DRM_ERROR("Cube map stride not set\n"); + DRM_DEBUG("Cube map stride not set\n"); goto fail; } } @@ -660,7 +660,7 @@ reloc_tex(struct vc4_exec_info *exec, case VC4_TEXTURE_TYPE_RGBA64: case VC4_TEXTURE_TYPE_YUV422R: default: - DRM_ERROR("Texture format %d unsupported\n", type); + DRM_DEBUG("Texture format %d unsupported\n", type); goto fail; } utile_w = utile_width(cpp); @@ -713,7 +713,7 @@ reloc_tex(struct vc4_exec_info *exec, level_size = aligned_width * cpp * aligned_height; if (offset < level_size) { - DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db " + DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db " "overflowed buffer bounds (offset %d)\n", i, level_width, level_height, aligned_width, aligned_height, @@ -764,7 +764,7 @@ validate_gl_shader_rec(struct drm_device *dev, nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes; if (nr_relocs * 4 > exec->shader_rec_size) { - DRM_ERROR("overflowed shader recs reading %d handles " + DRM_DEBUG("overflowed shader recs reading %d handles " "from %d bytes left\n", nr_relocs, exec->shader_rec_size); return -EINVAL; @@ -774,7 +774,7 @@ validate_gl_shader_rec(struct drm_device *dev, exec->shader_rec_size -= nr_relocs * 4; if (packet_size > exec->shader_rec_size) { - DRM_ERROR("overflowed shader recs copying %db packet " + DRM_DEBUG("overflowed shader recs copying %db packet " "from %d bytes left\n", packet_size, exec->shader_rec_size); return -EINVAL; @@ -794,7 +794,7 @@ validate_gl_shader_rec(struct drm_device *dev, for (i = 0; i < shader_reloc_count; i++) { if (src_handles[i] > exec->bo_count) { - DRM_ERROR("Shader handle %d too big\n", src_handles[i]); + DRM_DEBUG("Shader handle %d too big\n", src_handles[i]); return -EINVAL; } @@ -810,13 +810,13 @@ validate_gl_shader_rec(struct drm_device *dev, if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) != to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) { - DRM_ERROR("Thread mode of CL and FS do not match\n"); + DRM_DEBUG("Thread mode of CL and FS do not match\n"); return -EINVAL; } if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded || to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) { - DRM_ERROR("cs and vs cannot be threaded\n"); + DRM_DEBUG("cs and vs cannot be threaded\n"); return -EINVAL; } @@ -831,7 +831,7 @@ validate_gl_shader_rec(struct drm_device *dev, *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; if (src_offset != 0) { - DRM_ERROR("Shaders must be at offset 0 of " + DRM_DEBUG("Shaders must be at offset 0 of " "the BO.\n"); return -EINVAL; } @@ -842,7 +842,7 @@ 
validate_gl_shader_rec(struct drm_device *dev,
 	if (validated_shader->uniforms_src_size > exec->uniforms_size) {
-		DRM_ERROR("Uniforms src buffer overflow\n");
+		DRM_DEBUG("Uniforms src buffer overflow\n");
 		return -EINVAL;
 	}
 
@@ -900,7 +900,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 		if (vbo->base.size < offset ||
 		    vbo->base.size - offset < attr_size) {
-			DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
+			DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n",
 				  offset, attr_size, vbo->base.size);
 			return -EINVAL;
 		}
@@ -909,7 +909,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 		max_index = ((vbo->base.size - offset - attr_size) / stride);
 		if (state->max_index > max_index) {
-			DRM_ERROR("primitives use index %d out of "
+			DRM_DEBUG("primitives use index %d out of "
 				  "supplied %d\n",
 				  state->max_index, max_index);
 			return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 0b2df5c6efb4..d3f15bf60900 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		uint32_t clamp_reg, clamp_offset;
 
 		if (sig == QPU_SIG_SMALL_IMM) {
-			DRM_ERROR("direct TMU read used small immediate\n");
+			DRM_DEBUG("direct TMU read used small immediate\n");
 			return false;
 		}
 
@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		 */
 		if (is_mul ||
 		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
-			DRM_ERROR("direct TMU load wasn't an add\n");
+			DRM_DEBUG("direct TMU load wasn't an add\n");
 			return false;
 		}
 
@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		 */
 		clamp_reg = raddr_add_a_to_live_reg_index(inst);
 		if (clamp_reg == ~0) {
-			DRM_ERROR("direct TMU load wasn't clamped\n");
+			DRM_DEBUG("direct TMU load wasn't clamped\n");
 			return false;
 		}
 
 		clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
 		if (clamp_offset == ~0) {
-			DRM_ERROR("direct TMU load wasn't clamped\n");
+			DRM_DEBUG("direct TMU load wasn't clamped\n");
 			return false;
 		}
 
@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 
 		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
 		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
-			DRM_ERROR("direct TMU load didn't add to a uniform\n");
+			DRM_DEBUG("direct TMU load didn't add to a uniform\n");
 			return false;
 		}
 
@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 	} else {
 		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
 					      raddr_b == QPU_R_UNIF)) {
-			DRM_ERROR("uniform read in the same instruction as "
+			DRM_DEBUG("uniform read in the same instruction as "
 				  "texture setup.\n");
 			return false;
 		}
 	}
 
 	if (validation_state->tmu_write_count[tmu] >= 4) {
-		DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+		DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
 			  tmu);
 		return false;
 	}
@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 	 */
 	if (!is_direct) {
 		if (validation_state->needs_uniform_address_update) {
-			DRM_ERROR("Texturing with undefined uniform address\n");
+			DRM_DEBUG("Texturing with undefined uniform address\n");
 			return false;
 		}
 
@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
 	case QPU_SIG_LOAD_TMU1:
 		break;
 	default:
-		DRM_ERROR("uniforms address change must be "
+		DRM_DEBUG("uniforms address change must be "
 			  "normal math\n");
 		return false;
 	}
 
 	if (is_mul ||
 	    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
-		DRM_ERROR("Uniform address reset must be an ADD.\n");
+		DRM_DEBUG("Uniform address reset must be an ADD.\n");
 		return false;
 	}
 
 	if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
-		DRM_ERROR("Uniform address reset must be unconditional.\n");
+		DRM_DEBUG("Uniform address reset must be unconditional.\n");
 		return false;
 	}
 
 	if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
 	    !(inst & QPU_PM)) {
-		DRM_ERROR("No packing allowed on uniforms reset\n");
+		DRM_DEBUG("No packing allowed on uniforms reset\n");
 		return false;
 	}
 
 	if (add_lri == -1) {
-		DRM_ERROR("First argument of uniform address write must be "
+		DRM_DEBUG("First argument of uniform address write must be "
 			  "an immediate value.\n");
 		return false;
 	}
 
 	if (validation_state->live_immediates[add_lri] != expected_offset) {
-		DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+		DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
 			  validation_state->live_immediates[add_lri],
 			  expected_offset);
 		return false;
@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
 
 	if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
 	    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
-		DRM_ERROR("Second argument of uniform address write must be "
+		DRM_DEBUG("Second argument of uniform address write must be "
 			  "a uniform.\n");
 		return false;
 	}
@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
 	switch (waddr) {
 	case QPU_W_UNIFORMS_ADDRESS:
 		if (is_b) {
-			DRM_ERROR("relative uniforms address change "
+			DRM_DEBUG("relative uniforms address change "
 				  "unsupported\n");
 			return false;
 		}
@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
 		/* XXX: I haven't thought about these, so don't support them
 		 * for now.
 		 */
-		DRM_ERROR("Unsupported waddr %d\n", waddr);
+		DRM_DEBUG("Unsupported waddr %d\n", waddr);
 		return false;
 
 	case QPU_W_VPM_ADDR:
-		DRM_ERROR("General VPM DMA unsupported\n");
+		DRM_DEBUG("General VPM DMA unsupported\n");
 		return false;
 
 	case QPU_W_VPM:
@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
 	bool ok;
 
 	if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
-		DRM_ERROR("ADD and MUL both set up textures\n");
+		DRM_DEBUG("ADD and MUL both set up textures\n");
 		return false;
 	}
 
@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
 	 * there's no need for it.
 	 */
 	if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
-		DRM_ERROR("branch instruction at %d wrote a register.\n",
+		DRM_DEBUG("branch instruction at %d wrote a register.\n",
 			  validation_state->ip);
 		return false;
 	}
@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
 		validated_shader->uniforms_size += 4;
 
 		if (validation_state->needs_uniform_address_update) {
-			DRM_ERROR("Uniform read with undefined uniform "
+			DRM_DEBUG("Uniform read with undefined uniform "
 				  "address\n");
 			return false;
 		}
@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 			continue;
 
 		if (ip - last_branch < 4) {
-			DRM_ERROR("Branch at %d during delay slots\n", ip);
+			DRM_DEBUG("Branch at %d during delay slots\n", ip);
 			return false;
 		}
 
 		last_branch = ip;
 
 		if (inst & QPU_BRANCH_REG) {
-			DRM_ERROR("branching from register relative "
+			DRM_DEBUG("branching from register relative "
 				  "not supported\n");
 			return false;
 		}
 
 		if (!(inst & QPU_BRANCH_REL)) {
-			DRM_ERROR("relative branching required\n");
+			DRM_DEBUG("relative branching required\n");
 			return false;
 		}
 
@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 		 * end of the shader object.
 		 */
 		if (branch_imm % sizeof(inst) != 0) {
-			DRM_ERROR("branch target not aligned\n");
+			DRM_DEBUG("branch target not aligned\n");
 			return false;
 		}
 
 		branch_target_ip = after_delay_ip + (branch_imm >> 3);
 		if (branch_target_ip >= validation_state->max_ip) {
-			DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+			DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
 				  ip, branch_target_ip,
 				  validation_state->max_ip);
 			return false;
@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 		 * the shader.
 		 */
 		if (after_delay_ip >= validation_state->max_ip) {
-			DRM_ERROR("Branch at %d continues past shader end "
+			DRM_DEBUG("Branch at %d continues past shader end "
 				  "(%d/%d)\n", ip, after_delay_ip,
 				  validation_state->max_ip);
 			return false;
@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 	}
 
 	if (max_branch_target > validation_state->max_ip - 3) {
-		DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+		DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
 		return false;
 	}
 
@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
 		return true;
 
 	if (texturing_in_progress(validation_state)) {
-		DRM_ERROR("Branch target landed during TMU setup\n");
+		DRM_DEBUG("Branch target landed during TMU setup\n");
 		return false;
 	}
 
@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 		case QPU_SIG_LAST_THREAD_SWITCH:
 			if (!check_instruction_writes(validated_shader,
 						      &validation_state)) {
-				DRM_ERROR("Bad write at ip %d\n", ip);
+				DRM_DEBUG("Bad write at ip %d\n", ip);
 				goto fail;
 			}
 
@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 				validated_shader->is_threaded = true;
 
 				if (ip < last_thread_switch_ip + 3) {
-					DRM_ERROR("Thread switch too soon after "
+					DRM_DEBUG("Thread switch too soon after "
 						  "last switch at ip %d\n", ip);
 					goto fail;
 				}
@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 		case QPU_SIG_LOAD_IMM:
 			if (!check_instruction_writes(validated_shader,
 						      &validation_state)) {
-				DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
+				DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
 				goto fail;
 			}
 			break;
@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 				goto fail;
 
 			if (ip < last_thread_switch_ip + 3) {
-				DRM_ERROR("Branch in thread switch at ip %d",
+				DRM_DEBUG("Branch in thread switch at ip %d",
 					  ip);
 				goto fail;
 			}
 
 			break;
 		default:
-			DRM_ERROR("Unsupported QPU signal %d at "
+			DRM_DEBUG("Unsupported QPU signal %d at "
 				  "instruction %d\n", sig, ip);
 			goto fail;
 		}
@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 	}
 
 	if (ip == validation_state.max_ip) {
-		DRM_ERROR("shader failed to terminate before "
+		DRM_DEBUG("shader failed to terminate before "
 			  "shader BO end at %zd\n",
 			  shader_obj->base.size);
 		goto fail;
 	}
@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 	/* Might corrupt other thread */
 	if (validated_shader->is_threaded &&
 	    validation_state.all_registers_used) {
-		DRM_ERROR("Shader uses threading, but uses the upper "
+		DRM_DEBUG("Shader uses threading, but uses the upper "
 			  "half of the registers, too\n");
 		goto fail;
 	}
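Every message touched in the two vc4 files above is reachable by feeding a crafted shader to the validator from unprivileged userspace, so the series demotes them from DRM_ERROR (always in dmesg) to DRM_DEBUG (emitted only when debugging is enabled via the drm.debug module parameter). A minimal sketch of the convention follows, assuming only the stock DRM_DEBUG()/DRM_ERROR() printk macros; check_stride() and its bounds are hypothetical, for illustration only:

    /* Hypothetical validator helper illustrating the logging convention. */
    static bool check_stride(uint32_t stride, size_t bo_size)
    {
            if (stride == 0 || stride > bo_size) {
                    /* Userspace-triggerable failure: debug level, not error,
                     * so a hostile client cannot flood the kernel log.
                     */
                    DRM_DEBUG("invalid stride %u for %zu-byte BO\n",
                              stride, bo_size);
                    return false;
            }
            /* DRM_ERROR() stays reserved for genuine driver/hardware faults. */
            return true;
    }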
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 12289673f457..2524ff116f00 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -190,7 +190,7 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 		return ERR_CAST(obj);
 
 	ret = drm_gem_handle_create(file, &obj->base, handle);
-	drm_gem_object_unreference_unlocked(&obj->base);
+	drm_gem_object_put_unlocked(&obj->base);
 	if (ret)
 		goto err;
 
@@ -245,7 +245,7 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 unref:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 3109c8308eb5..8fd52f211e9d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -213,7 +213,7 @@ err_fence:
 		dma_fence_put(fence);
 	}
 err:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	return ret;
 }
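The vgem hunks are part of a tree-wide rename: drm_gem_object_unreference_unlocked() becomes drm_gem_object_put_unlocked(), with identical semantics but the conventional kref-style get/put naming. The shape of a typical call site, sketched under the assumption of a hypothetical my_gem_create_object() allocator (drm_gem_handle_create() and drm_gem_object_put_unlocked() are the real API):

    static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
                              struct drm_mode_create_dumb *args)
    {
            struct drm_gem_object *obj;
            int ret;

            obj = my_gem_create_object(dev, args->size); /* hypothetical */
            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            ret = drm_gem_handle_create(file, obj, &args->handle);
            /* The handle (if created) now owns a reference; drop ours. */
            drm_gem_object_put_unlocked(obj);
            return ret;
    }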
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 63d35c7e416c..49a3d8d5a249 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -122,7 +122,6 @@ static struct drm_driver driver = {
 
 	.dumb_create = virtio_gpu_mode_dumb_create,
 	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
-	.dumb_destroy = virtio_gpu_mode_dumb_destroy,
 
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = virtio_gpu_debugfs_init,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3a66abb8fd50..da2fb585fea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -236,9 +236,6 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
 int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 				struct drm_device *dev,
 				struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
-				 struct drm_device *dev,
-				 uint32_t handle);
 int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 046e28b69d99..15d18fd0c64b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -308,7 +308,7 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
 	return 0;
 }
 
-static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
 	.fb_probe = virtio_gpufb_create,
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index cc025d8fbe19..72ad7b103448 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -118,13 +118,6 @@ fail:
 	return ret;
 }
 
-int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
-				 struct drm_device *dev,
-				 uint32_t handle)
-{
-	return drm_gem_handle_delete(file_priv, handle);
-}
-
 int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      uint32_t handle, uint64_t *offset_p)
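virtio_gpu_mode_dumb_destroy() did nothing beyond calling drm_gem_handle_delete(), which appears to be exactly what the DRM core of this generation falls back to when a driver leaves .dumb_destroy unset, so the callback, its prototype, and its body can all be dropped. (The virtgpu_fb.c hunk is an unrelated cleanup constifying the drm_fb_helper_funcs table.) A paraphrase of the assumed core fallback, not the literal kernel source:

    /* Sketch: how DRM_IOCTL_MODE_DESTROY_DUMB is assumed to dispatch. */
    static int destroy_dumb(struct drm_device *dev, struct drm_file *file,
                            uint32_t handle)
    {
            if (dev->driver->dumb_destroy)
                    return dev->driver->dumb_destroy(file, dev, handle);
            /* Default for GEM drivers: just delete the handle. */
            return drm_gem_handle_delete(file, handle);
    }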
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
index bf6635826159..c1572843003a 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -343,7 +343,7 @@ void vbox_fbdev_fini(struct drm_device *dev)
 			vbox_bo_unpin(bo);
 			vbox_bo_unreserve(bo);
 		}
-		drm_gem_object_unreference_unlocked(afb->obj);
+		drm_gem_object_put_unlocked(afb->obj);
 		afb->obj = NULL;
 	}
 	drm_fb_helper_fini(&fbdev->helper);
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
index d0c6ec75a3c7..80bd039fa08e 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -40,7 +40,7 @@ static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
 	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
 
 	if (vbox_fb->obj)
-		drm_gem_object_unreference_unlocked(vbox_fb->obj);
+		drm_gem_object_put_unlocked(vbox_fb->obj);
 
 	drm_framebuffer_cleanup(fb);
 	kfree(fb);
@@ -198,7 +198,7 @@ static struct drm_framebuffer *vbox_user_framebuffer_create(
 err_free_vbox_fb:
 	kfree(vbox_fb);
 err_unref_obj:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	return ERR_PTR(ret);
 }
 
@@ -472,7 +472,7 @@ int vbox_dumb_create(struct drm_file *file,
 		return ret;
 
 	ret = drm_gem_handle_create(file, gobj, &handle);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (ret)
 		return ret;
 
@@ -525,7 +525,7 @@ vbox_dumb_mmap_offset(struct drm_file *file,
 	bo = gem_to_vbox_bo(obj);
 	*offset = vbox_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_put(obj);
 	ret = 0;
 
 out_unlock:
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 996da1c79158..e5b6383984e7 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -812,7 +812,7 @@ out_unmap_bo:
 out_unreserve_bo:
 	vbox_bo_unreserve(bo);
 out_unref_obj:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	return ret;
 }
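One vbox call site differs from the rest: vbox_dumb_mmap_offset() runs with dev->struct_mutex held, so it uses drm_gem_object_put(), the variant that expects the caller to already hold that lock, while every other converted site uses the _unlocked form. A sketch of that pattern, assuming a driver still serialized on struct_mutex; my_dumb_mmap_offset() is hypothetical, the drm_gem_* and drm_vma_* calls are real:

    static int my_dumb_mmap_offset(struct drm_file *file,
                                   struct drm_device *dev,
                                   uint32_t handle, uint64_t *offset)
    {
            struct drm_gem_object *obj;
            int ret = 0;

            mutex_lock(&dev->struct_mutex);
            obj = drm_gem_object_lookup(file, handle);
            if (!obj) {
                    ret = -ENOENT;
                    goto out_unlock;
            }
            *offset = drm_vma_node_offset_addr(&obj->vma_node);
            drm_gem_object_put(obj); /* legal: struct_mutex is held */
    out_unlock:
            mutex_unlock(&dev->struct_mutex);
            return ret;
    }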