author		Alex Deucher	2019-06-25 15:42:25 +0200
committer	Alex Deucher	2019-06-25 15:42:25 +0200
commit		d7929c1e13e3788e7cb741d75b5baec5e53eff21 (patch)
tree		cf513071edfc4499b4e025e4846244c9d0e4c6bd /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
parent		drm/amdgpu: disable gfxoff on navi10 (diff)
parent		drm/edid: use for_each_displayid_db where applicable (diff)
Merge branch 'drm-next' into drm-next-5.3
Backmerge drm-next and fix up conflicts due to drmP.h removal.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	245
1 file changed, 221 insertions(+), 24 deletions(-)
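
The vmwgfx changes picked up by this backmerge are the driver's new coherent-resource infrastructure: a resource used to sit on a per-buffer-object list (res->mob_head on the mob's res_list); it now hangs as res->mob_node in an rb-tree (vbo->res_tree) keyed by backup_offset, which lets the dirty-tracking code find every resource intersecting a page range of the mob. Below is a minimal sketch of that offset-keyed insertion pattern, written against the <linux/rbtree.h> API; struct demo_res and demo_res_insert() are hypothetical stand-ins for illustration, not the driver's real types.

#include <linux/rbtree.h>
#include <linux/kernel.h>	/* container_of() */

/* Hypothetical stand-in for a resource attached to a buffer object. */
struct demo_res {
	unsigned long offset;	/* key: start of the range in the buffer */
	unsigned long size;	/* length of the range in bytes */
	struct rb_node node;
};

/* Insert @res into @root, ordered by ascending offset. */
static void demo_res_insert(struct rb_root *root, struct demo_res *res)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Walk down to the leaf slot the key belongs in. */
	while (*new) {
		struct demo_res *this =
			container_of(*new, struct demo_res, node);

		parent = *new;
		new = (res->offset < this->offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Link the node at that leaf, then let the rb-tree rebalance. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);
}

This is the same walk, link, recolor sequence that vmw_resource_mob_attach() performs in the diff below.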
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1d38a8b2f2ec..d70ee0df5c13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -34,6 +34,51 @@
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+	struct vmw_buffer_object *backup = res->backup;
+	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+
+	lockdep_assert_held(&backup->base.resv->lock.base);
+	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+		res->func->prio;
+
+	while (*new) {
+		struct vmw_resource *this =
+			container_of(*new, struct vmw_resource, mob_node);
+
+		parent = *new;
+		new = (res->backup_offset < this->backup_offset) ?
+			&((*new)->rb_left) : &((*new)->rb_right);
+	}
+
+	rb_link_node(&res->mob_node, parent, new);
+	rb_insert_color(&res->mob_node, &backup->res_tree);
+
+	vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+	struct vmw_buffer_object *backup = res->backup;
+
+	lockdep_assert_held(&backup->base.resv->lock.base);
+	if (vmw_resource_mob_attached(res)) {
+		rb_erase(&res->mob_node, &backup->res_tree);
+		RB_CLEAR_NODE(&res->mob_node);
+		vmw_bo_prio_del(backup, res->used_prio);
+	}
+}
+
+
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
 	kref_get(&res->kref);
@@ -80,7 +125,7 @@ static void vmw_resource_release(struct kref *kref)
 			struct ttm_buffer_object *bo = &res->backup->base;
 
 			ttm_bo_reserve(bo, false, false, NULL);
-			if (!list_empty(&res->mob_head) &&
+			if (vmw_resource_mob_attached(res) &&
 			    res->func->unbind != NULL) {
 				struct ttm_validate_buffer val_buf;
 
@@ -89,7 +134,11 @@ static void vmw_resource_release(struct kref *kref)
 				res->func->unbind(res, false, &val_buf);
 			}
 			res->backup_dirty = false;
-			list_del_init(&res->mob_head);
+			vmw_resource_mob_detach(res);
+			if (res->dirty)
+				res->func->dirty_free(res);
+			if (res->coherent)
+				vmw_bo_dirty_release(res->backup);
 			ttm_bo_unreserve(bo);
 			vmw_bo_unreference(&res->backup);
 		}
@@ -171,14 +220,17 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	res->res_free = res_free;
 	res->dev_priv = dev_priv;
 	res->func = func;
+	RB_CLEAR_NODE(&res->mob_node);
 	INIT_LIST_HEAD(&res->lru_head);
-	INIT_LIST_HEAD(&res->mob_head);
 	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
 	res->backup = NULL;
 	res->backup_offset = 0;
 	res->backup_dirty = false;
 	res->res_dirty = false;
+	res->coherent = false;
+	res->used_prio = 3;
+	res->dirty = NULL;
 	if (delay_id)
 		return 0;
 	else
@@ -343,7 +395,8 @@ out_no_bo:
  * should be retried once resources have been freed up.
  */
 static int vmw_resource_do_validate(struct vmw_resource *res,
-				    struct ttm_validate_buffer *val_buf)
+				    struct ttm_validate_buffer *val_buf,
+				    bool dirtying)
 {
 	int ret = 0;
 	const struct vmw_res_func *func = res->func;
@@ -355,14 +408,47 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
 	}
 
 	if (func->bind &&
-	    ((func->needs_backup && list_empty(&res->mob_head) &&
+	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
 	      val_buf->bo != NULL) ||
 	     (!func->needs_backup && val_buf->bo != NULL))) {
 		ret = func->bind(res, val_buf);
 		if (unlikely(ret != 0))
 			goto out_bind_failed;
 		if (func->needs_backup)
-			list_add_tail(&res->mob_head, &res->backup->res_list);
+			vmw_resource_mob_attach(res);
+	}
+
+	/*
+	 * Handle the case where the backup mob is marked coherent but
+	 * the resource isn't.
+	 */
+	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
+	    !res->coherent) {
+		if (res->backup->dirty && !res->dirty) {
+			ret = func->dirty_alloc(res);
+			if (ret)
+				return ret;
+		} else if (!res->backup->dirty && res->dirty) {
+			func->dirty_free(res);
+		}
+	}
+
+	/*
+	 * Transfer the dirty regions to the resource and update
+	 * the resource.
+	 */
+	if (res->dirty) {
+		if (dirtying && !res->res_dirty) {
+			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+			pgoff_t end = __KERNEL_DIV_ROUND_UP
+				(res->backup_offset + res->backup_size,
+				 PAGE_SIZE);
+
+			vmw_bo_dirty_unmap(res->backup, start, end);
+		}
+
+		vmw_bo_dirty_transfer_to_res(res);
+		return func->dirty_sync(res);
 	}
 
 	return 0;
@@ -402,19 +488,29 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
 	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
-			lockdep_assert_held(&res->backup->base.resv->lock.base);
-			list_del_init(&res->mob_head);
+			vmw_resource_mob_detach(res);
+			if (res->coherent)
+				vmw_bo_dirty_release(res->backup);
 			vmw_bo_unreference(&res->backup);
 		}
 
 		if (new_backup) {
 			res->backup = vmw_bo_reference(new_backup);
-			lockdep_assert_held(&new_backup->base.resv->lock.base);
-			list_add_tail(&res->mob_head, &new_backup->res_list);
+
+			/*
+			 * The validation code should already have added a
+			 * dirty tracker here.
+			 */
+			WARN_ON(res->coherent && !new_backup->dirty);
+
+			vmw_resource_mob_attach(res);
 		} else {
 			res->backup = NULL;
 		}
+	} else if (switch_backup && res->coherent) {
+		vmw_bo_dirty_release(res->backup);
 	}
+
 	if (switch_backup)
 		res->backup_offset = new_backup_offset;
 
@@ -469,7 +565,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
-	if (res->func->needs_backup && list_empty(&res->mob_head))
+	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
 		return 0;
 
 	backup_dirty = res->backup_dirty;
@@ -574,11 +670,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 		return ret;
 
 	if (unlikely(func->unbind != NULL &&
-		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
+		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
 		ret = func->unbind(res, res->res_dirty, &val_buf);
 		if (unlikely(ret != 0))
 			goto out_no_unbind;
-		list_del_init(&res->mob_head);
+		vmw_resource_mob_detach(res);
 	}
 	ret = func->destroy(res);
 	res->backup_dirty = true;
@@ -595,6 +691,7 @@ out_no_unbind:
  * to the device.
  * @res: The resource to make visible to the device.
  * @intr: Perform waits interruptible if possible.
+ * @dirtying: Pending GPU operation will dirty the resource
  *
  * On successful return, any backup DMA buffer pointed to by @res->backup will
  * be reserved and validated.
@@ -604,7 +701,8 @@ out_no_unbind:
  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
  * on failure.
  */
-int vmw_resource_validate(struct vmw_resource *res, bool intr)
+int vmw_resource_validate(struct vmw_resource *res, bool intr,
+			  bool dirtying)
 {
 	int ret;
 	struct vmw_resource *evict_res;
@@ -621,7 +719,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
 	if (res->backup)
 		val_buf.bo = &res->backup->base;
 	do {
-		ret = vmw_resource_do_validate(res, &val_buf);
+		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 		if (likely(ret != -EBUSY))
 			break;
 
@@ -660,7 +758,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
 	if (unlikely(ret != 0))
 		goto out_no_validate;
 	else if (!res->func->needs_backup && res->backup) {
-		list_del_init(&res->mob_head);
+		WARN_ON_ONCE(vmw_resource_mob_attached(res));
 		vmw_bo_unreference(&res->backup);
 	}
 
@@ -684,22 +782,23 @@ out_no_validate:
  */
 void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 {
-
-	struct vmw_resource *res, *next;
 	struct ttm_validate_buffer val_buf = {
 		.bo = &vbo->base,
 		.num_shared = 0
 	};
 
 	lockdep_assert_held(&vbo->base.resv->lock.base);
-	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
-		if (!res->func->unbind)
-			continue;
+	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
+		struct rb_node *node = vbo->res_tree.rb_node;
+		struct vmw_resource *res =
+			container_of(node, struct vmw_resource, mob_node);
+
+		if (!WARN_ON_ONCE(!res->func->unbind))
+			(void) res->func->unbind(res, res->res_dirty, &val_buf);
 
-		(void) res->func->unbind(res, res->res_dirty, &val_buf);
 		res->backup_dirty = true;
 		res->res_dirty = false;
-		list_del_init(&res->mob_head);
+		vmw_resource_mob_detach(res);
 	}
 
 	(void) ttm_bo_wait(&vbo->base, false, false);
@@ -920,7 +1019,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 			/* Do we really need to pin the MOB as well? */
 			vmw_bo_pin_reserved(vbo, true);
 		}
-		ret = vmw_resource_validate(res, interruptible);
+		ret = vmw_resource_validate(res, interruptible, true);
 		if (vbo)
 			ttm_bo_unreserve(&vbo->base);
 		if (ret)
@@ -980,3 +1079,101 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
 {
 	return res->func->res_type;
 }
+
+/**
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
+ * sequential range of touched backing store memory.
+ * @res: The resource.
+ * @start: The first page touched.
+ * @end: The last page touched + 1.
+ */
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+			       pgoff_t end)
+{
+	if (res->dirty)
+		res->func->dirty_range_add(res, start << PAGE_SHIFT,
+					   end << PAGE_SHIFT);
+}
+
+/**
+ * vmw_resources_clean - Clean resources intersecting a mob range
+ * @vbo: The mob buffer object
+ * @start: The mob page offset starting the range
+ * @end: The mob page offset ending the range
+ * @num_prefault: Returns how many pages including the first have been
+ * cleaned and are ok to prefault
+ */
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+			pgoff_t end, pgoff_t *num_prefault)
+{
+	struct rb_node *cur = vbo->res_tree.rb_node;
+	struct vmw_resource *found = NULL;
+	unsigned long res_start = start << PAGE_SHIFT;
+	unsigned long res_end = end << PAGE_SHIFT;
+	unsigned long last_cleaned = 0;
+
+	/*
+	 * Find the resource with lowest backup_offset that intersects the
+	 * range.
+	 */
+	while (cur) {
+		struct vmw_resource *cur_res =
+			container_of(cur, struct vmw_resource, mob_node);
+
+		if (cur_res->backup_offset >= res_end) {
+			cur = cur->rb_left;
+		} else if (cur_res->backup_offset + cur_res->backup_size <=
+			   res_start) {
+			cur = cur->rb_right;
+		} else {
+			found = cur_res;
+			cur = cur->rb_left;
+			/* Continue to look for resources with lower offsets */
+		}
+	}
+
+	/*
+	 * In order of increasing backup_offset, clean dirty resources
+	 * intersecting the range.
+	 */
+	while (found) {
+		if (found->res_dirty) {
+			int ret;
+
+			if (!found->func->clean)
+				return -EINVAL;
+
+			ret = found->func->clean(found);
+			if (ret)
+				return ret;
+
+			found->res_dirty = false;
+		}
+		last_cleaned = found->backup_offset + found->backup_size;
+		cur = rb_next(&found->mob_node);
+		if (!cur)
+			break;
+
+		found = container_of(cur, struct vmw_resource, mob_node);
+		if (found->backup_offset >= res_end)
+			break;
+	}
+
+	/*
+	 * Set number of pages allowed prefaulting and fence the buffer object
+	 */
+	*num_prefault = 1;
+	if (last_cleaned > res_start) {
+		struct ttm_buffer_object *bo = &vbo->base;
+
+		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
+						      PAGE_SIZE);
+		vmw_bo_fence_single(bo, NULL);
+		if (bo->moving)
+			dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get
+			(reservation_object_get_excl(bo->resv));
+	}
+
+	return 0;
+}
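
The lookup at the top of vmw_resources_clean() above is the matching range-walk idiom: descend the tree, remember the last node seen to intersect [start, end), and keep going left in search of a lower-offset hit; in-order successors via rb_next() then arrive in ascending backup_offset order. Here is a hedged sketch reusing the hypothetical demo_res type from the sketch above; like the driver code, it assumes attached ranges do not overlap (the tree is a plain rb-tree, not an augmented interval tree).

/* Return the lowest-offset node in @root intersecting [start, end). */
static struct demo_res *demo_res_first_intersect(struct rb_root *root,
						 unsigned long start,
						 unsigned long end)
{
	struct rb_node *cur = root->rb_node;
	struct demo_res *found = NULL;

	while (cur) {
		struct demo_res *this =
			container_of(cur, struct demo_res, node);

		if (this->offset >= end) {
			cur = cur->rb_left;	/* starts past the range */
		} else if (this->offset + this->size <= start) {
			cur = cur->rb_right;	/* ends before the range */
		} else {
			found = this;		/* hit; try for a lower offset */
			cur = cur->rb_left;
		}
	}
	return found;
}

From that first hit, a caller iterates with rb_next() and stops once a node's offset reaches end, exactly as the second loop in vmw_resources_clean() does.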