From 4251fa5fc3bbe3e1df99f60a738d367373ee335f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 4 Mar 2019 19:37:40 +0100 Subject: drm/vmwgfx: Assign eviction priorities to resources TTM provides a means to assign eviction priorities to buffer objects. This means that all buffer objects with a lower priority will be evicted first on memory pressure. Use this to make sure surfaces, and in particular non-dirty surfaces, are evicted first. Evicting in particular shaders, cotables and contexts implies a significant performance hit on vmwgfx, so make sure these resources are evicted last. Some buffer objects are sub-allocated in user-space, which means we can have many resources attached to a single buffer object or resource. In that case the buffer object is given the highest priority of the attached resources. Signed-off-by: Thomas Hellstrom Reviewed-by: Deepak Rawat Reviewed-by: Emil Velikov --- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_surface.c') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 219471903bc1..c40d44f4d9af 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -112,6 +112,8 @@ static const struct vmw_res_func vmw_legacy_surface_func = { .res_type = vmw_res_surface, .needs_backup = false, .may_evict = true, + .prio = 1, + .dirty_prio = 1, .type_name = "legacy surfaces", .backup_placement = &vmw_srf_placement, .create = &vmw_legacy_srf_create, @@ -124,6 +126,8 @@ static const struct vmw_res_func vmw_gb_surface_func = { .res_type = vmw_res_surface, .needs_backup = true, .may_evict = true, + .prio = 1, + .dirty_prio = 2, .type_name = "guest backed surfaces", .backup_placement = &vmw_mob_placement, .create = vmw_gb_surface_create, -- cgit v1.2.3-55-g7522 From 4ba397671237784a212378c271f700e99c66cf39 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 19 Mar 2019 13:27:50 +0100 Subject: drm/vmwgfx: Add surface dirty-tracking callbacks Add the callbacks necessary to implement emulated coherent memory for surfaces. Add a flag to the gb_surface_create ioctl to indicate that surface memory should be coherent. Also bump the drm minor version to signal the availability of coherent surfaces. Signed-off-by: Thomas Hellstrom Reviewed-by: Deepak Rawat --- .../drm/vmwgfx/device_include/svga3d_surfacedefs.h | 233 +++++++++++- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 +- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 395 ++++++++++++++++++++- include/uapi/drm/vmwgfx_drm.h | 4 +- 4 files changed, 629 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_surface.c') diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h index f2bfd3d80598..61414f105c67 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h @@ -1280,7 +1280,6 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, return offset; } - static inline u32 svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, surf_size_struct baseLevelSize, @@ -1375,4 +1374,236 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format) return svga3dsurface_is_dx_screen_target_format(format); } +/** + * struct svga3dsurface_mip - Mipmap level information + * @bytes: Bytes required in the backing store of this mipmap level. + * @img_stride: Byte stride per image.
+ * @row_stride: Byte stride per block row. + * @size: The size of the mipmap. + */ +struct svga3dsurface_mip { + size_t bytes; + size_t img_stride; + size_t row_stride; + struct drm_vmw_size size; + +}; + +/** + * struct svga3dsurface_cache - Cached surface information + * @desc: Pointer to the surface descriptor. + * @mip: Array of mipmap level information. Valid size is @num_mip_levels. + * @mip_chain_bytes: Bytes required in the backing store for the whole chain + * of mip levels. + * @sheet_bytes: Bytes required in the backing store for a sheet + * representing a single sample. + * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in + * a chain. + * @num_layers: Number of slices in an array texture or number of faces in + * a cubemap texture. + */ +struct svga3dsurface_cache { + const struct svga3d_surface_desc *desc; + struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS]; + size_t mip_chain_bytes; + size_t sheet_bytes; + u32 num_mip_levels; + u32 num_layers; +}; + +/** + * struct svga3dsurface_loc - Surface location + * @sub_resource: Surface subresource. Defined as layer * num_mip_levels + + * mip_level. + * @x: X coordinate. + * @y: Y coordinate. + * @z: Z coordinate. + */ +struct svga3dsurface_loc { + u32 sub_resource; + u32 x, y, z; +}; + +/** + * svga3dsurface_subres - Compute the subresource from layer and mipmap. + * @cache: Surface layout data. + * @mip_level: The mipmap level. + * @layer: The surface layer (face or array slice). + * + * Return: The subresource. + */ +static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache, + u32 mip_level, u32 layer) +{ + return cache->num_mip_levels * layer + mip_level; +} + +/** + * svga3dsurface_setup_cache - Build a surface cache entry + * @size: The surface base level dimensions. + * @format: The surface format. + * @num_mip_levels: Number of mipmap levels. + * @num_layers: Number of layers. + * @num_samples: Number of samples per pixel. + * @cache: Pointer to a struct svga3dsurface_cache object to be filled in. + * + * Return: Zero on success, -EINVAL on invalid surface layout. + */ +static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size, + SVGA3dSurfaceFormat format, + u32 num_mip_levels, + u32 num_layers, + u32 num_samples, + struct svga3dsurface_cache *cache) +{ + const struct svga3d_surface_desc *desc; + u32 i; + + memset(cache, 0, sizeof(*cache)); + cache->desc = desc = svga3dsurface_get_desc(format); + cache->num_mip_levels = num_mip_levels; + cache->num_layers = num_layers; + for (i = 0; i < cache->num_mip_levels; i++) { + struct svga3dsurface_mip *mip = &cache->mip[i]; + + mip->size = svga3dsurface_get_mip_size(*size, i); + mip->bytes = svga3dsurface_get_image_buffer_size + (desc, &mip->size, 0); + mip->row_stride = + __KERNEL_DIV_ROUND_UP(mip->size.width, + desc->block_size.width) * + desc->bytes_per_block * num_samples; + if (!mip->row_stride) + goto invalid_dim; + + mip->img_stride = + __KERNEL_DIV_ROUND_UP(mip->size.height, + desc->block_size.height) * + mip->row_stride; + if (!mip->img_stride) + goto invalid_dim; + + cache->mip_chain_bytes += mip->bytes; + } + cache->sheet_bytes = cache->mip_chain_bytes * num_layers; + if (!cache->sheet_bytes) + goto invalid_dim; + + return 0; + +invalid_dim: + VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n"); + return -EINVAL; +} + +/** + * svga3dsurface_get_loc - Get a surface location from an offset into the + * backing store + * @cache: Surface layout data. + * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ * @offset: Offset into the surface backing store. + */ +static inline void +svga3dsurface_get_loc(const struct svga3dsurface_cache *cache, + struct svga3dsurface_loc *loc, + size_t offset) +{ + const struct svga3dsurface_mip *mip = &cache->mip[0]; + const struct svga3d_surface_desc *desc = cache->desc; + u32 layer; + int i; + + if (offset >= cache->sheet_bytes) + offset %= cache->sheet_bytes; + + layer = offset / cache->mip_chain_bytes; + offset -= layer * cache->mip_chain_bytes; + for (i = 0; i < cache->num_mip_levels; ++i, ++mip) { + if (mip->bytes > offset) + break; + offset -= mip->bytes; + } + + loc->sub_resource = svga3dsurface_subres(cache, i, layer); + loc->z = offset / mip->img_stride; + offset -= loc->z * mip->img_stride; + loc->z *= desc->block_size.depth; + loc->y = offset / mip->row_stride; + offset -= loc->y * mip->row_stride; + loc->y *= desc->block_size.height; + loc->x = offset / desc->bytes_per_block; + loc->x *= desc->block_size.width; +} + +/** + * svga3dsurface_inc_loc - Clamp-increment a surface location by one block + * size in each dimension. + * @cache: Surface layout data. + * @loc: Pointer to a struct svga3dsurface_loc to be incremented. + * + * When computing the size of a range as size = end - start, the range does not + * include the end element. However, a location representing the last byte + * of a touched region in the backing store *is* included in the range. + * This function modifies such a location to match the end definition + * given as start + size, which is the one used in an SVGA3dBox. + */ +static inline void +svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache, + struct svga3dsurface_loc *loc) +{ + const struct svga3d_surface_desc *desc = cache->desc; + u32 mip = loc->sub_resource % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + + loc->sub_resource++; + loc->x += desc->block_size.width; + if (loc->x > size->width) + loc->x = size->width; + loc->y += desc->block_size.height; + if (loc->y > size->height) + loc->y = size->height; + loc->z += desc->block_size.depth; + if (loc->z > size->depth) + loc->z = size->depth; +} + +/** + * svga3dsurface_min_loc - The start location in a subresource + * @cache: Surface layout data. + * @sub_resource: The subresource. + * @loc: Pointer to a struct svga3dsurface_loc to be filled in. + */ +static inline void +svga3dsurface_min_loc(const struct svga3dsurface_cache *cache, + u32 sub_resource, + struct svga3dsurface_loc *loc) +{ + loc->sub_resource = sub_resource; + loc->x = loc->y = loc->z = 0; +} + +/** + * svga3dsurface_max_loc - The end location in a subresource + * @cache: Surface layout data. + * @sub_resource: The subresource. + * @loc: Pointer to a struct svga3dsurface_loc to be filled in. + * + * Following the end definition given in svga3dsurface_inc_loc(), + * compute the end location of a surface subresource.
+ */ +static inline void +svga3dsurface_max_loc(const struct svga3dsurface_cache *cache, + u32 sub_resource, + struct svga3dsurface_loc *loc) +{ + const struct drm_vmw_size *size; + u32 mip; + + loc->sub_resource = sub_resource + 1; + mip = sub_resource % cache->num_mip_levels; + size = &cache->mip[mip].size; + loc->x = size->width; + loc->y = size->height; + loc->z = size->depth; +} + #endif /* _SVGA3D_SURFACEDEFS_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index dae3a39bf402..5971c0d47507 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -44,9 +44,9 @@ #include #define VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20180704" +#define VMWGFX_DRIVER_DATE "20190328" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 15 +#define VMWGFX_DRIVER_MINOR 16 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index c40d44f4d9af..637043f1befa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -68,6 +68,20 @@ struct vmw_surface_offset { uint32_t bo_offset; }; +/** + * vmw_surface_dirty - Surface dirty-tracker + * @cache: Cached layout information of the surface. + * @size: Accounting size for the struct vmw_surface_dirty. + * @num_subres: Number of subresources. + * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource. + */ +struct vmw_surface_dirty { + struct svga3dsurface_cache cache; + size_t size; + u32 num_subres; + SVGA3dBox boxes[0]; +}; + static void vmw_user_surface_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_surface_base_to_res(struct ttm_base_object *base); @@ -96,6 +110,13 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, struct drm_vmw_gb_surface_ref_ext_rep *rep, struct drm_file *file_priv); +static void vmw_surface_dirty_free(struct vmw_resource *res); +static int vmw_surface_dirty_alloc(struct vmw_resource *res); +static int vmw_surface_dirty_sync(struct vmw_resource *res); +static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, + size_t end); +static int vmw_surface_clean(struct vmw_resource *res); + static const struct vmw_user_resource_conv user_surface_conv = { .object_type = VMW_RES_SURFACE, .base_obj_to_res = vmw_user_surface_base_to_res, @@ -133,7 +154,12 @@ static const struct vmw_res_func vmw_gb_surface_func = { .create = vmw_gb_surface_create, .destroy = vmw_gb_surface_destroy, .bind = vmw_gb_surface_bind, - .unbind = vmw_gb_surface_unbind + .unbind = vmw_gb_surface_unbind, + .dirty_alloc = vmw_surface_dirty_alloc, + .dirty_free = vmw_surface_dirty_free, + .dirty_sync = vmw_surface_dirty_sync, + .dirty_range_add = vmw_surface_dirty_range_add, + .clean = vmw_surface_clean, }; /** @@ -641,6 +667,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_private *dev_priv = srf->res.dev_priv; uint32_t size = user_srf->size; + WARN_ON_ONCE(res->dirty); if (user_srf->master) drm_master_put(&user_srf->master); kfree(srf->offsets); @@ -1174,10 +1201,16 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; cmd2->header.size = sizeof(cmd2->body); cmd2->body.sid = res->id; - res->backup_dirty = false; } vmw_fifo_commit(dev_priv, submit_size); + if (res->backup->dirty && res->backup_dirty) { + /* We've just made a full 
upload. Clear dirty regions. */ + vmw_bo_dirty_clear_res(res); + } + + res->backup_dirty = false; + + return 0; } @@ -1642,7 +1675,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } } } else if (req->base.drm_surface_flags & - drm_vmw_surface_flag_create_buffer) + (drm_vmw_surface_flag_create_buffer | + drm_vmw_surface_flag_coherent)) ret = vmw_user_bo_alloc(dev_priv, tfile, res->backup_size, req->base.drm_surface_flags & @@ -1656,6 +1690,26 @@ vmw_gb_surface_define_internal(struct drm_device *dev, goto out_unlock; } + if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { + struct vmw_buffer_object *backup = res->backup; + + ttm_bo_reserve(&backup->base, false, false, NULL); + if (!res->func->dirty_alloc) + ret = -EINVAL; + if (!ret) + ret = vmw_bo_dirty_add(backup); + if (!ret) { + res->coherent = true; + ret = res->func->dirty_alloc(res); + } + ttm_bo_unreserve(&backup->base); + if (ret) { + vmw_resource_unreference(&res); + goto out_unlock; + } + + } + tmp = vmw_resource_reference(res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->base.drm_surface_flags & @@ -1764,3 +1818,338 @@ out_bad_resource: return ret; } + +/** + * vmw_subres_dirty_add - Add a dirty region to a subresource + * @dirty: The surface's dirty tracker. + * @loc_start: The location corresponding to the start of the region. + * @loc_end: The location corresponding to the end of the region. + * + * As we are assuming that @loc_start and @loc_end represent a sequential + * range of backing store memory, if the region spans multiple lines, then + * regardless of the x coordinate, the full lines are dirtied. + * Correspondingly, if the region spans multiple z slices, then full rather + * than partial z slices are dirtied. + */ +static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, + const struct svga3dsurface_loc *loc_start, + const struct svga3dsurface_loc *loc_end) +{ + const struct svga3dsurface_cache *cache = &dirty->cache; + SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource]; + u32 mip = loc_start->sub_resource % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + u32 box_c2 = box->z + box->d; + + if (WARN_ON(loc_start->sub_resource >= dirty->num_subres)) + return; + + if (box->d == 0 || box->z > loc_start->z) + box->z = loc_start->z; + if (box_c2 < loc_end->z) + box->d = loc_end->z - box->z; + + if (loc_start->z + 1 == loc_end->z) { + box_c2 = box->y + box->h; + if (box->h == 0 || box->y > loc_start->y) + box->y = loc_start->y; + if (box_c2 < loc_end->y) + box->h = loc_end->y - box->y; + + if (loc_start->y + 1 == loc_end->y) { + box_c2 = box->x + box->w; + if (box->w == 0 || box->x > loc_start->x) + box->x = loc_start->x; + if (box_c2 < loc_end->x) + box->w = loc_end->x - box->x; + } else { + box->x = 0; + box->w = size->width; + } + } else { + box->y = 0; + box->h = size->height; + box->x = 0; + box->w = size->width; + } +} + +/** + * vmw_subres_dirty_full - Mark a full subresource as dirty + * @dirty: The surface's dirty tracker.
+ * @subres: The subresource. + */ +static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres) +{ + const struct svga3dsurface_cache *cache = &dirty->cache; + u32 mip = subres % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + SVGA3dBox *box = &dirty->boxes[subres]; + + box->x = 0; + box->y = 0; + box->z = 0; + box->w = size->width; + box->h = size->height; + box->d = size->depth; +} + +/* + * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture + * surfaces. + */ +static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res, + size_t start, size_t end) +{ + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + size_t backup_end = res->backup_offset + res->backup_size; + struct svga3dsurface_loc loc1, loc2; + const struct svga3dsurface_cache *cache; + + start = max_t(size_t, start, res->backup_offset) - res->backup_offset; + end = min(end, backup_end) - res->backup_offset; + cache = &dirty->cache; + svga3dsurface_get_loc(cache, &loc1, start); + svga3dsurface_get_loc(cache, &loc2, end - 1); + svga3dsurface_inc_loc(cache, &loc2); + + if (loc1.sub_resource + 1 == loc2.sub_resource) { + /* Dirty range covers a single sub-resource */ + vmw_subres_dirty_add(dirty, &loc1, &loc2); + } else { + /* Dirty range covers multiple sub-resources */ + struct svga3dsurface_loc loc_min, loc_max; + u32 sub_res = loc1.sub_resource; + + svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max); + vmw_subres_dirty_add(dirty, &loc1, &loc_max); + svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min); + vmw_subres_dirty_add(dirty, &loc_min, &loc2); + for (sub_res = loc1.sub_resource + 1; + sub_res < loc2.sub_resource - 1; ++sub_res) + vmw_subres_dirty_full(dirty, sub_res); + } +} + +/* + * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer + * surfaces. + */ +static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res, + size_t start, size_t end) +{ + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + const struct svga3dsurface_cache *cache = &dirty->cache; + size_t backup_end = res->backup_offset + cache->mip_chain_bytes; + SVGA3dBox *box = &dirty->boxes[0]; + u32 box_c2; + + box->h = box->d = 1; + start = max_t(size_t, start, res->backup_offset) - res->backup_offset; + end = min(end, backup_end) - res->backup_offset; + box_c2 = box->x + box->w; + if (box->w == 0 || box->x > start) + box->x = start; + if (box_c2 < end) + box->w = end - box->x; +} + +/* + * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces + */ +static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, + size_t end) +{ + struct vmw_surface *srf = vmw_res_to_srf(res); + + if (WARN_ON(end <= res->backup_offset || + start >= res->backup_offset + res->backup_size)) + return; + + if (srf->format == SVGA3D_BUFFER) + vmw_surface_buf_dirty_range_add(res, start, end); + else + vmw_surface_tex_dirty_range_add(res, start, end); +} + +/* + * vmw_surface_dirty_sync - The surface's dirty_sync callback.
+ */ +static int vmw_surface_dirty_sync(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + bool has_dx = 0; + u32 i, num_dirty; + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + size_t alloc_size; + const struct svga3dsurface_cache *cache = &dirty->cache; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXUpdateSubResource body; + } *cmd1; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdUpdateGBImage body; + } *cmd2; + void *cmd; + + num_dirty = 0; + for (i = 0; i < dirty->num_subres; ++i) { + const SVGA3dBox *box = &dirty->boxes[i]; + + if (box->d) + num_dirty++; + } + + if (!num_dirty) + goto out; + + alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2)); + cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); + if (!cmd) + return -ENOMEM; + + cmd1 = cmd; + cmd2 = cmd; + + for (i = 0; i < dirty->num_subres; ++i) { + const SVGA3dBox *box = &dirty->boxes[i]; + + if (!box->d) + continue; + + /* + * DX_UPDATE_SUBRESOURCE is aware of array surfaces. + * UPDATE_GB_IMAGE is not. + */ + if (has_dx) { + cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE; + cmd1->header.size = sizeof(cmd1->body); + cmd1->body.sid = res->id; + cmd1->body.subResource = i; + cmd1->body.box = *box; + cmd1++; + } else { + cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; + cmd2->header.size = sizeof(cmd2->body); + cmd2->body.image.sid = res->id; + cmd2->body.image.face = i / cache->num_mip_levels; + cmd2->body.image.mipmap = i - + (cache->num_mip_levels * cmd2->body.image.face); + cmd2->body.box = *box; + cmd2++; + } + + } + vmw_fifo_commit(dev_priv, alloc_size); + out: + memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) * + dirty->num_subres); + + return 0; +} + +/* + * vmw_surface_dirty_alloc - The surface's dirty_alloc callback. 
+ */ +static int vmw_surface_dirty_alloc(struct vmw_resource *res) +{ + struct vmw_surface *srf = vmw_res_to_srf(res); + struct vmw_surface_dirty *dirty; + u32 num_layers = 1; + u32 num_mip; + u32 num_subres; + u32 num_samples; + size_t dirty_size, acc_size; + static struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; + int ret; + + if (srf->array_size) + num_layers = srf->array_size; + else if (srf->flags & SVGA3D_SURFACE_CUBEMAP) + num_layers *= SVGA3D_MAX_SURFACE_FACES; + + num_mip = srf->mip_levels[0]; + if (!num_mip) + num_mip = 1; + + num_subres = num_layers * num_mip; + dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]); + acc_size = ttm_round_pot(dirty_size); + ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), + acc_size, &ctx); + if (ret) { + VMW_DEBUG_USER("Out of graphics memory for surface " + "dirty tracker.\n"); + return ret; + } + + dirty = kvzalloc(dirty_size, GFP_KERNEL); + if (!dirty) { + ret = -ENOMEM; + goto out_no_dirty; + } + + num_samples = max_t(u32, 1, srf->multisample_count); + ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip, + num_layers, num_samples, &dirty->cache); + if (ret) + goto out_no_cache; + + dirty->num_subres = num_subres; + dirty->size = acc_size; + res->dirty = (struct vmw_resource_dirty *) dirty; + + return 0; + +out_no_cache: + kvfree(dirty); +out_no_dirty: + ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); + return ret; +} + +/* + * vmw_surface_dirty_free - The surface's dirty_free callback + */ +static void vmw_surface_dirty_free(struct vmw_resource *res) +{ + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + size_t acc_size = dirty->size; + + kvfree(dirty); + ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); + res->dirty = NULL; +} + +/* + * vmw_surface_clean - The surface's clean callback + */ +static int vmw_surface_clean(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + size_t alloc_size; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdReadbackGBSurface body; + } *cmd; + + alloc_size = sizeof(*cmd); + cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); + if (!cmd) + return -ENOMEM; + + cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; + cmd->header.size = sizeof(cmd->body); + cmd->body.sid = res->id; + vmw_fifo_commit(dev_priv, alloc_size); + + return 0; +} diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 399f58317cff..02cab33f2f25 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -891,11 +891,13 @@ struct drm_vmw_shader_arg { * surface. * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is * given. + * @drm_vmw_surface_flag_coherent: Back surface with coherent memory. */ enum drm_vmw_surface_flags { drm_vmw_surface_flag_shareable = (1 << 0), drm_vmw_surface_flag_scanout = (1 << 1), - drm_vmw_surface_flag_create_buffer = (1 << 2) + drm_vmw_surface_flag_create_buffer = (1 << 2), + drm_vmw_surface_flag_coherent = (1 << 3), }; /** -- cgit v1.2.3-55-g7522 From 9bbfda544ed79e8e9abde27bfe2c85428d582e7b Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 28 May 2019 08:08:55 +0200 Subject: drm/vmwgfx: Kill unneeded legacy security features At one point, the GPU command verifier and user-space handle manager couldn't properly protect GPU clients from accessing each other's data. Instead there was an elaborate mechanism to make sure only the active master's primary clients could render. 
The other clients were either put to sleep or even killed (if the master had exited). VRAM was evicted on master switch. With the advent of render-node functionality, we relaxed the VRAM eviction, but the other mechanisms stayed in place. Now that the GPU command verifier and ttm object manager properly isolate primary clients from different master realms we can remove the master switch related code and drop those legacy features. Signed-off-by: Thomas Hellstrom Reviewed-by: Brian Paul Reviewed-by: Deepak Rawat Acked-by: Emil Velikov --- drivers/gpu/drm/vmwgfx/ttm_lock.c | 100 -------------------- drivers/gpu/drm/vmwgfx/ttm_lock.h | 30 ------ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 162 +------------------------------- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 16 +--- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 6 -- 5 files changed, 3 insertions(+), 311 deletions(-) (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_surface.c') diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c index 16b2083cb9d4..5971c72e6d10 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_lock.c +++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c @@ -29,7 +29,6 @@ * Authors: Thomas Hellstrom */ -#include #include #include #include @@ -49,8 +48,6 @@ void ttm_lock_init(struct ttm_lock *lock) init_waitqueue_head(&lock->queue); lock->rw = 0; lock->flags = 0; - lock->kill_takers = false; - lock->signal = SIGKILL; } void ttm_read_unlock(struct ttm_lock *lock) @@ -66,11 +63,6 @@ static bool __ttm_read_lock(struct ttm_lock *lock) bool locked = false; spin_lock(&lock->lock); - if (unlikely(lock->kill_takers)) { - send_sig(lock->signal, current, 0); - spin_unlock(&lock->lock); - return false; - } if (lock->rw >= 0 && lock->flags == 0) { ++lock->rw; locked = true; @@ -98,11 +90,6 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) *locked = false; spin_lock(&lock->lock); - if (unlikely(lock->kill_takers)) { - send_sig(lock->signal, current, 0); - spin_unlock(&lock->lock); - return false; - } if (lock->rw >= 0 && lock->flags == 0) { ++lock->rw; block = false; @@ -147,11 +134,6 @@ static bool __ttm_write_lock(struct ttm_lock *lock) bool locked = false; spin_lock(&lock->lock); - if (unlikely(lock->kill_takers)) { - send_sig(lock->signal, current, 0); - spin_unlock(&lock->lock); - return false; - } if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { lock->rw = -1; lock->flags &= ~TTM_WRITE_LOCK_PENDING; @@ -182,88 +164,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) return ret; } -static int __ttm_vt_unlock(struct ttm_lock *lock) -{ - int ret = 0; - - spin_lock(&lock->lock); - if (unlikely(!(lock->flags & TTM_VT_LOCK))) - ret = -EINVAL; - lock->flags &= ~TTM_VT_LOCK; - wake_up_all(&lock->queue); - spin_unlock(&lock->lock); - - return ret; -} - -static void ttm_vt_lock_remove(struct ttm_base_object **p_base) -{ - struct ttm_base_object *base = *p_base; - struct ttm_lock *lock = container_of(base, struct ttm_lock, base); - int ret; - - *p_base = NULL; - ret = __ttm_vt_unlock(lock); - BUG_ON(ret != 0); -} - -static bool __ttm_vt_lock(struct ttm_lock *lock) -{ - bool locked = false; - - spin_lock(&lock->lock); - if (lock->rw == 0) { - lock->flags &= ~TTM_VT_LOCK_PENDING; - lock->flags |= TTM_VT_LOCK; - locked = true; - } else { - lock->flags |= TTM_VT_LOCK_PENDING; - } - spin_unlock(&lock->lock); - return locked; -} - -int ttm_vt_lock(struct ttm_lock *lock, - bool interruptible, - struct ttm_object_file *tfile) -{ - int ret = 0; - - if (interruptible) { - ret = 
wait_event_interruptible(lock->queue, - __ttm_vt_lock(lock)); - if (unlikely(ret != 0)) { - spin_lock(&lock->lock); - lock->flags &= ~TTM_VT_LOCK_PENDING; - wake_up_all(&lock->queue); - spin_unlock(&lock->lock); - return ret; - } - } else - wait_event(lock->queue, __ttm_vt_lock(lock)); - - /* - * Add a base-object, the destructor of which will - * make sure the lock is released if the client dies - * while holding it. - */ - - ret = ttm_base_object_init(tfile, &lock->base, false, - ttm_lock_type, &ttm_vt_lock_remove, NULL); - if (ret) - (void)__ttm_vt_unlock(lock); - else - lock->vt_holder = tfile; - - return ret; -} - -int ttm_vt_unlock(struct ttm_lock *lock) -{ - return ttm_ref_object_base_unref(lock->vt_holder, - lock->base.handle, TTM_REF_USAGE); -} - void ttm_suspend_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h index 0c3af9836863..3d454e8b491f 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_lock.h +++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h @@ -63,8 +63,6 @@ * @lock: Spinlock protecting some lock members. * @rw: Read-write lock counter. Protected by @lock. * @flags: Lock state. Protected by @lock. - * @kill_takers: Boolean whether to kill takers of the lock. - * @signal: Signal to send when kill_takers is true. */ struct ttm_lock { @@ -73,9 +71,6 @@ struct ttm_lock { spinlock_t lock; int32_t rw; uint32_t flags; - bool kill_takers; - int signal; - struct ttm_object_file *vt_holder; }; @@ -220,29 +215,4 @@ extern void ttm_write_unlock(struct ttm_lock *lock); */ extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); -/** - * ttm_lock_set_kill - * - * @lock: Pointer to a struct ttm_lock - * @val: Boolean whether to kill processes taking the lock. - * @signal: Signal to send to the process taking the lock. - * - * The kill-when-taking-lock functionality is used to kill processes that keep - * on using the TTM functionality when its resources has been taken down, for - * example when the X server exits. A typical sequence would look like this: - * - X server takes lock in write mode. - * - ttm_lock_set_kill() is called with @val set to true. - * - As part of X server exit, TTM resources are taken down. - * - X server releases the lock on file release. - * - Another dri client wants to render, takes the lock and is killed. 
- * - */ -static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, - int signal) -{ - lock->kill_takers = val; - if (val) - lock->signal = signal; -} - #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index d59c474be38e..8349a6cc126f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -254,7 +254,6 @@ static int vmw_restrict_dma_mask; static int vmw_assume_16bpp; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); -static void vmw_master_init(struct vmw_master *); static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); @@ -762,10 +761,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) DRM_INFO("MMIO at 0x%08x size is %u kiB\n", dev_priv->mmio_start, dev_priv->mmio_size / 1024); - vmw_master_init(&dev_priv->fbdev_master); - ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); - dev_priv->active_master = &dev_priv->fbdev_master; - dev_priv->mmio_virt = memremap(dev_priv->mmio_start, dev_priv->mmio_size, MEMREMAP_WB); @@ -1012,18 +1007,7 @@ static void vmw_driver_unload(struct drm_device *dev) static void vmw_postclose(struct drm_device *dev, struct drm_file *file_priv) { - struct vmw_fpriv *vmw_fp; - - vmw_fp = vmw_fpriv(file_priv); - - if (vmw_fp->locked_master) { - struct vmw_master *vmaster = - vmw_master(vmw_fp->locked_master); - - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); - ttm_vt_unlock(&vmaster->lock); - drm_master_put(&vmw_fp->locked_master); - } + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); ttm_object_file_release(&vmw_fp->tfile); kfree(vmw_fp); @@ -1052,55 +1036,6 @@ out_no_tfile: return ret; } -static struct vmw_master *vmw_master_check(struct drm_device *dev, - struct drm_file *file_priv, - unsigned int flags) -{ - int ret; - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); - struct vmw_master *vmaster; - - if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) - return NULL; - - ret = mutex_lock_interruptible(&dev->master_mutex); - if (unlikely(ret != 0)) - return ERR_PTR(-ERESTARTSYS); - - if (drm_is_current_master(file_priv)) { - mutex_unlock(&dev->master_mutex); - return NULL; - } - - /* - * Check if we were previously master, but now dropped. In that - * case, allow at least render node functionality. - */ - if (vmw_fp->locked_master) { - mutex_unlock(&dev->master_mutex); - - if (flags & DRM_RENDER_ALLOW) - return NULL; - - DRM_ERROR("Dropped master trying to access ioctl that " - "requires authentication.\n"); - return ERR_PTR(-EACCES); - } - mutex_unlock(&dev->master_mutex); - - /* - * Take the TTM lock. Possibly sleep waiting for the authenticating - * master to become master again, or for a SIGTERM if the - * authenticating master exits. 
- */ - vmaster = vmw_master(file_priv->master); - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - vmaster = ERR_PTR(ret); - - return vmaster; -} - static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg, long (*ioctl_func)(struct file *, unsigned int, @@ -1109,7 +1044,6 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->minor->dev; unsigned int nr = DRM_IOCTL_NR(cmd); - struct vmw_master *vmaster; unsigned int flags; long ret; @@ -1145,21 +1079,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, } else if (!drm_ioctl_flags(nr, &flags)) return -EINVAL; - vmaster = vmw_master_check(dev, file_priv, flags); - if (IS_ERR(vmaster)) { - ret = PTR_ERR(vmaster); - - if (ret != -ERESTARTSYS) - DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", - nr, ret); - return ret; - } - - ret = ioctl_func(filp, cmd, arg); - if (vmaster) - ttm_read_unlock(&vmaster->lock); - - return ret; + return ioctl_func(filp, cmd, arg); out_io_encoding: DRM_ERROR("Invalid command format, ioctl %d\n", @@ -1186,65 +1106,10 @@ static void vmw_lastclose(struct drm_device *dev) { } -static void vmw_master_init(struct vmw_master *vmaster) -{ - ttm_lock_init(&vmaster->lock); -} - -static int vmw_master_create(struct drm_device *dev, - struct drm_master *master) -{ - struct vmw_master *vmaster; - - vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); - if (unlikely(!vmaster)) - return -ENOMEM; - - vmw_master_init(vmaster); - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); - master->driver_priv = vmaster; - - return 0; -} - -static void vmw_master_destroy(struct drm_device *dev, - struct drm_master *master) -{ - struct vmw_master *vmaster = vmw_master(master); - - master->driver_priv = NULL; - kfree(vmaster); -} - static int vmw_master_set(struct drm_device *dev, struct drm_file *file_priv, bool from_open) { - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); - struct vmw_master *active = dev_priv->active_master; - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret = 0; - - if (active) { - BUG_ON(active != &dev_priv->fbdev_master); - ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); - if (unlikely(ret != 0)) - return ret; - - ttm_lock_set_kill(&active->lock, true, SIGTERM); - dev_priv->active_master = NULL; - } - - ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); - if (!from_open) { - ttm_vt_unlock(&vmaster->lock); - BUG_ON(vmw_fp->locked_master != file_priv->master); - drm_master_put(&vmw_fp->locked_master); - } - - dev_priv->active_master = vmaster; - /* * Inform a new master that the layout may have changed while * it was gone. @@ -1259,31 +1124,10 @@ static void vmw_master_drop(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret; - - /** - * Make sure the master doesn't disappear while we have - * it locked. 
- */ - vmw_fp->locked_master = drm_master_get(file_priv->master); - ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); vmw_kms_legacy_hotspot_clear(dev_priv); - if (unlikely((ret != 0))) { - DRM_ERROR("Unable to lock TTM at VT switch.\n"); - drm_master_put(&vmw_fp->locked_master); - } - - ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); - if (!dev_priv->enable_fb) vmw_svga_disable(dev_priv); - - dev_priv->active_master = &dev_priv->fbdev_master; - ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); - ttm_vt_unlock(&dev_priv->fbdev_master.lock); } /** @@ -1562,8 +1406,6 @@ static struct drm_driver driver = { .disable_vblank = vmw_disable_vblank, .ioctls = vmw_ioctls, .num_ioctls = ARRAY_SIZE(vmw_ioctls), - .master_create = vmw_master_create, - .master_destroy = vmw_master_destroy, .master_set = vmw_master_set, .master_drop = vmw_master_drop, .open = vmw_driver_open, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index b4c8817dc267..3a358a5495e4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -81,7 +81,6 @@ #define VMW_RES_SHADER ttm_driver_type4 struct vmw_fpriv { - struct drm_master *locked_master; struct ttm_object_file *tfile; bool gb_aware; /* user-space is guest-backed aware */ }; @@ -393,10 +392,6 @@ struct vmw_sw_context{ struct vmw_legacy_display; struct vmw_overlay; -struct vmw_master { - struct ttm_lock lock; -}; - struct vmw_vga_topology_state { uint32_t width; uint32_t height; @@ -559,11 +554,8 @@ struct vmw_private { spinlock_t svga_lock; /** - * Master management. + * PM management. */ - - struct vmw_master *active_master; - struct vmw_master fbdev_master; struct notifier_block pm_nb; bool refuse_hibernation; bool suspend_locked; @@ -632,11 +624,6 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) return (struct vmw_fpriv *)file_priv->driver_priv; } -static inline struct vmw_master *vmw_master(struct drm_master *master) -{ - return (struct vmw_master *) master->driver_priv; -} - /* * The locking here is fine-grained, so that it is performed once * for every read- and write operation. This is of course costly, but we @@ -1102,7 +1089,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth); -void vmw_kms_idle_workqueues(struct vmw_master *vmaster); bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, uint32_t pitch, uint32_t height); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 637043f1befa..862ca44680ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -946,12 +946,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, if (unlikely(drm_is_render_client(file_priv))) require_exist = true; - if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { - DRM_ERROR("Locked master refused legacy " - "surface reference.\n"); - return -EACCES; - } - handle = u_handle; } -- cgit v1.2.3-55-g7522
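
To make the backing-store address math above concrete, here is a small standalone user-space sketch of the offset-to-location decomposition that svga3dsurface_get_loc() performs before vmw_subres_dirty_add() unions the result into a per-subresource SVGA3dBox. It assumes a simplified layout (one sample per pixel and 1x1x1 pixel blocks, so row and image strides reduce to plain width and width * height products), and every type and helper name in it (struct cache, setup(), get_loc()) is a hypothetical stand-in, not driver code.

#include <stdio.h>
#include <stddef.h>

#define MAX_MIPS 4

struct mip {
	size_t bytes;      /* backing-store bytes for this level */
	size_t img_stride; /* bytes per z-slice */
	size_t row_stride; /* bytes per pixel row */
	unsigned int w, h, d;
};

struct cache {
	struct mip mip[MAX_MIPS];
	size_t mip_chain_bytes; /* sum of all levels for one layer */
	unsigned int num_mips, num_layers, bpp;
};

/* Build the per-mip strides, halving each dimension per level. */
static void setup(struct cache *c, unsigned int w, unsigned int h,
		  unsigned int d, unsigned int num_mips,
		  unsigned int num_layers, unsigned int bpp)
{
	unsigned int i;

	c->num_mips = num_mips;
	c->num_layers = num_layers;
	c->bpp = bpp;
	c->mip_chain_bytes = 0;
	for (i = 0; i < num_mips; ++i) {
		struct mip *m = &c->mip[i];

		m->w = w; m->h = h; m->d = d;
		m->row_stride = (size_t)w * bpp;
		m->img_stride = m->row_stride * h;
		m->bytes = m->img_stride * d;
		c->mip_chain_bytes += m->bytes;
		w = w > 1 ? w / 2 : 1;
		h = h > 1 ? h / 2 : 1;
		d = d > 1 ? d / 2 : 1;
	}
}

/* Mirror svga3dsurface_get_loc() for the simplified layout. */
static void get_loc(const struct cache *c, size_t offset,
		    unsigned int *layer, unsigned int *mip_level,
		    unsigned int *x, unsigned int *y, unsigned int *z)
{
	const struct mip *m = &c->mip[0];
	unsigned int i;

	/* Peel off whole layers, then whole mip levels. */
	*layer = offset / c->mip_chain_bytes;
	offset -= (size_t)*layer * c->mip_chain_bytes;
	for (i = 0; i < c->num_mips - 1 && offset >= m->bytes; ++i, ++m)
		offset -= m->bytes;
	*mip_level = i;

	/* What remains decomposes into z-slice, row and pixel. */
	*z = offset / m->img_stride;
	offset -= (size_t)*z * m->img_stride;
	*y = offset / m->row_stride;
	offset -= (size_t)*y * m->row_stride;
	*x = offset / c->bpp;
}

int main(void)
{
	struct cache c;
	unsigned int layer, mip_level, x, y, z;

	/* A 2-layer array texture: 8x8x1 base level, 3 mips, 4 bytes/pixel. */
	setup(&c, 8, 8, 1, 3, 2, 4);

	/* A byte 37 bytes into mip level 1 of layer 1. */
	get_loc(&c, c.mip_chain_bytes + c.mip[0].bytes + 37,
		&layer, &mip_level, &x, &y, &z);
	printf("layer %u mip %u -> (%u, %u, %u)\n",
	       layer, mip_level, x, y, z);
	return 0;
}

With the 4x4 mip level 1 (row stride 16, 4 bytes per pixel), offset 37 lands in row 2, pixel 1, so the program prints "layer 1 mip 1 -> (1, 2, 0)". In the driver, vmw_surface_tex_dirty_range_add() feeds such start and end locations, after svga3dsurface_inc_loc() on the end location, into vmw_subres_dirty_add().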