path: root/drivers/gpu/drm/ttm
author     Christian König    2018-02-23 15:12:00 +0100
committer  Alex Deucher       2018-03-14 20:38:24 +0100
commit     75a57669cbc881032c60615a31bfc6bfab4c813c (patch)
tree       e661f838d1097ca040d1f47b40c6425f4bcdde6b /drivers/gpu/drm/ttm
parent     drm/ttm: move ttm_tt defines into ttm_tt.h (diff)
drm/ttm: add ttm_sg_tt_init
This allows drivers to allocate only dma addresses, without a page array.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
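As a rough illustration of how a driver could use the new helper (this sketch is not part of the patch; the my_gtt names are invented, and it assumes the matching ttm_sg_tt_init() prototype is exported from the TTM headers of this kernel, which is outside this diffstat):

/*
 * Hypothetical driver-side sketch: back a scatter-gather BO with
 * ttm_sg_tt_init() so that only the dma_address array is allocated
 * and ttm->pages stays NULL.
 */
#include <drm/ttm/ttm_bo_driver.h>

struct my_gtt {                         /* invented wrapper for illustration */
        struct ttm_dma_tt dma_tt;
};

static int my_gtt_init(struct my_gtt *gtt, struct ttm_bo_device *bdev,
                       unsigned long size, uint32_t page_flags)
{
        /* The driver's ttm_backend_func is assumed to already be assigned
         * to gtt->dma_tt.ttm.func, as drivers do before ttm_dma_tt_init(). */

        /* With TTM_PAGE_FLAG_SG set, ttm_sg_tt_init() takes the
         * ttm_sg_tt_alloc_page_directory() path and allocates only
         * dma_address, not a page array. */
        return ttm_sg_tt_init(&gtt->dma_tt, bdev, size,
                              page_flags | TTM_PAGE_FLAG_SG);
}

static void my_gtt_fini(struct my_gtt *gtt)
{
        /* ttm_dma_tt_fini() now frees dma_address when no page array
         * was allocated. */
        ttm_dma_tt_fini(&gtt->dma_tt);
}

Without TTM_PAGE_FLAG_SG the call behaves like ttm_dma_tt_init() and allocates both the page array and the dma_address array.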
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  |  54
1 file changed, 45 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8e0b525cda00..971133106ec2 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -108,6 +108,16 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
         return 0;
 }
 
+static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+        ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+                                          sizeof(*ttm->dma_address),
+                                          GFP_KERNEL | __GFP_ZERO);
+        if (!ttm->dma_address)
+                return -ENOMEM;
+        return 0;
+}
+
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
                                           enum ttm_caching_state c_old,
@@ -227,8 +237,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
         ttm->func->destroy(ttm);
 }
 
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
-                unsigned long size, uint32_t page_flags)
+void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+                        unsigned long size, uint32_t page_flags)
 {
         ttm->bdev = bdev;
         ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -236,6 +246,12 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
         ttm->page_flags = page_flags;
         ttm->state = tt_unpopulated;
         ttm->swap_storage = NULL;
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+                unsigned long size, uint32_t page_flags)
+{
+        ttm_tt_init_fields(ttm, bdev, size, page_flags);
 
         if (ttm_tt_alloc_page_directory(ttm)) {
                 ttm_tt_destroy(ttm);
@@ -258,12 +274,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 {
         struct ttm_tt *ttm = &ttm_dma->ttm;
 
-        ttm->bdev = bdev;
-        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        ttm->caching_state = tt_cached;
-        ttm->page_flags = page_flags;
-        ttm->state = tt_unpopulated;
-        ttm->swap_storage = NULL;
+        ttm_tt_init_fields(ttm, bdev, size, page_flags);
 
         INIT_LIST_HEAD(&ttm_dma->pages_list);
         if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
@@ -275,11 +286,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+                   unsigned long size, uint32_t page_flags)
+{
+        struct ttm_tt *ttm = &ttm_dma->ttm;
+        int ret;
+
+        ttm_tt_init_fields(ttm, bdev, size, page_flags);
+
+        INIT_LIST_HEAD(&ttm_dma->pages_list);
+        if (page_flags & TTM_PAGE_FLAG_SG)
+                ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+        else
+                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+        if (ret) {
+                ttm_tt_destroy(ttm);
+                pr_err("Failed allocating page table\n");
+                return -ENOMEM;
+        }
+        return 0;
+}
+EXPORT_SYMBOL(ttm_sg_tt_init);
+
 void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 {
         struct ttm_tt *ttm = &ttm_dma->ttm;
 
-        kvfree(ttm->pages);
+        if (ttm->pages)
+                kvfree(ttm->pages);
+        else
+                kvfree(ttm_dma->dma_address);
         ttm->pages = NULL;
         ttm_dma->dma_address = NULL;
 }