drm/msm: avoid memory allocation when sg table is imported

When the sg_table is imported, avoid allocating memory. This helps
reduce alloc/free overhead, thereby improving the composition cycle
time.

Change-Id: I1514e88f3ef7f7320719445884142f9ded43c49d
Signed-off-by: Kalyan Thota <kalyant@codeaurora.org>
This commit is contained in:
@@ -80,6 +80,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	if (obj->import_attach)
+		return msm_obj->pages;
+
 	if (!msm_obj->pages) {
 		struct drm_device *dev = obj->dev;
 		struct page **p;
@@ -572,8 +575,13 @@ void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 		struct page **pages = get_pages(obj);
 		if (IS_ERR(pages))
 			return ERR_CAST(pages);
-		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+		if (obj->import_attach)
+			msm_obj->vaddr = dma_buf_vmap(
+				obj->import_attach->dmabuf);
+		else
+			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+
 		if (msm_obj->vaddr == NULL)
 			return ERR_PTR(-ENOMEM);
 	}
@@ -659,7 +667,11 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
 
-	vunmap(msm_obj->vaddr);
+	if (obj->import_attach)
+		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+	else
+		vunmap(msm_obj->vaddr);
+
 	msm_obj->vaddr = NULL;
 }
@@ -1012,7 +1024,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj = NULL;
 	uint32_t size;
-	int ret, npages;
+	int ret;
 
 	/* if we don't have IOMMU, don't bother pretending we can import: */
 	if (!iommu_present(&platform_bus_type)) {
@@ -1033,19 +1045,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	drm_gem_private_object_init(dev, obj, size);
 
-	npages = size / PAGE_SIZE;
-
 	msm_obj = to_msm_bo(obj);
 	msm_obj->sgt = sgt;
-	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
-	if (!msm_obj->pages) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
-	if (ret)
-		goto fail;
+	msm_obj->pages = NULL;
 
 	return obj;
Reference in New Issue
Block a user