iommu: msm: validate subsequent lazy map arguments
Validate that attempts to lazy map a buffer that is already lazy mapped request the same number of elements, direction, and DMA mapping attributes as the original mapping.

Change-Id: I6094cd6281a22525aa9971f1c5684f423be08580
Signed-off-by: Liam Mark <lmark@codeaurora.org>
This commit is contained in:
@@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
@@ -27,9 +27,11 @@
|
||||
* @dev - Device this is mapped to. Used as key
|
||||
* @sgl - The scatterlist for this mapping
|
||||
* @nents - Number of entries in sgl
|
||||
* @dir - The direction for the unmap.
|
||||
* @dir - The direction for the map.
|
||||
* @meta - Backpointer to the meta this guy belongs to.
|
||||
* @ref - for reference counting this mapping
|
||||
* @map_attrs - dma mapping attributes
|
||||
* @buf_start_addr - address of start of buffer
|
||||
*
|
||||
* Represents a mapping of one dma_buf buffer to a particular device
|
||||
* and address range. There may exist other mappings of this buffer in
|
||||
@@ -44,6 +46,8 @@ struct msm_iommu_map {
|
||||
enum dma_data_direction dir;
|
||||
struct msm_iommu_meta *meta;
|
||||
struct kref ref;
|
||||
unsigned long map_attrs;
|
||||
dma_addr_t buf_start_addr;
|
||||
};
|
||||
|
||||
struct msm_iommu_meta {
|
||||
@@ -199,21 +203,43 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
iommu_map->sgl.dma_address = sg->dma_address;
|
||||
iommu_map->sgl.dma_length = sg->dma_length;
|
||||
iommu_map->dev = dev;
|
||||
iommu_map->dir = dir;
|
||||
iommu_map->nents = nents;
|
||||
iommu_map->map_attrs = attrs;
|
||||
iommu_map->buf_start_addr = sg_phys(sg);
|
||||
msm_iommu_add(iommu_meta, iommu_map);
|
||||
|
||||
} else {
|
||||
sg->dma_address = iommu_map->sgl.dma_address;
|
||||
sg->dma_length = iommu_map->sgl.dma_length;
|
||||
if (nents == iommu_map->nents &&
|
||||
dir == iommu_map->dir &&
|
||||
attrs == iommu_map->map_attrs &&
|
||||
sg_phys(sg) == iommu_map->buf_start_addr) {
|
||||
sg->dma_address = iommu_map->sgl.dma_address;
|
||||
sg->dma_length = iommu_map->sgl.dma_length;
|
||||
|
||||
kref_get(&iommu_map->ref);
|
||||
if (is_device_dma_coherent(dev))
|
||||
/*
|
||||
* Ensure all outstanding changes for coherent
|
||||
* buffers are applied to the cache before any
|
||||
* DMA occurs.
|
||||
*/
|
||||
dmb(ish);
|
||||
ret = nents;
|
||||
kref_get(&iommu_map->ref);
|
||||
if (is_device_dma_coherent(dev))
|
||||
/*
|
||||
* Ensure all outstanding changes for coherent
|
||||
* buffers are applied to the cache before any
|
||||
* DMA occurs.
|
||||
*/
|
||||
dmb(ish);
|
||||
ret = nents;
|
||||
} else {
|
||||
bool start_diff = (sg_phys(sg) !=
|
||||
iommu_map->buf_start_addr);
|
||||
|
||||
dev_err(dev, "lazy map request differs:\n"
|
||||
"req dir:%d, original dir:%d\n"
|
||||
"req nents:%d, original nents:%d\n"
|
||||
"req map attrs:%lu, original map attrs:%lu\n"
|
||||
"req buffer start address differs:%d\n",
|
||||
dir, iommu_map->dir, nents,
|
||||
iommu_map->nents, attrs, iommu_map->map_attrs,
|
||||
start_diff);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&iommu_meta->lock);
|
||||
return ret;
|
||||
@@ -321,13 +347,9 @@ void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Save direction for later use when we actually unmap.
|
||||
* Not used right now but in the future if we go to coherent mapping
|
||||
* API we might want to call the appropriate API when client asks
|
||||
* to unmap
|
||||
*/
|
||||
iommu_map->dir = dir;
|
||||
if (dir != iommu_map->dir)
|
||||
WARN(1, "%s: (%pK) dir:%d differs from original dir:%d\n",
|
||||
__func__, dma_buf, dir, iommu_map->dir);
|
||||
|
||||
kref_put(&iommu_map->ref, msm_iommu_map_release);
|
||||
mutex_unlock(&meta->lock);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
|
||||
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
@@ -28,6 +28,21 @@ int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, struct dma_buf *dma_buf,
|
||||
unsigned long attrs);
|
||||
|
||||
/*
|
||||
* This function takes an extra reference to the dma_buf.
|
||||
* What this means is that calling msm_dma_unmap_sg will not result in buffer's
|
||||
* iommu mapping being removed, which means that subsequent calls to lazy map
|
||||
* will simply re-use the existing iommu mapping.
|
||||
* The iommu unmapping of the buffer will occur when the ION buffer is
|
||||
* destroyed.
|
||||
* Using lazy mapping can provide a performance benefit because subsequent
|
||||
* mappings are faster.
|
||||
*
|
||||
* The limitation of using this API are that all subsequent iommu mappings
|
||||
* must be the same as the original mapping, ie they must map the same part of
|
||||
* the buffer with the same dma data direction. Also there can't be multiple
|
||||
* mappings of different parts of the buffer.
|
||||
*/
|
||||
static inline int msm_dma_map_sg_lazy(struct device *dev,
|
||||
struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir,
|
||||
|
||||
Reference in New Issue
Block a user