Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--  drivers/dma-buf/dma-buf.c | 42
1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index df23239b04fc2..32f55640890ce 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
struct dma_fence *fence;
int r;
- dma_resv_for_each_fence(&cursor, resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+ fence) {
dma_fence_get(fence);
r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
if (!r)
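
For context (my reading of the dma-resv API, not something this hunk adds): dma_resv_usage_rw() is assumed to translate the old read/write bool into the new usage enum roughly as sketched below. The apparent inversion is deliberate, because a new writer has to wait for existing readers and writers, while a new reader only has to wait for writers.

/*
 * Illustrative sketch only; poll_rw_usage() is a made-up name standing
 * in for dma_resv_usage_rw() from <linux/dma-resv.h>.
 */
static inline enum dma_resv_usage poll_rw_usage(bool write)
{
	/* write: sync against readers and writers; read: writers only */
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}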
@@ -407,6 +408,7 @@ static inline int is_dma_buf_file(struct file *file)
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -416,6 +418,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
+ /*
+ * The ->i_ino acquired from get_next_ino() is not unique thus
+ * not suitable for using it as dentry name by dmabuf stats.
+ * Override ->i_ino with the unique and dmabuffs specific
+ * value.
+ */
+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
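
A standalone sketch of the inode-numbering pattern added above (all names below are illustrative, not from the patch): a dmabuf-private 64-bit counter hands out values that never repeat in practice, unlike get_next_ino(), so the inode number can double as a unique name for the per-buffer stats entry.

#include <linux/atomic.h>
#include <linux/fs.h>

/* Hypothetical stand-in for the counter introduced in the hunk above. */
static atomic64_t example_dmabuf_ino = ATOMIC64_INIT(0);

static void example_set_unique_ino(struct inode *inode)
{
	/* 64-bit counter: effectively never wraps, hence unique per boot. */
	inode->i_ino = atomic64_add_return(1, &example_dmabuf_ino);
}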
@@ -443,7 +452,7 @@ err_alloc_file:
* as a file descriptor by calling dma_buf_fd().
*
* 2. Userspace passes this file-descriptors to all drivers it wants this buffer
- * to share with: First the filedescriptor is converted to a &dma_buf using
+ * to share with: First the file descriptor is converted to a &dma_buf using
* dma_buf_get(). Then the buffer is attached to the device using
* dma_buf_attach().
*
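
A minimal importer-side sketch of the flow described in the comment above (error unwinding kept short; dev and fd are assumed to come from the caller, and example_import() is a hypothetical name):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static struct sg_table *example_import(struct device *dev, int fd,
				       struct dma_buf_attachment **out)
{
	struct dma_buf *dmabuf = dma_buf_get(fd);	/* fd -> &dma_buf */
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);		/* attach to device */
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return ERR_CAST(sgt);
	}

	*out = attach;
	return sgt;
}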
@@ -543,10 +552,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
- ret = dma_buf_stats_setup(dmabuf);
- if (ret)
- goto err_sysfs;
-
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
@@ -554,6 +559,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
+ ret = dma_buf_stats_setup(dmabuf);
+ if (ret)
+ goto err_sysfs;
+
return dmabuf;
err_sysfs:
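
Net effect of the two dma_buf_export() hunks above, sketched from the lines shown here (heavily trimmed, not the full function body): dma_buf_stats_setup() now runs only after the dmabuf carries its file, has its attachment list initialised and sits on db_list, so a failure there unwinds an object that is already fully listed.

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	ret = dma_buf_stats_setup(dmabuf);	/* moved to the last step */
	if (ret)
		goto err_sysfs;

	return dmabuf;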
@@ -660,12 +669,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ signed long ret;
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (IS_ERR_OR_NULL(sg_table))
+ return sg_table;
+
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
+ return ERR_PTR(ret);
+ }
+ }
- if (!IS_ERR_OR_NULL(sg_table))
- mangle_sg_table(sg_table);
-
+ mangle_sg_table(sg_table);
return sg_table;
}
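
A caller-style sketch of the wait that the hunk adds (illustrative, not part of the patch): dma_resv_wait_timeout() returns a negative errno on error or interruption, 0 on timeout and a positive value on success, which is why __map_dma_buf() only unmaps and bails out when ret < 0.

static int example_wait_for_kernel_fences(struct dma_resv *resv)
{
	long ret;

	/* Wait interruptibly for all DMA_RESV_USAGE_KERNEL fences. */
	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
				    true, MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;
}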
@@ -1124,7 +1145,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+ true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
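
Finally, an importer-side sketch of where this implicit-fence wait matters in practice (illustrative; the actual mapping of the buffer for CPU access between the two calls is omitted): begin/end CPU access bracket any CPU reads or writes of the buffer.

static int example_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	/* Waits for implicit fences via the dma_resv_wait_timeout() above. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU reads the buffer contents here ... */

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}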