
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/*
	 * Only support discovering the end of the buffer, but also allow
	 * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0)
	 * pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

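/*
 * A minimal userspace sketch of the pattern the llseek handler above is
 * meant to support: discovering the size of a dma-buf fd. Only offset 0
 * with SEEK_END or SEEK_SET is accepted, so rewind with SEEK_SET(0):
 *
 *	off_t size = lseek(fd, 0, SEEK_END);
 *
 *	if (size < 0)
 *		return -errno;
 *	lseek(fd, 0, SEEK_SET);
 */
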
static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!fence_add_callback(fence_excl, &dcb->cb,
						       dma_buf_poll_cb)) {
				events &= ~pevents;
				fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			if (!fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!fence_add_callback(fence, &dcb->cb,
						dma_buf_poll_cb)) {
				fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

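/*
 * From userspace, the handler above lets implicit fences be waited on by
 * polling the dma-buf fd: POLLIN becomes ready once the exclusive fence
 * has signaled, POLLOUT once all fences (shared and exclusive) have.
 * A minimal sketch, waiting until it is safe to write to the buffer:
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, timeout_ms);
 */
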
static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for the exporter; useful in debugging.
 *
 * @priv:	[in]	Attach private data of allocator to this buffer
 * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer
 * @size:	[in]	Size of the buffer
 * @flags:	[in]	mode flags for the file
 * @exp_name:	[in]	name of the exporting module - useful for debugging
 * @resv:	[in]	reservation-object, NULL to allocate default one
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On failure (missing
 * mandatory ops or an allocation error), returns a negative error wrapped
 * in ERR_PTR().
 */
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags, const char *exp_name,
				struct reservation_object *resv)
{
	struct dma_buf *dmabuf;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);

	if (!resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;
	dmabuf->exp_name = exp_name;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export_named);

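/*
 * Example: a minimal exporter sketch. "my_ops" and "my_priv" are
 * illustrative stand-ins for an exporter's real dma_buf_ops table and
 * private data, not names defined by this file:
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_export_named(my_priv, &my_ops, my_size, O_RDWR,
 *				      KBUILD_MODNAME, NULL);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */
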
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns the newly installed fd. Otherwise, returns a
 * negative error code.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

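/*
 * Example: handing a freshly exported buffer to userspace (sketch). On
 * success the installed fd owns the file reference taken at export time,
 * so the caller must not drop it again (take an extra reference with
 * get_dma_buf() first if the driver needs to keep using the buffer); on
 * failure the buffer is still owned by the caller:
 *
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *	if (fd < 0) {
 *		dma_buf_put(dmabuf);
 *		return fd;
 *	}
 */
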
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; this
 * takes a reference on the underlying file via fget(). Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

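/*
 * Example: an importer resolving an fd received from userspace (sketch).
 * Every successful dma_buf_get() must be balanced by a dma_buf_put()
 * once the importer is done with the buffer:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	... use the buffer, e.g. via dma_buf_attach() ...
 *
 *	dma_buf_put(dmabuf);
 */
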
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

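/*
 * Example: the full importer cycle for device DMA access (sketch; "dev"
 * is the importing device and error paths are abbreviated):
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */
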
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of the cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len,
						    direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of the cpu access.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

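/*
 * Example: bracketing kernel cpu access to the first PAGE_SIZE bytes of
 * a buffer (sketch):
 *
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE,
 *				       DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	... read the buffer contents, e.g. via dma_buf_kmap() ...
 *
 *	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 */
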
/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

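/*
 * Example: copying one page out through the kmap interface (sketch;
 * "out" is a hypothetical destination buffer, and the access is assumed
 * to already be bracketed with dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() as described above):
 *
 *	void *vaddr = dma_buf_kmap(dmabuf, page_num);
 *
 *	memcpy(out, vaddr, PAGE_SIZE);
 *	dma_buf_kunmap(dmabuf, page_num, vaddr);
 */
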
/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

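/*
 * Example: forwarding a driver's own mmap handler to the dma-buf
 * (sketch; "my_obj" is a hypothetical driver object that holds a
 * dma_buf pointer):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, vma->vm_pgoff);
 *	}
 */
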
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is mapping an
 * object linearly into kernel address space when it is accessed often.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

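/*
 * Example: mapping a whole buffer linearly into the kernel (sketch).
 * dma_buf_vmap() may return NULL, e.g. when the exporter provides no
 * vmap op or address space is exhausted, so the result must be checked:
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	memset(vaddr, 0, dmabuf->size);
 *	dma_buf_vunmap(dmabuf, vaddr);
 */
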
#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);
		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
	/*
	 * s->private holds the int-returning callback passed to
	 * dma_buf_debugfs_create_file(); call it through a matching
	 * function pointer type.
	 */
	int (*func)(struct seq_file *) = s->private;

	return func(s);
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
	.open           = dma_buf_debug_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	int err = 0;

	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(dma_buf_debugfs_dir)) {
		err = PTR_ERR(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		return err;
	}

	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
	if (err)
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
				int (*write)(struct seq_file *))
{
	struct dentry *d;

	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
			write, &dma_buf_debug_fops);

	return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);
908