sync.c revision 573632c2eaf87429a89490173f34682bb71f6883
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

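/**
 * sync_timeline_create() - creates a sync object
 * @ops:	specifies the implementation ops for the object
 * @size:	size to allocate for this object
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline which will use the implementation specified by
 * @ops.  @size bytes are allocated, allowing implementation-specific data to
 * be kept after the generic sync_timeline struct.  Returns NULL if @size is
 * smaller than sizeof(struct sync_timeline) or if allocation fails.
 *
 * Illustrative driver-side usage (a sketch only; struct my_timeline and
 * my_timeline_ops are hypothetical):
 *
 *	struct my_timeline {
 *		struct sync_timeline obj;	// must be first
 *		u32 value;			// driver-private state
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */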
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

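/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * A sync implementation should call this when @obj is going away (i.e. on
 * module unload).  @obj is marked destroyed and its reference is dropped;
 * it is not actually freed until all of its child sync_pts are freed.  If
 * other references remain, the still-active pts are signaled with -ENOENT.
 */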
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */
	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

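/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * A sync implementation should call this whenever one of its sync_pts may
 * have signaled or hit an error condition.  Every pt on @obj's active list
 * whose ops->has_signaled() now reports a status is moved to a private
 * list under the lock, then its fence is signaled outside the lock.
 */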
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

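/**
 * sync_pt_create() - creates a sync pt
 * @parent:	sync_timeline the pt will belong to
 * @size:	size to allocate for this pt
 *
 * Creates a new sync_pt as a child of @parent.  @size bytes are allocated,
 * allowing implementation-specific data to be kept after the generic
 * sync_pt struct.  Takes a reference on @parent, which sync_pt_free()
 * later drops.
 */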
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

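/**
 * sync_pt_free() - frees a sync pt
 * @pt:		sync_pt to free
 *
 * Runs the implementation's free_pt hook (if any), unlinks @pt from its
 * parent timeline's lists, and drops the parent reference taken in
 * sync_pt_create().  Must not be called once @pt has been handed to a
 * fence; the fence owns it from then on.
 */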
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence. */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	/* anon_inode_getfile() returns an ERR_PTR() on failure, not NULL */
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
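/**
 * sync_fence_create() - creates a sync fence
 * @name:	name of fence to create
 * @pt:		sync_pt to add to the fence
 *
 * Creates a fence containing @pt, activates it, and signals the fence if
 * @pt has already signaled.  Once this call succeeds the fence owns @pt;
 * the caller must not free it directly.  Returns NULL if @pt already
 * belongs to a fence or if allocation fails.
 */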
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * Signal the fence in case pt was signaled before
	 * sync_pt_activate(pt) was called.
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);

			/*
			 * Collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two.
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);

					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

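/**
 * sync_fence_fdget() - gets a fence from an fd
 * @fd:		fd referencing a fence
 *
 * Verifies that @fd really references a fence (its file's f_op must be
 * sync_fence_fops), takes a file reference via fget(), and returns the
 * fence.  Release with sync_fence_put(); sync_fence_install() below hands
 * the fence's file off to an unused fd instead.
 */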
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

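/**
 * sync_fence_merge() - merges two fences
 * @name:	name of new fence
 * @a:		fence a
 * @b:		fence b
 *
 * Creates a new fence containing copies of all of @a's and @b's sync_pts;
 * two pts on the same timeline collapse into the one that signals later
 * (see sync_fence_merge_pts()).  @a and @b remain valid and independent.
 *
 * A hedged usage sketch (the fence variables are made up):
 *
 *	struct sync_fence *both;
 *
 *	both = sync_fence_merge("gpu+display", gpu_fence, display_fence);
 *	if (both == NULL)
 *		return -ENOMEM;		// alloc or dup failure
 */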
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_pt_activate(pt);
	}

	/*
	 * Signal the fence in case one of its pts was signaled before
	 * it was activated.
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * This should protect against two threads racing on the signaled
	 * false -> true transition.
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

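/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Queues @waiter->callback to run when @fence signals.  Returns 1 if
 * @fence has already signaled, a negative error if it is already in the
 * error state, and 0 if the callback was queued.
 */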
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

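/**
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Returns 0 if @waiter was removed from @fence's waiter list before its
 * callback fired, or -ENOENT if it was no longer queued (its callback may
 * be running or may already have run).
 */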
int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering.
	 */
	smp_rmb();
	return fence->status != 0;
}

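/**
 * sync_fence_wait() - waits on a fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms
 *
 * Waits for @fence to signal or error out.  A @timeout < 0 waits
 * indefinitely, and a @timeout of 0 just checks the current status.
 * Returns 0 once signaled, -ETIME on timeout, and a negative error if
 * the wait was interrupted or the fence errored.
 *
 * An illustrative in-kernel sketch (the 500 ms budget and dev, a
 * hypothetical struct device, are made up):
 *
 *	err = sync_fence_wait(fence, 500);
 *	if (err == -ETIME)
 *		dev_warn(dev, "fence not signaled within 500ms\n");
 *	else if (err < 0)
 *		dev_err(dev, "fence error %d\n", err);
 */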
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * Start with its membership in the global fence list.
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * Remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering.
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

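/*
 * The fence fd ioctls:
 *   SYNC_IOC_WAIT	 - wait, with a __s32 timeout in ms
 *   SYNC_IOC_MERGE	 - merge with another fence fd into a new fence fd
 *   SYNC_IOC_FENCE_INFO - copy fence and per-pt status info to userspace
 *
 * A hedged userspace sketch of merging two fence fds (error handling
 * omitted; fd1 and fd2 are made-up descriptors):
 *
 *	struct sync_merge_data data = { .fd2 = fd2 };
 *
 *	strncpy(data.name, "merged", sizeof(data.name));
 *	ioctl(fd1, SYNC_IOC_MERGE, &data);
 *	// data.fence now holds the merged fence's fd
 */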
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];

		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
						    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);

		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
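
/*
 * Dump the same report as the debugfs file to the kernel log.  The output
 * is emitted in DUMP_CHUNK-sized pieces, temporarily NUL-terminating each
 * chunk in place, apparently to stay within printk()'s line-length limits.
 */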
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif