/*
 * Copyright (C) 2013 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "sw_sync.h"

#include <video/adf.h>
#include <video/adf_client.h>
#include <video/adf_format.h>

#include "adf.h"

static inline bool vsync_active(u8 state)
{
	return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
}

/**
 * adf_interface_blank - set interface's DPMS state
 *
 * @intf: the interface
 * @state: one of %DRM_MODE_DPMS_*
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_blank(struct adf_interface *intf, u8 state)
{
	struct adf_device *dev = adf_interface_parent(intf);
	u8 prev_state;
	bool disable_vsync;
	bool enable_vsync;
	int ret = 0;
	struct adf_event_refcount *vsync_refcount;

	if (!intf->ops || !intf->ops->blank)
		return -EOPNOTSUPP;

	if (state > DRM_MODE_DPMS_OFF)
		return -EINVAL;

	mutex_lock(&dev->client_lock);
	if (state != DRM_MODE_DPMS_ON)
		flush_kthread_worker(&dev->post_worker);
	mutex_lock(&intf->base.event_lock);

	vsync_refcount = adf_obj_find_event_refcount(&intf->base,
			ADF_EVENT_VSYNC);
	if (!vsync_refcount) {
		ret = -ENOMEM;
		goto done;
	}

	prev_state = intf->dpms_state;
	if (prev_state == state) {
		ret = -EBUSY;
		goto done;
	}

	disable_vsync = vsync_active(prev_state) &&
			!vsync_active(state) &&
			vsync_refcount->refcount;
	enable_vsync = !vsync_active(prev_state) &&
			vsync_active(state) &&
			vsync_refcount->refcount;

	if (disable_vsync)
		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
				false);

	ret = intf->ops->blank(intf, state);
	if (ret < 0) {
		if (disable_vsync)
			intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
					true);
		goto done;
	}

	if (enable_vsync)
		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
				true);

	intf->dpms_state = state;
done:
	mutex_unlock(&intf->base.event_lock);
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_interface_blank);
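
/*
 * Example (illustrative sketch only, not part of the driver): a hypothetical
 * client blanking an interface it has already looked up.
 *
 *	int err = adf_interface_blank(intf, DRM_MODE_DPMS_OFF);
 *
 *	if (err == -EBUSY)
 *		pr_debug("interface is already in the requested state\n");
 *	else if (err < 0)
 *		pr_err("blanking failed: %d\n", err);
 */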

/**
 * adf_interface_dpms_state - get interface's current DPMS state
 *
 * @intf: the interface
 *
 * Returns one of %DRM_MODE_DPMS_*.
 */
u8 adf_interface_dpms_state(struct adf_interface *intf)
{
	struct adf_device *dev = adf_interface_parent(intf);
	u8 dpms_state;

	mutex_lock(&dev->client_lock);
	dpms_state = intf->dpms_state;
	mutex_unlock(&dev->client_lock);

	return dpms_state;
}
EXPORT_SYMBOL(adf_interface_dpms_state);

/**
 * adf_interface_current_mode - get interface's current display mode
 *
 * @intf: the interface
 * @mode: returns the current mode
 */
void adf_interface_current_mode(struct adf_interface *intf,
		struct drm_mode_modeinfo *mode)
{
	struct adf_device *dev = adf_interface_parent(intf);

	mutex_lock(&dev->client_lock);
	memcpy(mode, &intf->current_mode, sizeof(*mode));
	mutex_unlock(&dev->client_lock);
}
EXPORT_SYMBOL(adf_interface_current_mode);

/**
 * adf_interface_modelist - get interface's modelist
 *
 * @intf: the interface
 * @modelist: storage for the modelist (optional)
 * @n_modes: length of @modelist
 *
 * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
 * modelist entries into @modelist.
 *
 * Returns the length of the modelist.
 */
size_t adf_interface_modelist(struct adf_interface *intf,
		struct drm_mode_modeinfo *modelist, size_t n_modes)
{
	unsigned long flags;
	size_t retval;

	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
	if (modelist)
		memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
				min(n_modes, intf->n_modes));
	retval = intf->n_modes;
	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);

	return retval;
}
EXPORT_SYMBOL(adf_interface_modelist);
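
/*
 * Example (illustrative sketch only): the usual two-call pattern for reading
 * the modelist -- query the length first, then copy into a matching buffer.
 * "intf" is assumed to have been obtained elsewhere.
 *
 *	size_t n = adf_interface_modelist(intf, NULL, 0);
 *	struct drm_mode_modeinfo *modes;
 *
 *	modes = kcalloc(n, sizeof(modes[0]), GFP_KERNEL);
 *	if (modes)
 *		n = adf_interface_modelist(intf, modes, n);
 */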

/**
 * adf_interface_set_mode - set interface's display mode
 *
 * @intf: the interface
 * @mode: the new mode
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_set_mode(struct adf_interface *intf,
		struct drm_mode_modeinfo *mode)
{
	struct adf_device *dev = adf_interface_parent(intf);
	int ret = 0;

	if (!intf->ops || !intf->ops->modeset)
		return -EOPNOTSUPP;

	mutex_lock(&dev->client_lock);
	flush_kthread_worker(&dev->post_worker);

	ret = intf->ops->modeset(intf, mode);
	if (ret < 0)
		goto done;

	memcpy(&intf->current_mode, mode, sizeof(*mode));
done:
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_interface_set_mode);
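
/*
 * Example (illustrative sketch only): setting the first mode returned by
 * adf_interface_modelist() (see the sketch above); "intf" and "modes" are
 * assumed.
 *
 *	int err = adf_interface_set_mode(intf, &modes[0]);
 *	if (err < 0)
 *		pr_err("modeset failed: %d\n", err);
 */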

/**
 * adf_interface_get_screen_size - get size of screen connected to interface
 *
 * @intf: the interface
 * @width_mm: returns the screen width in mm
 * @height_mm: returns the screen height in mm
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
		u16 *height_mm)
{
	struct adf_device *dev = adf_interface_parent(intf);
	int ret;

	if (!intf->ops || !intf->ops->screen_size)
		return -EOPNOTSUPP;

	mutex_lock(&dev->client_lock);
	ret = intf->ops->screen_size(intf, width_mm, height_mm);
	mutex_unlock(&dev->client_lock);

	return ret;
}
EXPORT_SYMBOL(adf_interface_get_screen_size);

/**
 * adf_overlay_engine_supports_format - returns whether a format is in an
 * overlay engine's supported list
 *
 * @eng: the overlay engine
 * @format: format fourcc
 */
bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
		u32 format)
{
	size_t i;

	for (i = 0; i < eng->ops->n_supported_formats; i++)
		if (format == eng->ops->supported_formats[i])
			return true;

	return false;
}
EXPORT_SYMBOL(adf_overlay_engine_supports_format);
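
/*
 * Example (illustrative sketch only): rejecting an unsupported standard DRM
 * fourcc before building a post; "eng" is assumed to have been obtained
 * elsewhere.
 *
 *	if (!adf_overlay_engine_supports_format(eng, DRM_FORMAT_RGB565))
 *		return -EINVAL;
 */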

static int adf_buffer_validate(struct adf_buffer *buf)
{
	struct adf_overlay_engine *eng = buf->overlay_engine;
	struct device *dev = &eng->base.dev;
	struct adf_device *parent = adf_overlay_engine_parent(eng);
	u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;

	if (!adf_overlay_engine_supports_format(eng, buf->format)) {
		char format_str[ADF_FORMAT_STR_SIZE];
		adf_format_str(buf->format, format_str);
		dev_err(dev, "unsupported format %s\n", format_str);
		return -EINVAL;
	}

	if (!adf_format_is_standard(buf->format))
		return parent->ops->validate_custom_format(parent, buf);

	hsub = adf_format_horz_chroma_subsampling(buf->format);
	vsub = adf_format_vert_chroma_subsampling(buf->format);
	num_planes = adf_format_num_planes(buf->format);
	for (i = 0; i < num_planes; i++)
		cpp[i] = adf_format_plane_cpp(buf->format, i);

	return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
			cpp);
}

static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
		struct adf_buffer_mapping *mapping)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < buf->n_planes; i++) {
		struct dma_buf_attachment *attachment;
		struct sg_table *sg_table;

		attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
		if (IS_ERR(attachment)) {
			ret = PTR_ERR(attachment);
			dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
					i, ret);
			goto done;
		}
		mapping->attachments[i] = attachment;

		sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
		if (IS_ERR(sg_table)) {
			ret = PTR_ERR(sg_table);
			dev_err(&dev->base.dev, "mapping plane %zu failed: %d\n",
					i, ret);
			goto done;
		} else if (!sg_table) {
			ret = -ENOMEM;
			dev_err(&dev->base.dev, "mapping plane %zu failed\n",
					i);
			goto done;
		}
		mapping->sg_tables[i] = sg_table;
	}

done:
	if (ret < 0)
		adf_buffer_mapping_cleanup(mapping, buf);

	return ret;
}

static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
{
	struct sync_pt *pt;
	struct sync_fence *complete_fence;

	if (!dev->timeline) {
		dev->timeline = sw_sync_timeline_create(dev->base.name);
		if (!dev->timeline)
			return ERR_PTR(-ENOMEM);
		dev->timeline_max = 1;
	}

	dev->timeline_max++;
	pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
	if (!pt)
		goto err_pt_create;
	complete_fence = sync_fence_create(dev->base.name, pt);
	if (!complete_fence)
		goto err_fence_create;

	return complete_fence;

err_fence_create:
	sync_pt_free(pt);
err_pt_create:
	dev->timeline_max--;
	return ERR_PTR(-ENOSYS);
}

/**
 * adf_device_post - flip to a new set of buffers
 *
 * @dev: device targeted by the flip
 * @intfs: interfaces targeted by the flip
 * @n_intfs: number of targeted interfaces
 * @bufs: description of buffers displayed
 * @n_bufs: number of buffers displayed
 * @custom_data: driver-private data
 * @custom_data_size: size of driver-private data
 *
 * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
 * point to variables on the stack.  adf_device_post() also takes its own
 * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
 * variant transfers ownership of these resources to ADF instead.
 *
 * On success, returns a sync fence which signals when the buffers are removed
 * from the screen.  On failure, returns ERR_PTR(-errno).
 */
struct sync_fence *adf_device_post(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
		size_t custom_data_size)
{
	struct adf_interface **intfs_copy = NULL;
	struct adf_buffer *bufs_copy = NULL;
	void *custom_data_copy = NULL;
	struct sync_fence *ret;
	size_t i;

	intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
	if (!intfs_copy)
		return ERR_PTR(-ENOMEM);

	bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
	if (!bufs_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
	if (!custom_data_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	for (i = 0; i < n_bufs; i++) {
		size_t j;
		for (j = 0; j < bufs[i].n_planes; j++)
			get_dma_buf(bufs[i].dma_bufs[j]);
	}

	memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
	memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
	memcpy(custom_data_copy, custom_data, custom_data_size);

	ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
			n_bufs, custom_data_copy, custom_data_size);
	if (IS_ERR(ret))
		goto err_post;

	return ret;

err_post:
	for (i = 0; i < n_bufs; i++) {
		size_t j;
		for (j = 0; j < bufs[i].n_planes; j++)
			dma_buf_put(bufs[i].dma_bufs[j]);
	}
err_alloc:
	kfree(custom_data_copy);
	kfree(bufs_copy);
	kfree(intfs_copy);
	return ret;
}
EXPORT_SYMBOL(adf_device_post);
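
/*
 * Example (illustrative sketch only): posting one buffer to one interface.
 * Because adf_device_post() copies its arguments, the arrays may live on the
 * caller's stack; "dev", "intf" and a filled-in "buf" are assumed.
 *
 *	struct adf_interface *post_intfs[] = { intf };
 *	struct sync_fence *fence;
 *
 *	fence = adf_device_post(dev, post_intfs, 1, &buf, 1, NULL, 0);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 * The caller keeps the returned fence reference and should drop it with
 * sync_fence_put() once the release notification is no longer needed.
 */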

/**
 * adf_device_post_nocopy - flip to a new set of buffers
 *
 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
 * not take an extra reference on the dma-bufs in @bufs.
 *
 * @intfs, @bufs, and @custom_data must point to buffers allocated by
 * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
 * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
 * On failure, adf_device_post_nocopy() does NOT take ownership of these
 * buffers or the dma-bufs, and the caller must clean them up.
 *
 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
 * Clients may find the nocopy variant useful in limited cases, but most should
 * call adf_device_post() instead.
 */
struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs,
		void *custom_data, size_t custom_data_size)
{
	struct adf_pending_post *cfg;
	struct adf_buffer_mapping *mappings;
	struct sync_fence *ret;
	size_t i;
	int err;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
	if (!mappings) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	mutex_lock(&dev->client_lock);

	for (i = 0; i < n_bufs; i++) {
		err = adf_buffer_validate(&bufs[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}

		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}
	}

	INIT_LIST_HEAD(&cfg->head);
	cfg->config.n_bufs = n_bufs;
	cfg->config.bufs = bufs;
	cfg->config.mappings = mappings;
	cfg->config.custom_data = custom_data;
	cfg->config.custom_data_size = custom_data_size;

	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
	if (err < 0) {
		ret = ERR_PTR(err);
		goto err_buf;
	}

	mutex_lock(&dev->post_lock);

	if (dev->ops->complete_fence)
		ret = dev->ops->complete_fence(dev, &cfg->config,
				cfg->state);
	else
		ret = adf_sw_complete_fence(dev);

	if (IS_ERR(ret))
		goto err_fence;

	list_add_tail(&cfg->head, &dev->post_list);
	queue_kthread_work(&dev->post_worker, &dev->post_work);
	mutex_unlock(&dev->post_lock);
	mutex_unlock(&dev->client_lock);
	kfree(intfs);
	return ret;

err_fence:
	mutex_unlock(&dev->post_lock);

err_buf:
	for (i = 0; i < n_bufs; i++)
		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);

	mutex_unlock(&dev->client_lock);
	kfree(mappings);

err_alloc:
	kfree(cfg);
	return ret;
}
EXPORT_SYMBOL(adf_device_post_nocopy);
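
/*
 * Example (illustrative sketch only): the nocopy variant with heap-allocated
 * arguments.  On success ADF owns and frees them; on failure the caller must
 * clean up.  "dev", "intf" and a kmalloc()ed, filled-in "bufs" are assumed.
 *
 *	struct adf_interface **post_intfs;
 *	struct sync_fence *fence;
 *
 *	post_intfs = kzalloc(sizeof(post_intfs[0]), GFP_KERNEL);
 *	if (!post_intfs)
 *		return -ENOMEM;
 *	post_intfs[0] = intf;
 *
 *	fence = adf_device_post_nocopy(dev, post_intfs, 1, bufs, 1, NULL, 0);
 *	if (IS_ERR(fence)) {
 *		kfree(post_intfs);
 *		kfree(bufs);
 *		return PTR_ERR(fence);
 *	}
 *
 * The dma-buf references held through "bufs" remain the caller's to release
 * on the failure path, since no extra references were taken.
 */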

static void adf_attachment_list_to_array(struct adf_device *dev,
		struct list_head *src, struct adf_attachment *dst, size_t size)
{
	struct adf_attachment_list *entry;
	size_t i = 0;

	if (!dst)
		return;

	list_for_each_entry(entry, src, head) {
		if (i == size)
			return;
		dst[i] = entry->attachment;
		i++;
	}
}

/**
 * adf_device_attachments - get device's list of active attachments
 *
 * @dev: the device
 * @attachments: storage for the attachment list (optional)
 * @n_attachments: length of @attachments
 *
 * If @attachments is not NULL, adf_device_attachments() will copy up to
 * @n_attachments entries into @attachments.
 *
 * Returns the length of the active attachment list.
 */
size_t adf_device_attachments(struct adf_device *dev,
		struct adf_attachment *attachments, size_t n_attachments)
{
	size_t retval;

	mutex_lock(&dev->client_lock);
	adf_attachment_list_to_array(dev, &dev->attached, attachments,
			n_attachments);
	retval = dev->n_attached;
	mutex_unlock(&dev->client_lock);

	return retval;
}
EXPORT_SYMBOL(adf_device_attachments);

/**
 * adf_device_attachments_allowed - get device's list of allowed attachments
 *
 * @dev: the device
 * @attachments: storage for the attachment list (optional)
 * @n_attachments: length of @attachments
 *
 * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
 * @n_attachments entries into @attachments.
 *
 * Returns the length of the allowed attachment list.
 */
size_t adf_device_attachments_allowed(struct adf_device *dev,
		struct adf_attachment *attachments, size_t n_attachments)
{
	size_t retval;

	mutex_lock(&dev->client_lock);
	adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
			n_attachments);
	retval = dev->n_attach_allowed;
	mutex_unlock(&dev->client_lock);

	return retval;
}
EXPORT_SYMBOL(adf_device_attachments_allowed);

/**
 * adf_device_attached - return whether an overlay engine and interface are
 * attached
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 */
bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
		struct adf_interface *intf)
{
	struct adf_attachment_list *attachment;

	mutex_lock(&dev->client_lock);
	attachment = adf_attachment_find(&dev->attached, eng, intf);
	mutex_unlock(&dev->client_lock);

	return attachment != NULL;
}
EXPORT_SYMBOL(adf_device_attached);

/**
 * adf_device_attach_allowed - return whether the ADF device supports attaching
 * an overlay engine and interface
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 */
bool adf_device_attach_allowed(struct adf_device *dev,
		struct adf_overlay_engine *eng, struct adf_interface *intf)
{
	struct adf_attachment_list *attachment;

	mutex_lock(&dev->client_lock);
	attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
	mutex_unlock(&dev->client_lock);

	return attachment != NULL;
}
EXPORT_SYMBOL(adf_device_attach_allowed);

/**
 * adf_device_attach - attach an overlay engine to an interface
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 *
 * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
 * -%EALREADY if @intf and @eng are already attached, or -errno on any other
 * failure.
 */
int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
		struct adf_interface *intf)
{
	int ret;
	struct adf_attachment_list *attachment = NULL;

	ret = adf_attachment_validate(dev, eng, intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->client_lock);

	if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
		ret = -ENOMEM;
		goto done;
	}

	if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
		ret = -EINVAL;
		goto done;
	}

	if (adf_attachment_find(&dev->attached, eng, intf)) {
		ret = -EALREADY;
		goto done;
	}

	ret = adf_device_attach_op(dev, eng, intf);
	if (ret < 0)
		goto done;

	attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
	if (!attachment) {
		ret = -ENOMEM;
		goto done;
	}

	attachment->attachment.interface = intf;
	attachment->attachment.overlay_engine = eng;
	list_add_tail(&attachment->head, &dev->attached);
	dev->n_attached++;

done:
	mutex_unlock(&dev->client_lock);
	if (ret < 0)
		kfree(attachment);

	return ret;
}
EXPORT_SYMBOL(adf_device_attach);
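
/*
 * Example (illustrative sketch only): attaching an overlay engine to an
 * interface before posting to it; "dev", "eng" and "intf" are assumed.
 *
 *	int err = adf_device_attach(dev, eng, intf);
 *
 *	if (err && err != -EALREADY)
 *		return err;
 */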

/**
 * adf_device_detach - detach an overlay engine from an interface
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 *
 * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
 * or -errno on any other failure.
 */
int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
		struct adf_interface *intf)
{
	int ret;
	struct adf_attachment_list *attachment;

	ret = adf_attachment_validate(dev, eng, intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->client_lock);

	attachment = adf_attachment_find(&dev->attached, eng, intf);
	if (!attachment) {
		ret = -EINVAL;
		goto done;
	}

	ret = adf_device_detach_op(dev, eng, intf);
	if (ret < 0)
		goto done;

	adf_attachment_free(attachment);
	dev->n_attached--;
done:
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_device_detach);

/**
 * adf_interface_simple_buffer_alloc - allocate a simple buffer
 *
 * @intf: target interface
 * @w: width in pixels
 * @h: height in pixels
 * @format: format fourcc
 * @dma_buf: returns the allocated buffer
 * @offset: returns the byte offset of the allocated buffer's first pixel
 * @pitch: returns the allocated buffer's pitch
 *
 * See &struct adf_simple_buffer_alloc for a description of simple buffers and
 * their limitations.
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
		u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
{
	if (!intf->ops || !intf->ops->alloc_simple_buffer)
		return -EOPNOTSUPP;

	if (!adf_format_is_rgb(format))
		return -EINVAL;

	return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
			offset, pitch);
}
EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);

/**
 * adf_interface_simple_post - flip to a single buffer
 *
 * @intf: interface targeted by the flip
 * @buf: buffer to display
 *
 * adf_interface_simple_post() can be used generically for simple display
 * configurations, since the client does not need to provide any driver-private
 * configuration data.
 *
 * adf_interface_simple_post() has the same copying semantics as
 * adf_device_post().
 *
 * On success, returns a sync fence which signals when the buffer is removed
 * from the screen.  On failure, returns ERR_PTR(-errno).
 */
struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
		struct adf_buffer *buf)
{
	size_t custom_data_size = 0;
	void *custom_data = NULL;
	struct sync_fence *ret;

	if (intf->ops && intf->ops->describe_simple_post) {
		int err;

		custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
		if (!custom_data) {
			ret = ERR_PTR(-ENOMEM);
			goto done;
		}

		err = intf->ops->describe_simple_post(intf, buf, custom_data,
				&custom_data_size);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto done;
		}
	}

	ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
			custom_data, custom_data_size);
done:
	kfree(custom_data);
	return ret;
}
EXPORT_SYMBOL(adf_interface_simple_post);
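
/*
 * Example (illustrative sketch only) tying the two simple-buffer helpers
 * together: allocate an RGB buffer and flip to it.  "intf" and an attached
 * overlay engine "eng" are assumed, and the adf_buffer field names are
 * assumed to match <video/adf.h>.
 *
 *	struct adf_buffer buf;
 *	struct sync_fence *fence;
 *	int err;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.overlay_engine = eng;
 *	buf.w = 1280;
 *	buf.h = 720;
 *	buf.format = DRM_FORMAT_XRGB8888;
 *	buf.n_planes = 1;
 *
 *	err = adf_interface_simple_buffer_alloc(intf, buf.w, buf.h, buf.format,
 *			&buf.dma_bufs[0], &buf.offset[0], &buf.pitch[0]);
 *	if (err < 0)
 *		return err;
 *
 *	fence = adf_interface_simple_post(intf, &buf);
 */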